Compare commits
main...shielded_c
146 commits
SHA1
0e310971a3, 9e3f41a5b1, 7c22f76274, 04c99c2749, e536057fea, c6b4da5788,
1f7f84fdfa, a5bdc6db66, 9a18b57d38, ed10632d97, 299429a278, 28fefe4ffe,
08a6a51cb8, 50465d4b34, 4f69af872c, 9bc6a61c93, 23aa97692e, 1e5810e56c,
b54cb6682c, 3ed309f019, d08aeaeafe, c6ee4e5dc1, ad5eee5666, fc72d75061,
de1843dc84, 930d498841, 5ea112699d, e244747bc3, 5a09ccf459, ce1bcf6d36,
28ba5e5435, 10adf34be5, 82dcaff8db, 621b252b0c, 20a089c331, df50d78042,
114ec36436, 179d7d2b04, f568fca98f, 6c9bc627d8, 1d7cf7d1dd, 54a0a0000d,
0268b2ce91, 81f8e2d4ac, bf0739c194, 5fe3f58ea9, 3e1d033708, c35576e196,
8ce26d692f, 7f29fd8dcf, 7fbada8a15, 286e75d342, df641d9d31, 35b0c4bef0,
c4496f21fc, 7e0e627921, 28ea8e787a, 0294455c5e, 734bc09b67, 0bcdea28a0,
fdf3a1b01b, ce7b8a5e18, 00024181cd, 814384848d, bea31f6d19, 250275d98d,
f415fc43ce, 3f15923537, 87cd725adb, 48accbd28f, 227c9ea173, d651f3d8e9,
ef0cfc4b20, ecb525a2bc, b77d123edd, f4e63465de, df31047ecb, 131674eabd,
5a94e8fb5b, 0518b3ab04, 2f0bed3018, 9da3b63644, 1d6f55543d, a3ed30e62b,
42d621bba7, 2e81ccf5b4, 022bf8ce75, 0e9457299c, 6b1ceee19f, 1e689ee701,
190845ce1d, 0c74b04c83, 215fec1d41, fcc8cee9d3, ca3f7a1b6b, 87c1113de4,
43b659dbe4, 63b1488ab6, 7eb31f3fea, 534e5d150d, e4a6223256, ab2664da70,
ae326cbb9a, 07cec02303, 2fdb8fc25a, 6d951c526a, 575a24adf1, 919e462f88,
a09b8560bb, c4cd573b26, d24a9e158f, 18a1634025, 78c0d2b234, 4314a59327,
e94f1261b5, 86da79a854, de89e3a9c4, 7bed470f5c, fa9a9cfb1d, 3d0e95513c,
ee151b00af, 22c14e235e, 1102843087, e03bec5efc, bee2c36072, b36b3d522f,
4ace8f6037, 98a7326c85, 46972df041, 565d7c3ee5, ac695a05bf, fc56971a2d,
ee87cf0e29, ebcb275cd8, f745da9fb2, 4f442efbd7, f9a84f0732, e0bf964ff0,
a9fc4c1b91, b52ff270c5, 1713ecd9f8, edb82fdd78, 339d787cf8, c32b21b4b1,
71477290fc, 9716d86825
@@ -3,8 +3,8 @@
 |gh_actions|
 |docs|

-``tractor`` is a `structured concurrent`_, multi-processing_ runtime
-built on trio_.
+``tractor`` is a `structured concurrent`_, (optionally
+distributed_) multi-processing_ runtime built on trio_.

 Fundamentally, ``tractor`` gives you parallelism via
 ``trio``-"*actors*": independent Python processes (aka
@@ -17,11 +17,20 @@ protocol" constructed on top of multiple Pythons each running a ``trio``
 scheduled runtime - a call to ``trio.run()``.

 We believe the system adheres to the `3 axioms`_ of an "`actor model`_"
-but likely *does not* look like what *you* probably think an "actor
-model" looks like, and that's *intentional*.
+but likely **does not** look like what **you** probably *think* an "actor
+model" looks like, and that's **intentional**.

-The first step to grok ``tractor`` is to get the basics of ``trio`` down.
-A great place to start is the `trio docs`_ and this `blog post`_.
+
+Where do i start!?
+------------------
+The first step to grok ``tractor`` is to get an intermediate
+knowledge of ``trio`` and **structured concurrency** B)
+
+Some great places to start are,
+- the seminal `blog post`_
+- obviously the `trio docs`_
+- wikipedia's nascent SC_ page
+- the fancy diagrams @ libdill-docs_


 Features
@@ -593,6 +602,7 @@ matrix seems too hip, we're also mostly all in the the `trio gitter
 channel`_!

 .. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228
+.. _distributed: https://en.wikipedia.org/wiki/Distributed_computing
 .. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing
 .. _trio: https://github.com/python-trio/trio
 .. _nurseries: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/#nurseries-a-structured-replacement-for-go-statements
@@ -611,8 +621,9 @@ channel`_!
 .. _trio docs: https://trio.readthedocs.io/en/latest/
 .. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
 .. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
+.. _SC: https://en.wikipedia.org/wiki/Structured_concurrency
+.. _libdill-docs: https://sustrik.github.io/libdill/structured-concurrency.html
 .. _structured chadcurrency: https://en.wikipedia.org/wiki/Structured_concurrency
-.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
 .. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony
 .. _async generators: https://www.python.org/dev/peps/pep-0525/
 .. _trio-parallel: https://github.com/richardsheridan/trio-parallel
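For orientation while reading the rest of this compare: the README's claim that ``tractor`` gives you parallelism via ``trio``-"actors" reduces to spawning subprocess actors from an actor nursery. A minimal sketch (not part of this diff; it only uses the public calls exercised by the test changes below, e.g. ``tractor.open_nursery()`` and ``ActorNursery.run_in_actor()``):

import tractor
import trio


async def cube(x: int) -> int:
    # runs in a separate "actor" (sub-process) with its own trio runtime
    return x ** 3


async def main():
    async with tractor.open_nursery() as an:
        # spawn a subprocess, run the target task to completion
        # and get back a portal to its result
        portal = await an.run_in_actor(
            cube,
            x=3,
        )
        assert await portal.result() == 27


if __name__ == '__main__':
    trio.run(main)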
@@ -0,0 +1,117 @@
+import asyncio
+
+import trio
+import tractor
+from tractor import to_asyncio
+
+
+async def aio_sleep_forever():
+    await asyncio.sleep(float('inf'))
+
+
+async def bp_then_error(
+    to_trio: trio.MemorySendChannel,
+    from_trio: asyncio.Queue,
+
+    raise_after_bp: bool = True,
+
+) -> None:
+
+    # sync with ``trio``-side (caller) task
+    to_trio.send_nowait('start')
+
+    # NOTE: what happens here inside the hook needs some refinement..
+    # => seems like it's still `._debug._set_trace()` but
+    # we set `Lock.local_task_in_debug = 'sync'`, we probably want
+    # some further, at least, meta-data about the task/actor in debug
+    # in terms of making it clear it's asyncio mucking about.
+    breakpoint()
+
+    # short checkpoint / delay
+    await asyncio.sleep(0.5)
+
+    if raise_after_bp:
+        raise ValueError('blah')
+
+    # TODO: test case with this so that it gets cancelled?
+    else:
+        # XXX NOTE: this is required in order to get the SIGINT-ignored
+        # hang case documented in the module script section!
+        await aio_sleep_forever()
+
+
+@tractor.context
+async def trio_ctx(
+    ctx: tractor.Context,
+    bp_before_started: bool = False,
+):
+
+    # this will block until the ``asyncio`` task sends a "first"
+    # message, see first line in above func.
+    async with (
+
+        to_asyncio.open_channel_from(
+            bp_then_error,
+            raise_after_bp=not bp_before_started,
+        ) as (first, chan),
+
+        trio.open_nursery() as n,
+    ):
+
+        assert first == 'start'
+
+        if bp_before_started:
+            await tractor.breakpoint()
+
+        await ctx.started(first)
+
+        n.start_soon(
+            to_asyncio.run_task,
+            aio_sleep_forever,
+        )
+        await trio.sleep_forever()
+
+
+async def main(
+    bps_all_over: bool = False,
+
+) -> None:
+
+    async with tractor.open_nursery() as n:
+
+        p = await n.start_actor(
+            'aio_daemon',
+            enable_modules=[__name__],
+            infect_asyncio=True,
+            debug_mode=True,
+            loglevel='cancel',
+        )
+
+        async with p.open_context(
+            trio_ctx,
+            bp_before_started=bps_all_over,
+        ) as (ctx, first):
+
+            assert first == 'start'
+
+            if bps_all_over:
+                await tractor.breakpoint()
+
+            # await trio.sleep_forever()
+            await ctx.cancel()
+            assert 0
+
+        # TODO: case where we cancel from trio-side while asyncio task
+        # has debugger lock?
+        # await p.cancel_actor()
+
+
+if __name__ == '__main__':
+
+    # works fine B)
+    trio.run(main)
+
+    # will hang and ignores SIGINT !!
+    # NOTE: you'll need to send a SIGQUIT (via ctl-\) to kill it
+    # manually..
+    # trio.run(main, True)
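The new example above leans on ``tractor.to_asyncio.open_channel_from()``. Stripped of the breakpoint logic, the cross-loop handshake pattern it demonstrates looks roughly like this (a sketch assuming the same API surface the example itself uses; the function and message names are illustrative):

import asyncio

import trio
import tractor
from tractor import to_asyncio


async def aio_echo(
    to_trio: trio.MemorySendChannel,
    from_trio: asyncio.Queue,
) -> None:
    # runs on the asyncio loop inside the "infected" actor:
    # first msg syncs the trio side, then echo whatever arrives.
    to_trio.send_nowait('ready')
    while True:
        msg = await from_trio.get()
        to_trio.send_nowait(msg)


@tractor.context
async def trio_side(ctx: tractor.Context) -> None:
    async with to_asyncio.open_channel_from(aio_echo) as (first, chan):
        assert first == 'ready'
        await ctx.started(first)
        # the channel is bidirectional from the trio side
        await chan.send('ping')
        assert await chan.receive() == 'ping'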
@@ -65,21 +65,28 @@ async def aggregate(seed)
     print("AGGREGATOR COMPLETE!")


-# this is the main actor and *arbiter*
-async def main():
-    # a nursery which spawns "actors"
-    async with tractor.open_nursery(
-        arbiter_addr=('127.0.0.1', 1616)
-    ) as nursery:
+async def main() -> list[int]:
+    '''
+    This is the "root" actor's main task's entrypoint.
+
+    By default (and if not otherwise specified) that root process
+    also acts as a "registry actor" / "registrar" on the localhost
+    for the purposes of multi-actor "service discovery".
+
+    '''
+    # yes, a nursery which spawns `trio`-"actors" B)
+    nursery: tractor.ActorNursery
+    async with tractor.open_nursery() as nursery:

         seed = int(1e3)
         pre_start = time.time()

-        portal = await nursery.start_actor(
+        portal: tractor.Portal = await nursery.start_actor(
             name='aggregator',
             enable_modules=[__name__],
         )

+        stream: tractor.MsgStream
         async with portal.open_stream_from(
             aggregate,
             seed=seed,
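The new docstring above describes the root actor doubling as a "registry" for multi-actor service discovery. In practice that means other tasks can look actors up by the name they were started with; a short sketch (hypothetical usage, built on the ``tractor.wait_for_actor()`` API that also appears in the test changes further down):

import tractor
import trio


async def main():
    async with tractor.open_nursery() as an:
        # the name passed here is what discovery keys on
        await an.start_actor(
            'aggregator',
            enable_modules=[__name__],
        )

        # any task can now find the actor by name via the registry
        async with tractor.wait_for_actor('aggregator') as portal:
            print('found the aggregator actor!')

        await an.cancel()


if __name__ == '__main__':
    trio.run(main)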
@@ -0,0 +1,8 @@
+# vim: ft=ini
+# pytest.ini for tractor
+
+[pytest]
+# don't show frickin captured logs AGAIN in the report..
+addopts = --show-capture='no'
+log_cli = false
+; minversion = 6.0
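The ini above just bakes in a default you could otherwise pass per invocation; the `addopts` line is equivalent to running (standard pytest flag, shown for illustration):

pytest --show-capture='no' tests/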
@@ -6,3 +6,4 @@ mypy
 trio_typing
 pexpect
 towncrier
+numpy
setup.py

@@ -26,7 +26,7 @@ with open('docs/README.rst', encoding='utf-8') as f:
 setup(
     name="tractor",
     version='0.1.0a6dev0',  # alpha zone
-    description='structured concurrrent `trio`-"actors"',
+    description='structured concurrent `trio`-"actors"',
     long_description=readme,
     license='AGPLv3',
     author='Tyler Goodlet',
@@ -36,8 +36,10 @@ setup(
     platforms=['linux', 'windows'],
     packages=[
         'tractor',
-        'tractor.experimental',
-        'tractor.trionics',
+        'tractor.experimental',  # wacky ideas
+        'tractor.trionics',  # trio extensions
+        'tractor.msg',  # lowlevel data types
+        'tractor.devx',  # "dev-experience"
     ],
     install_requires=[

@@ -50,6 +52,7 @@ setup(
         'exceptiongroup',

         # tooling
+        'stackscope',
         'tricycle',
         'trio_typing',
         'colorlog',
@@ -61,16 +64,15 @@ setup(
         # debug mode REPL
         'pdbp',

+        # TODO: distributed transport using
+        # linux kernel networking
+        # 'pyroute2',
+
         # pip ref docs on these specs:
         # https://pip.pypa.io/en/stable/reference/requirement-specifiers/#examples
         # and pep:
         # https://peps.python.org/pep-0440/#version-specifiers

-        # windows deps workaround for ``pdbpp``
-        # https://github.com/pdbpp/pdbpp/issues/498
-        # https://github.com/pdbpp/fancycompleter/issues/37
         'pyreadline3 ; platform_system == "Windows"',

     ],
     tests_require=['pytest'],
     python_requires=">=3.10",
@@ -29,7 +29,7 @@ def tractor_test(fn):

     If fixtures:

-      - ``arb_addr`` (a socket addr tuple where arbiter is listening)
+      - ``reg_addr`` (a socket addr tuple where arbiter is listening)
       - ``loglevel`` (logging level passed to tractor internals)
       - ``start_method`` (subprocess spawning backend)

@@ -40,16 +40,19 @@ def tractor_test(fn):
     def wrapper(
         *args,
         loglevel=None,
-        arb_addr=None,
-        start_method=None,
+        reg_addr=None,
+        start_method: str|None = None,
+        debug_mode: bool = False,
         **kwargs
     ):
         # __tracebackhide__ = True

-        if 'arb_addr' in inspect.signature(fn).parameters:
+        # NOTE: inject any test func declared fixture
+        # names by manually checking!
+        if 'reg_addr' in inspect.signature(fn).parameters:
             # injects test suite fixture value to test as well
             # as `run()`
-            kwargs['arb_addr'] = arb_addr
+            kwargs['reg_addr'] = reg_addr

         if 'loglevel' in inspect.signature(fn).parameters:
             # allows test suites to define a 'loglevel' fixture
@@ -64,19 +67,23 @@ def tractor_test(fn):
             # set of subprocess spawning backends
             kwargs['start_method'] = start_method

+        if 'debug_mode' in inspect.signature(fn).parameters:
+            # set of subprocess spawning backends
+            kwargs['debug_mode'] = debug_mode
+
         if kwargs:

             # use explicit root actor start

             async def _main():
                 async with tractor.open_root_actor(
                     # **kwargs,
-                    arbiter_addr=arb_addr,
+                    registry_addrs=[reg_addr] if reg_addr else None,
                     loglevel=loglevel,
                     start_method=start_method,

                     # TODO: only enable when pytest is passed --pdb
-                    # debug_mode=True,
+                    debug_mode=debug_mode,

                 ):
                     await fn(*args, **kwargs)
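Downstream, a test decorated with `@tractor_test` can now simply declare the renamed fixtures as parameters and the wrapper injects them both into the test and into `open_root_actor()`. A hypothetical minimal consumer (sketch only; `reg_addr` and `debug_mode` are the session fixtures defined later in this file):

import tractor


@tractor_test
async def test_example(
    reg_addr: tuple[str, int],
    debug_mode: bool,
):
    # the decorator has already entered `tractor.open_root_actor()`
    # passing `registry_addrs=[reg_addr]` and `debug_mode=debug_mode`,
    # so a runtime is up and this actor is the root.
    assert tractor.current_actor().name == 'root'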
@@ -92,9 +99,6 @@ def tractor_test(fn):
     return wrapper


-_arb_addr = '127.0.0.1', random.randint(1000, 9999)
-
-
 # Sending signal.SIGINT on subprocess fails on windows. Use CTRL_* alternatives
 if platform.system() == 'Windows':
     _KILL_SIGNAL = signal.CTRL_BREAK_EVENT

@@ -133,22 +137,43 @@ def examples_dir() -> pathlib.Path:

 def pytest_addoption(parser):
     parser.addoption(
-        "--ll", action="store", dest='loglevel',
+        "--ll",
+        action="store",
+        dest='loglevel',
         default='ERROR', help="logging level to set when testing"
     )

     parser.addoption(
-        "--spawn-backend", action="store", dest='spawn_backend',
+        "--spawn-backend",
+        action="store",
+        dest='spawn_backend',
         default='trio',
         help="Processing spawning backend to use for test run",
     )

+    parser.addoption(
+        "--tpdb", "--debug-mode",
+        action="store_true",
+        dest='tractor_debug_mode',
+        # default=False,
+        help=(
+            'Enable a flag that can be used by tests to set the '
+            '`debug_mode: bool` for engaging the internal '
+            'multi-proc debugger sys.'
+        ),
+    )
+

 def pytest_configure(config):
     backend = config.option.spawn_backend
     tractor._spawn.try_set_start_method(backend)


+@pytest.fixture(scope='session')
+def debug_mode(request):
+    return request.config.option.tractor_debug_mode
+
+
 @pytest.fixture(scope='session', autouse=True)
 def loglevel(request):
     orig = tractor.log._default_loglevel

@@ -173,9 +198,23 @@ def ci_env() -> bool:
     return _ci_env


+# choose randomly at import time
+_reg_addr: tuple[str, int] = (
+    '127.0.0.1',
+    random.randint(1000, 9999),
+)
+
+
 @pytest.fixture(scope='session')
-def arb_addr():
-    return _arb_addr
+def reg_addr() -> tuple[str, int]:
+
+    # globally override the runtime to the per-test-session-dynamic
+    # addr so that all tests never conflict with any other actor
+    # tree using the default.
+    from tractor import _root
+    _root._default_lo_addrs = [_reg_addr]
+
+    return _reg_addr


 def pytest_generate_tests(metafunc):
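With the new CLI flag and session fixture wired together, turning on the multi-process debugger for a whole test run becomes a one-flag affair; any test declaring a `debug_mode: bool` parameter picks the value up automatically (illustrative invocation):

pytest --tpdb tests/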
@@ -216,30 +255,35 @@ def sig_prog(proc, sig):
 def daemon(
     loglevel: str,
     testdir,
-    arb_addr: tuple[str, int],
+    reg_addr: tuple[str, int],
 ):
     '''
-    Run a daemon actor as a "remote arbiter".
+    Run a daemon root actor as a separate actor-process tree and
+    "remote registrar" for discovery-protocol related tests.

     '''
     if loglevel in ('trace', 'debug'):
-        # too much logging will lock up the subproc (smh)
-        loglevel = 'info'
+        # XXX: too much logging will lock up the subproc (smh)
+        loglevel: str = 'info'

-    cmdargs = [
-        sys.executable, '-c',
-        "import tractor; tractor.run_daemon([], registry_addr={}, loglevel={})"
-        .format(
-            arb_addr,
-            "'{}'".format(loglevel) if loglevel else None)
-    ]
+    code: str = (
+        "import tractor; "
+        "tractor.run_daemon([], registry_addrs={reg_addrs}, loglevel={ll})"
+    ).format(
+        reg_addrs=str([reg_addr]),
+        ll="'{}'".format(loglevel) if loglevel else None,
+    )
+    cmd: list[str] = [
+        sys.executable,
+        '-c', code,
+    ]

-    kwargs = dict()
+    kwargs = {}
     if platform.system() == 'Windows':
         # without this, tests hang on windows forever
         kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP

     proc = testdir.popen(
-        cmdargs,
+        cmd,
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         **kwargs,
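For reference, with e.g. `reg_addr = ('127.0.0.1', 1616)` and `loglevel = 'info'` the `code` string built above renders to this plain one-liner (the address is illustrative):

python -c "import tractor; tractor.run_daemon([], registry_addrs=[('127.0.0.1', 1616)], loglevel='info')"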
@@ -298,44 +298,69 @@ async def inf_streamer(

     async with (
         ctx.open_stream() as stream,
-        trio.open_nursery() as n,
+        trio.open_nursery() as tn,
     ):
-        async def bail_on_sentinel():
+        async def close_stream_on_sentinel():
             async for msg in stream:
                 if msg == 'done':
+                    print(
+                        'streamer RXed "done" sentinel msg!\n'
+                        'CLOSING `MsgStream`!'
+                    )
                     await stream.aclose()
                 else:
                     print(f'streamer received {msg}')
+            else:
+                print('streamer exited recv loop')

         # start termination detector
-        n.start_soon(bail_on_sentinel)
+        tn.start_soon(close_stream_on_sentinel)

-        for val in itertools.count():
+        cap: int = 10000  # so that we don't spin forever when bug..
+        for val in range(cap):
             try:
+                print(f'streamer sending {val}')
                 await stream.send(val)
+                if val > cap:
+                    raise RuntimeError(
+                        'Streamer never cancelled by sentinel?'
+                    )
+                await trio.sleep(0.001)

-            # close out the stream gracefully
             except trio.ClosedResourceError:
+                # close out the stream gracefully
                 print('transport closed on streamer side!')
+                assert stream.closed
                 break
+        else:
+            raise RuntimeError(
+                'Streamer not cancelled before finished sending?'
+            )

-    print('terminating streamer')
+    print('streamer exited .open_streamer() block')


-def test_local_task_fanout_from_stream():
+def test_local_task_fanout_from_stream(
+    debug_mode: bool,
+):
     '''
     Single stream with multiple local consumer tasks using the
     ``MsgStream.subscribe()`` api.

-    Ensure all tasks receive all values after stream completes sending.
+    Ensure all tasks receive all values after stream completes
+    sending.

     '''
-    consumers = 22
+    consumers: int = 22

     async def main():

         counts = Counter()

-        async with tractor.open_nursery() as tn:
-            p = await tn.start_actor(
+        async with tractor.open_nursery(
+            debug_mode=debug_mode,
+        ) as tn:
+            p: tractor.Portal = await tn.start_actor(
                 'inf_streamer',
                 enable_modules=[__name__],
             )
@@ -343,7 +368,6 @@ def test_local_task_fanout_from_stream():
             p.open_context(inf_streamer) as (ctx, _),
             ctx.open_stream() as stream,
         ):
-
             async def pull_and_count(name: str):
                 # name = trio.lowlevel.current_task().name
                 async with stream.subscribe() as recver:
@@ -352,7 +376,7 @@ def test_local_task_fanout_from_stream():
                     tractor.trionics.BroadcastReceiver
                 )
                 async for val in recver:
-                    # print(f'{name}: {val}')
+                    print(f'bx {name} rx: {val}')
                     counts[name] += 1

                 print(f'{name} bcaster ended')
@@ -362,10 +386,14 @@ def test_local_task_fanout_from_stream():
             with trio.fail_after(3):
                 async with trio.open_nursery() as nurse:
                     for i in range(consumers):
-                        nurse.start_soon(pull_and_count, i)
+                        nurse.start_soon(
+                            pull_and_count,
+                            i,
+                        )

+                    # delay to let bcast consumers pull msgs
                     await trio.sleep(0.5)
-                    print('\nterminating')
+                    print('terminating nursery of bcast rxer consumers!')
                     await stream.send('done')

             print('closed stream connection')
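The fan-out machinery the test above exercises is usable directly: `MsgStream.subscribe()` hands each local task its own `BroadcastReceiver` over the single underlying IPC stream. A condensed sketch of that pattern (assuming the same APIs the test imports; `stream` is an already-opened `tractor.MsgStream`):

import trio


async def fanout_consumers(stream, n_consumers: int = 3):

    async def consume(name: str):
        # each task gets an independent broadcast receiver
        # wrapping the one underlying stream
        async with stream.subscribe() as recver:
            async for val in recver:
                print(f'{name} rx: {val}')

    async with trio.open_nursery() as tn:
        for i in range(n_consumers):
            tn.start_soon(consume, f'consumer_{i}')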
@@ -47,17 +47,19 @@ async def do_nuthin():
     ],
     ids=['no_args', 'unexpected_args'],
 )
-def test_remote_error(arb_addr, args_err):
-    """Verify an error raised in a subactor that is propagated
+def test_remote_error(reg_addr, args_err):
+    '''
+    Verify an error raised in a subactor that is propagated
     to the parent nursery, contains the underlying boxed builtin
     error type info and causes cancellation and reraising all the
     way up the stack.
-    """
+
+    '''
     args, errtype = args_err

     async def main():
         async with tractor.open_nursery(
-            arbiter_addr=arb_addr,
+            registry_addrs=[reg_addr],
         ) as nursery:

             # on a remote type error caused by bad input args
@@ -65,7 +67,9 @@ def test_remote_error(arb_addr, args_err):
             # an exception group outside the nursery since the error
             # here and the far end task error are one in the same?
             portal = await nursery.run_in_actor(
-                assert_err, name='errorer', **args
+                assert_err,
+                name='errorer',
+                **args
             )

             # get result(s) from main task
@@ -97,7 +101,7 @@ def test_remote_error(arb_addr, args_err):
         assert exc.type == errtype


-def test_multierror(arb_addr):
+def test_multierror(reg_addr):
     '''
     Verify we raise a ``BaseExceptionGroup`` out of a nursery where
     more then one actor errors.
@@ -105,7 +109,7 @@ def test_multierror(arb_addr):
     '''
     async def main():
         async with tractor.open_nursery(
-            arbiter_addr=arb_addr,
+            registry_addrs=[reg_addr],
         ) as nursery:

             await nursery.run_in_actor(assert_err, name='errorer1')
@@ -130,14 +134,14 @@ def test_multierror(arb_addr):
 @pytest.mark.parametrize(
     'num_subactors', range(25, 26),
 )
-def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay):
+def test_multierror_fast_nursery(reg_addr, start_method, num_subactors, delay):
     """Verify we raise a ``BaseExceptionGroup`` out of a nursery where
     more then one actor errors and also with a delay before failure
     to test failure during an ongoing spawning.
     """
     async def main():
         async with tractor.open_nursery(
-            arbiter_addr=arb_addr,
+            registry_addrs=[reg_addr],
         ) as nursery:

             for i in range(num_subactors):
@@ -175,15 +179,20 @@ async def do_nothing():

 @pytest.mark.parametrize('mechanism', ['nursery_cancel', KeyboardInterrupt])
-def test_cancel_single_subactor(arb_addr, mechanism):
-    """Ensure a ``ActorNursery.start_actor()`` spawned subactor
+def test_cancel_single_subactor(reg_addr, mechanism):
+    '''
+    Ensure a ``ActorNursery.start_actor()`` spawned subactor
     cancels when the nursery is cancelled.
-    """
+
+    '''
     async def spawn_actor():
-        """Spawn an actor that blocks indefinitely.
-        """
+        '''
+        Spawn an actor that blocks indefinitely then cancel via
+        either `ActorNursery.cancel()` or an exception raise.
+
+        '''
         async with tractor.open_nursery(
-            arbiter_addr=arb_addr,
+            registry_addrs=[reg_addr],
         ) as nursery:

             portal = await nursery.start_actor(
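As these tests assert via `exc.type`, a remote failure surfaces locally as a `tractor.RemoteActorError` boxing the original builtin exception. A minimal sketch of that propagation (hypothetical spawn target; the `.type` attribute is the same one the tests check):

import tractor
import trio


async def boom() -> None:
    raise ValueError('bad input from afar')


async def main():
    try:
        async with tractor.open_nursery() as an:
            await an.run_in_actor(boom, name='errorer')
    except tractor.RemoteActorError as err:
        # the boxed builtin type survives the process boundary
        assert err.type == ValueError


trio.run(main)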
@@ -10,7 +10,6 @@ from contextlib import asynccontextmanager as acm

 import pytest
 import trio
-from trio_typing import TaskStatus
 import tractor
 from tractor import RemoteActorError
 from async_generator import aclosing
@@ -141,7 +140,7 @@ async def open_actor_local_nursery(
 )
 def test_actor_managed_trio_nursery_task_error_cancels_aio(
     asyncio_mode: bool,
-    arb_addr
+    reg_addr: tuple,
 ):
     '''
     Verify that a ``trio`` nursery created managed in a child actor
@@ -1,18 +1,26 @@
 '''
 ``async with ():`` inlined context-stream cancellation testing.

-Verify the we raise errors when streams are opened prior to sync-opening
-a ``tractor.Context`` beforehand.
+Verify that we raise errors when streams are opened prior to
+sync-opening a ``tractor.Context`` beforehand.

 '''
 from contextlib import asynccontextmanager as acm
 from itertools import count
 import platform
-from typing import Optional
+from pprint import pformat
+from typing import (
+    Callable,
+)

 import pytest
 import trio
 import tractor
+from tractor import (
+    Actor,
+    Context,
+    current_actor,
+)
 from tractor._exceptions import (
     StreamOverrun,
     ContextCancelled,
@@ -64,7 +72,7 @@ _state: bool = False

 @tractor.context
 async def too_many_starteds(
-    ctx: tractor.Context,
+    ctx: Context,
 ) -> None:
     '''
     Call ``Context.started()`` more then once (an error).
@@ -79,7 +87,7 @@ async def too_many_starteds(

 @tractor.context
 async def not_started_but_stream_opened(
-    ctx: tractor.Context,
+    ctx: Context,
 ) -> None:
     '''
     Enter ``Context.open_stream()`` without calling ``.started()``.
@@ -100,11 +108,15 @@ async def not_started_but_stream_opened(
     ],
     ids='misuse_type={}'.format,
 )
-def test_started_misuse(target):
+def test_started_misuse(
+    target: Callable,
+    debug_mode: bool,
+):
     async def main():
-        async with tractor.open_nursery() as n:
-            portal = await n.start_actor(
+        async with tractor.open_nursery(
+            debug_mode=debug_mode,
+        ) as an:
+            portal = await an.start_actor(
                 target.__name__,
                 enable_modules=[__name__],
             )
@@ -119,7 +131,7 @@ def test_started_misuse(target):
 @tractor.context
 async def simple_setup_teardown(

-    ctx: tractor.Context,
+    ctx: Context,
     data: int,
     block_forever: bool = False,

@@ -165,6 +177,7 @@ def test_simple_context(
     error_parent,
     callee_blocks_forever,
     pointlessly_open_stream,
+    debug_mode: bool,
 ):

     timeout = 1.5 if not platform.system() == 'Windows' else 4
@@ -172,9 +185,10 @@ def test_simple_context(
     async def main():

         with trio.fail_after(timeout):
-            async with tractor.open_nursery() as nursery:
-
-                portal = await nursery.start_actor(
+            async with tractor.open_nursery(
+                debug_mode=debug_mode,
+            ) as an:
+                portal = await an.start_actor(
                     'simple_context',
                     enable_modules=[__name__],
                 )
@@ -193,9 +207,6 @@ def test_simple_context(
                 else:
                     assert await ctx.result() == 'yo'

-                if not error_parent:
-                    await ctx.cancel()
-
                 if pointlessly_open_stream:
                     async with ctx.open_stream():
                         if error_parent:
@@ -208,10 +219,15 @@ def test_simple_context(
                         # 'stop' msg to the far end which needs
                         # to be ignored
                         pass
+
                 else:
                     if error_parent:
                         raise error_parent

+                    # cancel AFTER we open a stream
+                    # to avoid a cancel raised inside
+                    # `.open_stream()`
+                    await ctx.cancel()
         finally:

             # after cancellation
@@ -234,6 +250,17 @@ def test_simple_context(
     trio.run(main)


+@acm
+async def expect_ctxc(yay: bool) -> None:
+    if yay:
+        try:
+            yield
+        except ContextCancelled:
+            return
+    else:
+        yield
+
+
 @pytest.mark.parametrize(
     'callee_returns_early',
     [True, False],
@@ -253,6 +280,7 @@ def test_caller_cancels(
     cancel_method: str,
     chk_ctx_result_before_exit: bool,
     callee_returns_early: bool,
+    debug_mode: bool,
 ):
     '''
     Verify that when the opening side of a context (aka the caller)
@@ -261,37 +289,81 @@ def test_caller_cancels(

     '''
     async def check_canceller(
-        ctx: tractor.Context,
+        ctx: Context,
     ) -> None:
-        # should not raise yet return the remote
-        # context cancelled error.
-        res = await ctx.result()
+        actor: Actor = current_actor()
+        uid: tuple = actor.uid
+
+        if (
+            cancel_method == 'portal'
+            and not callee_returns_early
+        ):
+            try:
+                res = await ctx.result()
+                assert 0, 'Portal cancel should raise!'
+
+            except ContextCancelled as ctxc:
+                assert ctx.chan._cancel_called
+                assert ctxc.canceller == uid
+                assert ctxc is ctx.maybe_error
+
+        # NOTE: should not ever raise even in the `ctx`
+        # case since self-cancellation should swallow the ctxc
+        # silently!
+        else:
+            res = await ctx.result()

         # we actually get a result
         if callee_returns_early:
             assert res == 'yo'
+            assert ctx.outcome is res
+            assert ctx.maybe_error is None

         else:
-            err = res
+            err: Exception = ctx.outcome
             assert isinstance(err, ContextCancelled)
             assert (
                 tuple(err.canceller)
                 ==
-                tractor.current_actor().uid
+                uid
             )
+            assert (
+                err
+                is ctx.maybe_error
+                is ctx._remote_error
+            )
+            if le := ctx._local_error:
+                assert err is le
+
+            # else:
+                # TODO: what should this be then?
+                # not defined until block closes right?
+                #
+                # await tractor.pause()
+                # assert ctx._local_error is None


     async def main():
-        async with tractor.open_nursery() as nursery:
-            portal = await nursery.start_actor(
+
+        async with tractor.open_nursery(
+            debug_mode=debug_mode,
+        ) as an:
+            portal = await an.start_actor(
                 'simple_context',
                 enable_modules=[__name__],
             )
             timeout = 0.5 if not callee_returns_early else 2
             with trio.fail_after(timeout):
-                async with portal.open_context(
-                    simple_setup_teardown,
-                    data=10,
-                    block_forever=not callee_returns_early,
-                ) as (ctx, sent):
+                async with (
+
+                    expect_ctxc(yay=cancel_method == 'portal'),
+
+                    portal.open_context(
+                        simple_setup_teardown,
+                        data=10,
+                        block_forever=not callee_returns_early,
+                    ) as (ctx, sent),
+                ):

                     if callee_returns_early:
                         # ensure we block long enough before sending
@@ -313,6 +385,16 @@ def test_caller_cancels(
             if cancel_method != 'portal':
                 await portal.cancel_actor()

+        # since the `.cancel_actor()` call just above
+        # will cause the `.open_context().__aexit__()` raise
+        # a ctxc which should in turn cause `ctx._scope` to
+        # catch any cancellation?
+        if (
+            not callee_returns_early
+            and cancel_method == 'portal'
+        ):
+            assert ctx._scope.cancelled_caught
+
     trio.run(main)


@@ -331,7 +413,7 @@ def test_caller_cancels(
 @tractor.context
 async def close_ctx_immediately(

-    ctx: tractor.Context,
+    ctx: Context,

 ) -> None:

@@ -343,17 +425,33 @@ async def close_ctx_immediately(

 @tractor_test
-async def test_callee_closes_ctx_after_stream_open():
-    'callee context closes without using stream'
-
-    async with tractor.open_nursery() as n:
-
-        portal = await n.start_actor(
+async def test_callee_closes_ctx_after_stream_open(
+    debug_mode: bool,
+):
+    '''
+    callee context closes without using stream.
+
+    This should result in a msg sequence
+    |_<root>_
+     |_<fast_stream_closer>
+
+               <= {'started': <Any>, 'cid': <str>}
+               <= {'stop': True, 'cid': <str>}
+               <= {'result': Any, ..}
+
+               (ignored by child)
+               => {'stop': True, 'cid': <str>}
+
+    '''
+    async with tractor.open_nursery(
+        debug_mode=debug_mode,
+    ) as an:
+        portal = await an.start_actor(
             'fast_stream_closer',
             enable_modules=[__name__],
         )

-        with trio.fail_after(2):
+        with trio.fail_after(0.5):
             async with portal.open_context(
                 close_ctx_immediately,

@@ -361,10 +459,9 @@ async def test_callee_closes_ctx_after_stream_open():
                 # cancel_on_exit=True,

             ) as (ctx, sent):
-
                 assert sent is None

-                with trio.fail_after(0.5):
+                with trio.fail_after(0.4):
                     async with ctx.open_stream() as stream:

                         # should fall through since ``StopAsyncIteration``
@@ -372,12 +469,15 @@ async def test_callee_closes_ctx_after_stream_open():
                         # a ``trio.EndOfChannel`` by
                         # ``trio.abc.ReceiveChannel.__anext__()``
                         async for _ in stream:
+                            # trigger failure if we DO NOT
+                            # get an EOC!
                             assert 0
                         else:

                             # verify stream is now closed
                             try:
-                                await stream.receive()
+                                with trio.fail_after(0.3):
+                                    await stream.receive()
                             except trio.EndOfChannel:
                                 pass

@@ -397,8 +497,7 @@ async def test_callee_closes_ctx_after_stream_open():

 @tractor.context
 async def expect_cancelled(
-
-    ctx: tractor.Context,
+    ctx: Context,

 ) -> None:
     global _state
@@ -417,7 +516,7 @@ async def expect_cancelled(
         raise

     else:
-        assert 0, "Wasn't cancelled!?"
+        assert 0, "callee wasn't cancelled !?"


 @pytest.mark.parametrize(
@@ -427,12 +526,18 @@ async def expect_cancelled(
 @tractor_test
 async def test_caller_closes_ctx_after_callee_opens_stream(
     use_ctx_cancel_method: bool,
+    debug_mode: bool,
 ):
-    'caller context closes without using stream'
-
-    async with tractor.open_nursery() as n:
+    '''
+    caller context closes without using/opening stream
+
+    '''
+    async with tractor.open_nursery(
+        debug_mode=debug_mode,
+    ) as an:

-        portal = await n.start_actor(
+        root: Actor = current_actor()
+        portal = await an.start_actor(
             'ctx_cancelled',
             enable_modules=[__name__],
         )
@@ -440,22 +545,37 @@ async def test_caller_closes_ctx_after_callee_opens_stream(
         async with portal.open_context(
             expect_cancelled,
         ) as (ctx, sent):
-            await portal.run(assert_state, value=True)
-
             assert sent is None

-            # call cancel explicitly
-            if use_ctx_cancel_method:
+            await portal.run(assert_state, value=True)
+
+            # call `ctx.cancel()` explicitly
+            if use_ctx_cancel_method:
                 await ctx.cancel()

+                # NOTE: means the local side `ctx._scope` will
+                # have been cancelled by an ctxc ack and thus
+                # `._scope.cancelled_caught` should be set.
                 try:
                     async with ctx.open_stream() as stream:
                         async for msg in stream:
                             pass

-                except tractor.ContextCancelled:
-                    raise  # XXX: must be propagated to __aexit__
+                except tractor.ContextCancelled as ctxc:
+                    # XXX: the cause is US since we call
+                    # `Context.cancel()` just above!
+                    assert (
+                        ctxc.canceller
+                        ==
+                        current_actor().uid
+                        ==
+                        root.uid
+                    )
+
+                    # XXX: must be propagated to __aexit__
+                    # and should be silently absorbed there
+                    # since we called `.cancel()` just above ;)
+                    raise

                 else:
                     assert 0, "Should have context cancelled?"
@@ -464,15 +584,36 @@ async def test_caller_closes_ctx_after_callee_opens_stream(
             assert portal.channel.connected()

             # ctx is closed here
-            await portal.run(assert_state, value=False)
+            await portal.run(
+                assert_state,
+                value=False,
+            )

         else:
             try:
                 with trio.fail_after(0.2):
                     await ctx.result()
                     assert 0, "Callee should have blocked!?"
             except trio.TooSlowError:
+                # NO-OP -> since already called above
                 await ctx.cancel()

+        # NOTE: local scope should have absorbed the cancellation since
+        # in this case we call `ctx.cancel()` and the local
+        # `._scope` gets `.cancel_called` on the ctxc ack.
+        if use_ctx_cancel_method:
+            assert ctx._scope.cancelled_caught
+
+            # rxed ctxc response from far end
+            assert ctx.cancel_acked
+            assert (
+                ctx._remote_error
+                is ctx._local_error
+                is ctx.maybe_error
+                is ctx.outcome
+            )
+
         try:
             async with ctx.open_stream() as stream:
                 async for msg in stream:
@@ -494,11 +635,13 @@ async def test_caller_closes_ctx_after_callee_opens_stream(

 @tractor_test
-async def test_multitask_caller_cancels_from_nonroot_task():
-
-    async with tractor.open_nursery() as n:
-
-        portal = await n.start_actor(
+async def test_multitask_caller_cancels_from_nonroot_task(
+    debug_mode: bool,
+):
+    async with tractor.open_nursery(
+        debug_mode=debug_mode,
+    ) as an:
+        portal = await an.start_actor(
             'ctx_cancelled',
             enable_modules=[__name__],
         )
@@ -545,25 +688,31 @@ async def test_multitask_caller_cancels_from_nonroot_task(
 @tractor.context
 async def cancel_self(
-
-    ctx: tractor.Context,
+    ctx: Context,

 ) -> None:
     global _state
     _state = True

+    # since we call this the below `.open_stream()` should always
+    # error!
     await ctx.cancel()

     # should inline raise immediately
     try:
         async with ctx.open_stream():
             pass
-    except tractor.ContextCancelled:
+    # except tractor.ContextCancelled:
+    except RuntimeError:
         # suppress for now so we can do checkpoint tests below
-        pass
+        print('Got expected runtime error for stream-after-cancel')

     else:
         raise RuntimeError('Context didnt cancel itself?!')

-    # check a real ``trio.Cancelled`` is raised on a checkpoint
+    # check that ``trio.Cancelled`` is now raised on any further
+    # checkpoints since the self cancel above will have cancelled
+    # the `Context._scope.cancel_scope: trio.CancelScope`
     try:
         with trio.fail_after(0.1):
             await trio.sleep_forever()
@@ -574,17 +723,22 @@ async def cancel_self(
         # should never get here
         assert 0

+    raise RuntimeError('Context didnt cancel itself?!')
+

 @tractor_test
-async def test_callee_cancels_before_started():
+async def test_callee_cancels_before_started(
+    debug_mode: bool,
+):
     '''
     Callee calls `Context.cancel()` while streaming and caller
     sees stream terminated in `ContextCancelled`.

     '''
-    async with tractor.open_nursery() as n:
-
-        portal = await n.start_actor(
+    async with tractor.open_nursery(
+        debug_mode=debug_mode,
+    ) as an:
+        portal = await an.start_actor(
             'cancels_self',
             enable_modules=[__name__],
         )
@@ -601,7 +755,7 @@ async def test_callee_cancels_before_started(
             ce.type == trio.Cancelled

             # the traceback should be informative
-            assert 'cancelled itself' in ce.msgdata['tb_str']
+            assert 'itself' in ce.msgdata['tb_str']

     # teardown the actor
     await portal.cancel_actor()
@@ -610,7 +764,7 @@ async def test_callee_cancels_before_started(
 @tractor.context
 async def never_open_stream(

-    ctx: tractor.Context,
+    ctx: Context,

 ) -> None:
     '''
@@ -624,8 +778,8 @@ async def never_open_stream(
 @tractor.context
 async def keep_sending_from_callee(

-    ctx: tractor.Context,
-    msg_buffer_size: Optional[int] = None,
+    ctx: Context,
+    msg_buffer_size: int|None = None,

 ) -> None:
     '''
@@ -650,7 +804,10 @@ async def keep_sending_from_callee(
     ],
     ids='overrun_condition={}'.format,
 )
-def test_one_end_stream_not_opened(overrun_by):
+def test_one_end_stream_not_opened(
+    overrun_by: tuple[str, int, Callable],
+    debug_mode: bool,
+):
     '''
     This should exemplify the bug from:
     https://github.com/goodboy/tractor/issues/265
@@ -661,8 +818,10 @@ def test_one_end_stream_not_opened(overrun_by):
     buf_size = buf_size_increase + Actor.msg_buffer_size

     async def main():
-        async with tractor.open_nursery() as n:
-            portal = await n.start_actor(
+        async with tractor.open_nursery(
+            debug_mode=debug_mode,
+        ) as an:
+            portal = await an.start_actor(
                 entrypoint.__name__,
                 enable_modules=[__name__],
             )
@@ -719,7 +878,7 @@ def test_one_end_stream_not_opened(overrun_by):
 @tractor.context
 async def echo_back_sequence(

-    ctx: tractor.Context,
+    ctx: Context,
     seq: list[int],
     wait_for_cancel: bool,
     allow_overruns_side: str,
@@ -736,7 +895,10 @@ async def echo_back_sequence(
     # NOTE: ensure that if the caller is expecting to cancel this task
     # that we stay echoing much longer then they are so we don't
     # return early instead of receive the cancel msg.
-    total_batches: int = 1000 if wait_for_cancel else 6
+    total_batches: int = (
+        1000 if wait_for_cancel
+        else 6
+    )

     await ctx.started()
     # await tractor.breakpoint()
@@ -755,8 +917,23 @@ async def echo_back_sequence(
     )

     seq = list(seq)  # bleh, msgpack sometimes ain't decoded right
-    for _ in range(total_batches):
+    for i in range(total_batches):
+        print(f'starting new stream batch {i} iter in child')
         batch = []
+
+        # EoC case, delay a little instead of hot
+        # iter-stopping (since apparently py3.11+ can do that
+        # faster then a ctxc can be sent) on the async for
+        # loop when child was requested to ctxc.
+        if (
+            stream.closed
+            or
+            ctx.cancel_called
+        ):
+            print('child stream already closed!?!')
+            await trio.sleep(0.05)
+            continue
+
         async for msg in stream:
             batch.append(msg)
             if batch == seq:
@@ -767,15 +944,18 @@ async def echo_back_sequence(

                 print('callee waiting on next')

+        print(f'callee echoing back latest batch\n{batch}')
         for msg in batch:
-            print(f'callee sending {msg}')
+            print(f'callee sending msg\n{msg}')
             await stream.send(msg)

-    print(
-        'EXITING CALLEEE:\n'
-        f'{ctx.cancel_called_remote}'
-    )
-    return 'yo'
+    try:
+        return 'yo'
+    finally:
+        print(
+            'exiting callee with context:\n'
+            f'{pformat(ctx)}\n'
+        )


 @pytest.mark.parametrize(
@@ -802,6 +982,7 @@ def test_maybe_allow_overruns_stream(
     slow_side: str,
     allow_overruns_side: str,
     loglevel: str,
+    debug_mode: bool,
 ):
     '''
     Demonstrate small overruns of each task back and forth
@@ -820,13 +1001,14 @@ def test_maybe_allow_overruns_stream(

     '''
     async def main():
-        async with tractor.open_nursery() as n:
-            portal = await n.start_actor(
+        async with tractor.open_nursery(
+            debug_mode=debug_mode,
+        ) as an:
+            portal = await an.start_actor(
                 'callee_sends_forever',
                 enable_modules=[__name__],
                 loglevel=loglevel,
-
-                # debug_mode=True,
+                debug_mode=debug_mode,
             )
             seq = list(range(10))
             async with portal.open_context(
@@ -835,8 +1017,8 @@ def test_maybe_allow_overruns_stream(
                 wait_for_cancel=cancel_ctx,
                 be_slow=(slow_side == 'child'),
                 allow_overruns_side=allow_overruns_side,
-            ) as (ctx, sent):

+            ) as (ctx, sent):
                 assert sent is None

                 async with ctx.open_stream(
@@ -864,14 +1046,14 @@ def test_maybe_allow_overruns_stream(

                 if cancel_ctx:
                     # cancel the remote task
-                    print('sending root side cancel')
+                    print('Requesting `ctx.cancel()` in parent!')
                     await ctx.cancel()

-                res = await ctx.result()
+                res: str|ContextCancelled = await ctx.result()

                 if cancel_ctx:
                     assert isinstance(res, ContextCancelled)
-                    assert tuple(res.canceller) == tractor.current_actor().uid
+                    assert tuple(res.canceller) == current_actor().uid

                 else:
                     print(f'RX ROOT SIDE RESULT {res}')
@@ -922,93 +1104,3 @@ def test_maybe_allow_overruns_stream(
         # if this hits the logic blocks from above are not
         # exhaustive..
         pytest.fail('PARAMETRIZED CASE GEN PROBLEM YO')
-
-
-@tractor.context
-async def sleep_forever(
-    ctx: tractor.Context,
-) -> None:
-    await ctx.started()
-    async with ctx.open_stream():
-        await trio.sleep_forever()
-
-
-@acm
-async def attach_to_sleep_forever():
-    '''
-    Cancel a context **before** any underlying error is raised in order
-    to trigger a local reception of a ``ContextCancelled`` which **should not**
-    be re-raised in the local surrounding ``Context`` *iff* the cancel was
-    requested by **this** side of the context.
-
-    '''
-    async with tractor.wait_for_actor('sleeper') as p2:
-        async with (
-            p2.open_context(sleep_forever) as (peer_ctx, first),
-            peer_ctx.open_stream(),
-        ):
-            try:
-                yield
-            finally:
-                # XXX: previously this would trigger local
-                # ``ContextCancelled`` to be received and raised in the
-                # local context overriding any local error due to logic
-                # inside ``_invoke()`` which checked for an error set on
-                # ``Context._error`` and raised it in a cancellation
-                # scenario.
-                # ------
-                # The problem is you can have a remote cancellation that
-                # is part of a local error and we shouldn't raise
-                # ``ContextCancelled`` **iff** we **were not** the side
-                # of the context to initiate it, i.e.
-                # ``Context._cancel_called`` should **NOT** have been
-                # set. The special logic to handle this case is now
-                # inside ``Context._maybe_raise_from_remote_msg()`` XD
-                await peer_ctx.cancel()
-
-
-@tractor.context
-async def error_before_started(
-    ctx: tractor.Context,
-) -> None:
-    '''
-    This simulates exactly an original bug discovered in:
-    https://github.com/pikers/piker/issues/244
-
-    '''
-    async with attach_to_sleep_forever():
-        # send an unserializable type which should raise a type error
-        # here and **NOT BE SWALLOWED** by the surrounding acm!!?!
-        await ctx.started(object())
-
-
-def test_do_not_swallow_error_before_started_by_remote_contextcancelled():
-    '''
-    Verify that an error raised in a remote context which itself opens
-    another remote context, which it cancels, does not ovverride the
-    original error that caused the cancellation of the secondardy
-    context.
-
-    '''
-    async def main():
-        async with tractor.open_nursery() as n:
-            portal = await n.start_actor(
-                'errorer',
-                enable_modules=[__name__],
-            )
-            await n.start_actor(
-                'sleeper',
-                enable_modules=[__name__],
-            )
-
-            async with (
-                portal.open_context(
-                    error_before_started
-                ) as (ctx, sent),
-            ):
-                await trio.sleep_forever()
-
-    with pytest.raises(tractor.RemoteActorError) as excinfo:
-        trio.run(main)
-
-    assert excinfo.value.type == TypeError
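Most of the churn in this test module revolves around the `Portal.open_context()` lifecycle. For orientation, the happy-path skeleton the tests keep repeating (a sketch; the target function and payload values are placeholders):

import tractor
import trio


@tractor.context
async def child_task(
    ctx: tractor.Context,
    data: int,
) -> str:
    # first sync the parent side, optionally passing back a value
    await ctx.started(data + 1)
    return 'done'


async def main():
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'ctx_child',
            enable_modules=[__name__],
        )
        async with portal.open_context(
            child_task,
            data=10,
        ) as (ctx, first):
            # `first` is whatever the child passed to `ctx.started()`
            assert first == 11
            assert await ctx.result() == 'done'

        await portal.cancel_actor()


trio.run(main)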
@@ -10,12 +10,13 @@ TODO:
 - wonder if any of it'll work on OS X?

 """
+from functools import partial
 import itertools
-from os import path
+# from os import path
 from typing import Optional
 import platform
 import pathlib
-import sys
+# import sys
 import time

 import pytest
@@ -25,6 +26,10 @@ from pexpect.exceptions import (
     EOF,
 )

+from tractor.devx._debug import (
+    _pause_msg,
+    _crash_msg,
+)
 from conftest import (
     examples_dir,
     _ci_env,
@@ -78,7 +83,7 @@ has_nested_actors = pytest.mark.has_nested_actors
 def spawn(
     start_method,
     testdir,
-    arb_addr,
+    reg_addr,
 ) -> 'pexpect.spawn':

     if start_method != 'trio':
@@ -123,20 +128,52 @@ def expect(
         raise


+def in_prompt_msg(
+    prompt: str,
+    parts: list[str],
+
+    pause_on_false: bool = False,
+    print_prompt_on_false: bool = True,
+
+) -> bool:
+    '''
+    Predicate check if (the prompt's) std-streams output has all
+    `str`-parts in it.
+
+    Can be used in test asserts for bulk matching expected
+    log/REPL output for a given `pdb` interact point.
+
+    '''
+    for part in parts:
+        if part not in prompt:
+
+            if pause_on_false:
+                import pdbp
+                pdbp.set_trace()
+
+            if print_prompt_on_false:
+                print(prompt)
+
+            return False
+
+    return True
+
+
 def assert_before(
     child,
     patts: list[str],

+    **kwargs,
+
 ) -> None:

-    before = str(child.before.decode())
+    # as in before the prompt end
+    before: str = str(child.before.decode())
+    assert in_prompt_msg(
+        prompt=before,
+        parts=patts,

-    for patt in patts:
-        try:
-            assert patt in before
-        except AssertionError:
-            print(before)
-            raise
+        **kwargs
+    )


 @pytest.fixture(
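The new `in_prompt_msg()` predicate replaces one-shot substring asserts with bulk part-matching against a captured prompt. Typical call shape inside a test (a sketch mirroring the usages converted below; `child` and `PROMPT` come from the surrounding pexpect harness):

child.expect(PROMPT)
before = str(child.before.decode())

# match several expected fragments of the REPL/log output at once
assert in_prompt_msg(
    prompt=before,
    parts=[_crash_msg, "('root'"],
)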
@ -166,7 +203,7 @@ def ctlc(
|
|||
# XXX: disable pygments highlighting for auto-tests
|
||||
# since some envs (like actions CI) will struggle
|
||||
# the the added color-char encoding..
|
||||
from tractor._debug import TractorConfig
|
||||
from tractor.devx._debug import TractorConfig
|
||||
TractorConfig.use_pygements = False
|
||||
|
||||
yield use_ctlc
|
||||
|
@ -195,7 +232,10 @@ def test_root_actor_error(spawn, user_in_out):
|
|||
before = str(child.before.decode())
|
||||
|
||||
# make sure expected logging and error arrives
|
||||
assert "Attaching to pdb in crashed actor: ('root'" in before
|
||||
assert in_prompt_msg(
|
||||
before,
|
||||
[_crash_msg, "('root'"]
|
||||
)
|
||||
assert 'AssertionError' in before
|
||||
|
||||
# send user command
|
||||
|
@ -332,7 +372,10 @@ def test_subactor_error(
|
|||
child.expect(PROMPT)
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching to pdb in crashed actor: ('name_error'" in before
|
||||
assert in_prompt_msg(
|
||||
before,
|
||||
[_crash_msg, "('name_error'"]
|
||||
)
|
||||
|
||||
if do_next:
|
||||
child.sendline('n')
|
||||
|
@ -353,9 +396,15 @@ def test_subactor_error(
|
|||
before = str(child.before.decode())
|
||||
|
||||
# root actor gets debugger engaged
|
||||
assert "Attaching to pdb in crashed actor: ('root'" in before
|
||||
assert in_prompt_msg(
|
||||
before,
|
||||
[_crash_msg, "('root'"]
|
||||
)
|
||||
# error is a remote error propagated from the subactor
|
||||
assert "RemoteActorError: ('name_error'" in before
|
||||
assert in_prompt_msg(
|
||||
before,
|
||||
[_crash_msg, "('name_error'"]
|
||||
)
|
||||
|
||||
# another round
|
||||
if ctlc:
|
||||
|
@ -380,7 +429,10 @@ def test_subactor_breakpoint(
|
|||
child.expect(PROMPT)
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
|
||||
assert in_prompt_msg(
|
||||
before,
|
||||
[_pause_msg, "('breakpoint_forever'"]
|
||||
)
|
||||
|
||||
# do some "next" commands to demonstrate recurrent breakpoint
|
||||
# entries
|
||||
|
@ -396,7 +448,10 @@ def test_subactor_breakpoint(
|
|||
child.sendline('continue')
|
||||
child.expect(PROMPT)
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
|
||||
assert in_prompt_msg(
|
||||
before,
|
||||
[_pause_msg, "('breakpoint_forever'"]
|
||||
)
|
||||
|
||||
if ctlc:
|
||||
do_ctlc(child)
|
||||
|
@ -441,7 +496,10 @@ def test_multi_subactors(
|
|||
child.expect(PROMPT)
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
|
||||
assert in_prompt_msg(
|
||||
before,
|
||||
[_pause_msg, "('breakpoint_forever'"]
|
||||
)
|
||||
|
||||
if ctlc:
|
||||
do_ctlc(child)
|
||||
|
@ -461,7 +519,10 @@ def test_multi_subactors(
|
|||
# first name_error failure
|
||||
child.expect(PROMPT)
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching to pdb in crashed actor: ('name_error'" in before
|
||||
assert in_prompt_msg(
|
||||
before,
|
||||
[_crash_msg, "('name_error'"]
|
||||
)
|
||||
assert "NameError" in before
|
||||
|
||||
if ctlc:
|
||||
|
@ -487,7 +548,10 @@ def test_multi_subactors(
|
|||
child.sendline('c')
|
||||
child.expect(PROMPT)
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
|
||||
assert in_prompt_msg(
|
||||
before,
|
||||
[_pause_msg, "('breakpoint_forever'"]
|
||||
)
|
||||
|
||||
if ctlc:
|
||||
do_ctlc(child)
|
||||
|
@ -527,17 +591,21 @@ def test_multi_subactors(
|
|||
child.expect(PROMPT)
|
||||
before = str(child.before.decode())
|
||||
|
||||
assert_before(child, [
|
||||
# debugger attaches to root
|
||||
"Attaching to pdb in crashed actor: ('root'",
|
||||
assert_before(
|
||||
child, [
|
||||
# debugger attaches to root
|
||||
# "Attaching to pdb in crashed actor: ('root'",
|
||||
_crash_msg,
|
||||
"('root'",
|
||||
|
||||
# expect a multierror with exceptions for each sub-actor
|
||||
"RemoteActorError: ('breakpoint_forever'",
|
||||
"RemoteActorError: ('name_error'",
|
||||
"RemoteActorError: ('spawn_error'",
|
||||
"RemoteActorError: ('name_error_1'",
|
||||
'bdb.BdbQuit',
|
||||
])
|
||||
# expect a multierror with exceptions for each sub-actor
|
||||
"RemoteActorError: ('breakpoint_forever'",
|
||||
"RemoteActorError: ('name_error'",
|
||||
"RemoteActorError: ('spawn_error'",
|
||||
"RemoteActorError: ('name_error_1'",
|
||||
'bdb.BdbQuit',
|
||||
]
|
||||
)
|
||||
|
||||
if ctlc:
|
||||
do_ctlc(child)
|
||||
|
@ -574,15 +642,22 @@ def test_multi_daemon_subactors(
     # the root's tty lock first so anticipate either crash
     # message on the first entry.

-    bp_forever_msg = "Attaching pdb to actor: ('bp_forever'"
+    bp_forev_parts = [_pause_msg, "('bp_forever'"]
+    bp_forev_in_msg = partial(
+        in_prompt_msg,
+        parts=bp_forev_parts,
+    )
+
     name_error_msg = "NameError: name 'doggypants' is not defined"
+    name_error_parts = [name_error_msg]

     before = str(child.before.decode())
-    if bp_forever_msg in before:
-        next_msg = name_error_msg
+
+    if bp_forev_in_msg(prompt=before):
+        next_parts = name_error_parts

     elif name_error_msg in before:
-        next_msg = bp_forever_msg
+        next_parts = bp_forev_parts

     else:
         raise ValueError("Neither log msg was found !?")
@ -599,7 +674,10 @@ def test_multi_daemon_subactors(

     child.sendline('c')
     child.expect(PROMPT)
-    assert_before(child, [next_msg])
+    assert_before(
+        child,
+        next_parts,
+    )

     # XXX: hooray the root clobbering the child here was fixed!
     # IMO, this demonstrates the true power of SC system design.

@ -607,7 +685,7 @@ def test_multi_daemon_subactors(
     # now the root actor won't clobber the bp_forever child
     # during it's first access to the debug lock, but will instead
     # wait for the lock to release, by the edge triggered
-    # ``_debug.Lock.no_remote_has_tty`` event before sending cancel messages
+    # ``devx._debug.Lock.no_remote_has_tty`` event before sending cancel messages
     # (via portals) to its underlings B)

     # at some point here there should have been some warning msg from
@ -623,9 +701,15 @@ def test_multi_daemon_subactors(
     child.expect(PROMPT)

     try:
-        assert_before(child, [bp_forever_msg])
+        assert_before(
+            child,
+            bp_forev_parts,
+        )
     except AssertionError:
-        assert_before(child, [name_error_msg])
+        assert_before(
+            child,
+            name_error_parts,
+        )

     else:
         if ctlc:

@ -637,7 +721,10 @@ def test_multi_daemon_subactors(

     child.sendline('c')
     child.expect(PROMPT)
-    assert_before(child, [name_error_msg])
+    assert_before(
+        child,
+        name_error_parts,
+    )

     # wait for final error in root
     # where it crashs with boxed error
@ -647,7 +734,7 @@ def test_multi_daemon_subactors(
             child.expect(PROMPT)
             assert_before(
                 child,
-                [bp_forever_msg]
+                bp_forev_parts
             )
         except AssertionError:
             break

@ -656,7 +743,9 @@ def test_multi_daemon_subactors(
         child,
         [
             # boxed error raised in root task
-            "Attaching to pdb in crashed actor: ('root'",
+            # "Attaching to pdb in crashed actor: ('root'",
+            _crash_msg,
+            "('root'",
             "_exceptions.RemoteActorError: ('name_error'",
         ]
     )
@ -770,7 +859,7 @@ def test_multi_nested_subactors_error_through_nurseries(

     child = spawn('multi_nested_subactors_error_up_through_nurseries')

-    timed_out_early: bool = False
+    # timed_out_early: bool = False

     for send_char in itertools.cycle(['c', 'q']):
         try:

@ -871,11 +960,14 @@ def test_root_nursery_cancels_before_child_releases_tty_lock(

     if not timed_out_early:
         before = str(child.before.decode())
-        assert_before(child, [
-            "tractor._exceptions.RemoteActorError: ('spawner0'",
-            "tractor._exceptions.RemoteActorError: ('name_error'",
-            "NameError: name 'doggypants' is not defined",
-        ])
+        assert_before(
+            child,
+            [
+                "tractor._exceptions.RemoteActorError: ('spawner0'",
+                "tractor._exceptions.RemoteActorError: ('name_error'",
+                "NameError: name 'doggypants' is not defined",
+            ],
+        )


 def test_root_cancels_child_context_during_startup(
@ -909,8 +1001,10 @@ def test_different_debug_mode_per_actor(

     # only one actor should enter the debugger
     before = str(child.before.decode())
-    assert "Attaching to pdb in crashed actor: ('debugged_boi'" in before
-    assert "RuntimeError" in before
+    assert in_prompt_msg(
+        before,
+        [_crash_msg, "('debugged_boi'", "RuntimeError"],
+    )

     if ctlc:
         do_ctlc(child)
@ -15,19 +15,19 @@ from conftest import tractor_test


 @tractor_test
-async def test_reg_then_unreg(arb_addr):
+async def test_reg_then_unreg(reg_addr):
     actor = tractor.current_actor()
     assert actor.is_arbiter
     assert len(actor._registry) == 1  # only self is registered

     async with tractor.open_nursery(
-        arbiter_addr=arb_addr,
+        registry_addrs=[reg_addr],
     ) as n:

         portal = await n.start_actor('actor', enable_modules=[__name__])
         uid = portal.channel.uid

-        async with tractor.get_arbiter(*arb_addr) as aportal:
+        async with tractor.get_arbiter(*reg_addr) as aportal:
             # this local actor should be the arbiter
             assert actor is aportal.actor

@ -53,15 +53,27 @@ async def hi():
     return the_line.format(tractor.current_actor().name)


-async def say_hello(other_actor):
+async def say_hello(
+    other_actor: str,
+    reg_addr: tuple[str, int],
+):
     await trio.sleep(1)  # wait for other actor to spawn
-    async with tractor.find_actor(other_actor) as portal:
+    async with tractor.find_actor(
+        other_actor,
+        registry_addrs=[reg_addr],
+    ) as portal:
         assert portal is not None
         return await portal.run(__name__, 'hi')


-async def say_hello_use_wait(other_actor):
-    async with tractor.wait_for_actor(other_actor) as portal:
+async def say_hello_use_wait(
+    other_actor: str,
+    reg_addr: tuple[str, int],
+):
+    async with tractor.wait_for_actor(
+        other_actor,
+        registry_addr=reg_addr,
+    ) as portal:
         assert portal is not None
         result = await portal.run(__name__, 'hi')
         return result
@ -69,21 +81,29 @@ async def say_hello_use_wait(other_actor):

 @tractor_test
 @pytest.mark.parametrize('func', [say_hello, say_hello_use_wait])
-async def test_trynamic_trio(func, start_method, arb_addr):
-    """Main tractor entry point, the "master" process (for now
-    acts as the "director").
-    """
+async def test_trynamic_trio(
+    func,
+    start_method,
+    reg_addr,
+):
+    '''
+    Root actor acting as the "director" and running one-shot-task-actors
+    for the directed subs.
+
+    '''
     async with tractor.open_nursery() as n:
         print("Alright... Action!")

         donny = await n.run_in_actor(
             func,
             other_actor='gretchen',
+            reg_addr=reg_addr,
             name='donny',
         )
         gretchen = await n.run_in_actor(
             func,
             other_actor='donny',
+            reg_addr=reg_addr,
             name='gretchen',
         )
         print(await gretchen.result())
@ -131,7 +151,7 @@ async def unpack_reg(actor_or_portal):


 async def spawn_and_check_registry(
-    arb_addr: tuple,
+    reg_addr: tuple,
     use_signal: bool,
     remote_arbiter: bool = False,
     with_streaming: bool = False,

@ -139,9 +159,9 @@ async def spawn_and_check_registry(
 ) -> None:

     async with tractor.open_root_actor(
-        arbiter_addr=arb_addr,
+        registry_addrs=[reg_addr],
     ):
-        async with tractor.get_arbiter(*arb_addr) as portal:
+        async with tractor.get_arbiter(*reg_addr) as portal:
             # runtime needs to be up to call this
             actor = tractor.current_actor()

@ -213,17 +233,19 @@ async def spawn_and_check_registry(
 def test_subactors_unregister_on_cancel(
     start_method,
     use_signal,
-    arb_addr,
+    reg_addr,
     with_streaming,
 ):
-    """Verify that cancelling a nursery results in all subactors
+    '''
+    Verify that cancelling a nursery results in all subactors
     deregistering themselves with the arbiter.
-    """
+
+    '''
     with pytest.raises(KeyboardInterrupt):
         trio.run(
             partial(
                 spawn_and_check_registry,
-                arb_addr,
+                reg_addr,
                 use_signal,
                 remote_arbiter=False,
                 with_streaming=with_streaming,
@ -237,7 +259,7 @@ def test_subactors_unregister_on_cancel_remote_daemon(
     daemon,
     start_method,
     use_signal,
-    arb_addr,
+    reg_addr,
     with_streaming,
 ):
     """Verify that cancelling a nursery results in all subactors

@ -248,7 +270,7 @@ def test_subactors_unregister_on_cancel_remote_daemon(
         trio.run(
             partial(
                 spawn_and_check_registry,
-                arb_addr,
+                reg_addr,
                 use_signal,
                 remote_arbiter=True,
                 with_streaming=with_streaming,

@ -262,7 +284,7 @@ async def streamer(agen):


 async def close_chans_before_nursery(
-    arb_addr: tuple,
+    reg_addr: tuple,
     use_signal: bool,
     remote_arbiter: bool = False,
 ) -> None:

@ -275,9 +297,9 @@ async def close_chans_before_nursery(
         entries_at_end = 1

     async with tractor.open_root_actor(
-        arbiter_addr=arb_addr,
+        registry_addrs=[reg_addr],
     ):
-        async with tractor.get_arbiter(*arb_addr) as aportal:
+        async with tractor.get_arbiter(*reg_addr) as aportal:
             try:
                 get_reg = partial(unpack_reg, aportal)

@ -329,7 +351,7 @@ async def close_chans_before_nursery(
 def test_close_channel_explicit(
     start_method,
     use_signal,
-    arb_addr,
+    reg_addr,
 ):
     """Verify that closing a stream explicitly and killing the actor's
     "root nursery" **before** the containing nursery tears down also

@ -339,7 +361,7 @@ def test_close_channel_explicit(
         trio.run(
             partial(
                 close_chans_before_nursery,
-                arb_addr,
+                reg_addr,
                 use_signal,
                 remote_arbiter=False,
             ),

@ -351,7 +373,7 @@ def test_close_channel_explicit_remote_arbiter(
     daemon,
     start_method,
     use_signal,
-    arb_addr,
+    reg_addr,
 ):
     """Verify that closing a stream explicitly and killing the actor's
     "root nursery" **before** the containing nursery tears down also

@ -361,7 +383,7 @@ def test_close_channel_explicit_remote_arbiter(
         trio.run(
             partial(
                 close_chans_before_nursery,
-                arb_addr,
+                reg_addr,
                 use_signal,
                 remote_arbiter=True,
             ),
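Every test in this suite now takes the renamed `reg_addr` fixture in place of `arb_addr`. The fixture itself is defined in the suite's `conftest.py` (not shown in this diff); a plausible minimal sketch, with the port-selection strategy here being an assumption:

import random
import pytest

@pytest.fixture
def reg_addr() -> tuple[str, int]:
    # pick a fresh localhost port per test so parallel runs
    # don't collide on the registrar socket
    return ('127.0.0.1', random.randint(1024, 65535))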
@ -21,7 +21,7 @@ from conftest import (
 def run_example_in_subproc(
     loglevel: str,
     testdir,
-    arb_addr: tuple[str, int],
+    reg_addr: tuple[str, int],
 ):

     @contextmanager

@ -47,7 +47,7 @@ async def trio_cancels_single_aio_task():
     await tractor.to_asyncio.run_task(sleep_forever)


-def test_trio_cancels_aio_on_actor_side(arb_addr):
+def test_trio_cancels_aio_on_actor_side(reg_addr):
     '''
     Spawn an infected actor that is cancelled by the ``trio`` side
     task using std cancel scope apis.

@ -55,7 +55,7 @@ def test_trio_cancels_aio_on_actor_side(arb_addr):
     '''
     async def main():
         async with tractor.open_nursery(
-            arbiter_addr=arb_addr
+            registry_addrs=[reg_addr]
         ) as n:
             await n.run_in_actor(
                 trio_cancels_single_aio_task,

@ -94,7 +94,7 @@ async def asyncio_actor(
         raise


-def test_aio_simple_error(arb_addr):
+def test_aio_simple_error(reg_addr):
     '''
     Verify a simple remote asyncio error propagates back through trio
     to the parent actor.

@ -103,7 +103,7 @@ def test_aio_simple_error(arb_addr):
     '''
     async def main():
         async with tractor.open_nursery(
-            arbiter_addr=arb_addr
+            registry_addrs=[reg_addr]
         ) as n:
             await n.run_in_actor(
                 asyncio_actor,

@ -120,7 +120,7 @@ def test_aio_simple_error(arb_addr):
     assert err.type == AssertionError


-def test_tractor_cancels_aio(arb_addr):
+def test_tractor_cancels_aio(reg_addr):
     '''
     Verify we can cancel a spawned asyncio task gracefully.


@ -139,7 +139,7 @@ def test_tractor_cancels_aio(arb_addr):
     trio.run(main)


-def test_trio_cancels_aio(arb_addr):
+def test_trio_cancels_aio(reg_addr):
     '''
     Much like the above test with ``tractor.Portal.cancel_actor()``
     except we just use a standard ``trio`` cancellation api.

@ -194,7 +194,7 @@ async def trio_ctx(
     ids='parent_actor_cancels_child={}'.format
 )
 def test_context_spawns_aio_task_that_errors(
-    arb_addr,
+    reg_addr,
     parent_cancels: bool,
 ):
     '''

@ -225,7 +225,7 @@ def test_context_spawns_aio_task_that_errors(

             await trio.sleep_forever()

-            return await ctx.result()
+        return await ctx.result()

     if parent_cancels:
         # bc the parent made the cancel request,

@ -258,7 +258,7 @@ async def aio_cancel():
     await sleep_forever()


-def test_aio_cancelled_from_aio_causes_trio_cancelled(arb_addr):
+def test_aio_cancelled_from_aio_causes_trio_cancelled(reg_addr):

     async def main():
         async with tractor.open_nursery() as n:

@ -395,7 +395,7 @@ async def stream_from_aio(
     'fan_out', [False, True],
     ids='fan_out_w_chan_subscribe={}'.format
 )
-def test_basic_interloop_channel_stream(arb_addr, fan_out):
+def test_basic_interloop_channel_stream(reg_addr, fan_out):
     async def main():
         async with tractor.open_nursery() as n:
             portal = await n.run_in_actor(

@ -409,7 +409,7 @@ def test_basic_interloop_channel_stream(arb_addr, fan_out):


 # TODO: parametrize the above test and avoid the duplication here?
-def test_trio_error_cancels_intertask_chan(arb_addr):
+def test_trio_error_cancels_intertask_chan(reg_addr):
     async def main():
         async with tractor.open_nursery() as n:
             portal = await n.run_in_actor(

@ -428,7 +428,7 @@ def test_trio_error_cancels_intertask_chan(arb_addr):
         assert exc.type == Exception


-def test_trio_closes_early_and_channel_exits(arb_addr):
+def test_trio_closes_early_and_channel_exits(reg_addr):
     async def main():
         async with tractor.open_nursery() as n:
             portal = await n.run_in_actor(

@ -443,7 +443,7 @@ def test_trio_closes_early_and_channel_exits(arb_addr):
     trio.run(main)


-def test_aio_errors_and_channel_propagates_and_closes(arb_addr):
+def test_aio_errors_and_channel_propagates_and_closes(reg_addr):
     async def main():
         async with tractor.open_nursery() as n:
             portal = await n.run_in_actor(

@ -520,7 +520,7 @@ async def trio_to_aio_echo_server(
     ids='raise_error={}'.format,
 )
 def test_echoserver_detailed_mechanics(
-    arb_addr,
+    reg_addr,
     raise_error_mid_stream,
 ):
File diff suppressed because it is too large
@ -55,7 +55,7 @@ async def context_stream(


 async def stream_from_single_subactor(
-    arb_addr,
+    reg_addr,
     start_method,
     stream_func,
 ):

@ -64,7 +64,7 @@ async def stream_from_single_subactor(
     # only one per host address, spawns an actor if None

     async with tractor.open_nursery(
-        arbiter_addr=arb_addr,
+        registry_addrs=[reg_addr],
         start_method=start_method,
     ) as nursery:

@ -115,13 +115,13 @@ async def stream_from_single_subactor(
 @pytest.mark.parametrize(
     'stream_func', [async_gen_stream, context_stream]
 )
-def test_stream_from_single_subactor(arb_addr, start_method, stream_func):
+def test_stream_from_single_subactor(reg_addr, start_method, stream_func):
     """Verify streaming from a spawned async generator.
     """
     trio.run(
         partial(
             stream_from_single_subactor,
-            arb_addr,
+            reg_addr,
             start_method,
             stream_func=stream_func,
         ),

@ -225,14 +225,14 @@ async def a_quadruple_example():
     return result_stream


-async def cancel_after(wait, arb_addr):
-    async with tractor.open_root_actor(arbiter_addr=arb_addr):
+async def cancel_after(wait, reg_addr):
+    async with tractor.open_root_actor(registry_addrs=[reg_addr]):
         with trio.move_on_after(wait):
             return await a_quadruple_example()


 @pytest.fixture(scope='module')
-def time_quad_ex(arb_addr, ci_env, spawn_backend):
+def time_quad_ex(reg_addr, ci_env, spawn_backend):
     if spawn_backend == 'mp':
         """no idea but the mp *nix runs are flaking out here often...
         """

@ -240,7 +240,7 @@ def time_quad_ex(arb_addr, ci_env, spawn_backend):

     timeout = 7 if platform.system() in ('Windows', 'Darwin') else 4
     start = time.time()
-    results = trio.run(cancel_after, timeout, arb_addr)
+    results = trio.run(cancel_after, timeout, reg_addr)
     diff = time.time() - start
     assert results
     return results, diff

@ -260,14 +260,14 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend):
     list(map(lambda i: i/10, range(3, 9)))
 )
 def test_not_fast_enough_quad(
-    arb_addr, time_quad_ex, cancel_delay, ci_env, spawn_backend
+    reg_addr, time_quad_ex, cancel_delay, ci_env, spawn_backend
 ):
     """Verify we can cancel midway through the quad example and all actors
     cancel gracefully.
     """
     results, diff = time_quad_ex
     delay = max(diff - cancel_delay, 0)
-    results = trio.run(cancel_after, delay, arb_addr)
+    results = trio.run(cancel_after, delay, reg_addr)
     system = platform.system()
     if system in ('Windows', 'Darwin') and results is not None:
         # In CI envoirments it seems later runs are quicker then the first

@ -280,7 +280,7 @@ def test_not_fast_enough_quad(

 @tractor_test
 async def test_respawn_consumer_task(
-    arb_addr,
+    reg_addr,
     spawn_backend,
     loglevel,
 ):
|
|||
|
||||
|
||||
@tractor_test
|
||||
async def test_self_is_registered(arb_addr):
|
||||
async def test_self_is_registered(reg_addr):
|
||||
"Verify waiting on the arbiter to register itself using the standard api."
|
||||
actor = tractor.current_actor()
|
||||
assert actor.is_arbiter
|
||||
|
@ -34,20 +34,20 @@ async def test_self_is_registered(arb_addr):
|
|||
|
||||
|
||||
@tractor_test
|
||||
async def test_self_is_registered_localportal(arb_addr):
|
||||
async def test_self_is_registered_localportal(reg_addr):
|
||||
"Verify waiting on the arbiter to register itself using a local portal."
|
||||
actor = tractor.current_actor()
|
||||
assert actor.is_arbiter
|
||||
async with tractor.get_arbiter(*arb_addr) as portal:
|
||||
async with tractor.get_arbiter(*reg_addr) as portal:
|
||||
assert isinstance(portal, tractor._portal.LocalPortal)
|
||||
|
||||
with trio.fail_after(0.2):
|
||||
sockaddr = await portal.run_from_ns(
|
||||
'self', 'wait_for_actor', name='root')
|
||||
assert sockaddr[0] == arb_addr
|
||||
assert sockaddr[0] == reg_addr
|
||||
|
||||
|
||||
def test_local_actor_async_func(arb_addr):
|
||||
def test_local_actor_async_func(reg_addr):
|
||||
"""Verify a simple async function in-process.
|
||||
"""
|
||||
nums = []
|
||||
|
@ -55,7 +55,7 @@ def test_local_actor_async_func(arb_addr):
|
|||
async def print_loop():
|
||||
|
||||
async with tractor.open_root_actor(
|
||||
arbiter_addr=arb_addr,
|
||||
registry_addrs=[reg_addr],
|
||||
):
|
||||
# arbiter is started in-proc if dne
|
||||
assert tractor.current_actor().is_arbiter
|
||||
|
|
|
@ -28,9 +28,9 @@ def test_abort_on_sigint(daemon):
|
|||
|
||||
|
||||
@tractor_test
|
||||
async def test_cancel_remote_arbiter(daemon, arb_addr):
|
||||
async def test_cancel_remote_arbiter(daemon, reg_addr):
|
||||
assert not tractor.current_actor().is_arbiter
|
||||
async with tractor.get_arbiter(*arb_addr) as portal:
|
||||
async with tractor.get_arbiter(*reg_addr) as portal:
|
||||
await portal.cancel_actor()
|
||||
|
||||
time.sleep(0.1)
|
||||
|
@ -39,16 +39,16 @@ async def test_cancel_remote_arbiter(daemon, arb_addr):
|
|||
|
||||
# no arbiter socket should exist
|
||||
with pytest.raises(OSError):
|
||||
async with tractor.get_arbiter(*arb_addr) as portal:
|
||||
async with tractor.get_arbiter(*reg_addr) as portal:
|
||||
pass
|
||||
|
||||
|
||||
def test_register_duplicate_name(daemon, arb_addr):
|
||||
def test_register_duplicate_name(daemon, reg_addr):
|
||||
|
||||
async def main():
|
||||
|
||||
async with tractor.open_nursery(
|
||||
arbiter_addr=arb_addr,
|
||||
registry_addrs=[reg_addr],
|
||||
) as n:
|
||||
|
||||
assert not tractor.current_actor().is_arbiter
|
||||
|
|
|
@ -160,7 +160,7 @@ async def test_required_args(callwith_expecterror):
|
|||
)
|
||||
def test_multi_actor_subs_arbiter_pub(
|
||||
loglevel,
|
||||
arb_addr,
|
||||
reg_addr,
|
||||
pub_actor,
|
||||
):
|
||||
"""Try out the neato @pub decorator system.
|
||||
|
@ -170,7 +170,7 @@ def test_multi_actor_subs_arbiter_pub(
|
|||
async def main():
|
||||
|
||||
async with tractor.open_nursery(
|
||||
arbiter_addr=arb_addr,
|
||||
registry_addrs=[reg_addr],
|
||||
enable_modules=[__name__],
|
||||
) as n:
|
||||
|
||||
|
@ -255,12 +255,12 @@ def test_multi_actor_subs_arbiter_pub(
|
|||
|
||||
def test_single_subactor_pub_multitask_subs(
|
||||
loglevel,
|
||||
arb_addr,
|
||||
reg_addr,
|
||||
):
|
||||
async def main():
|
||||
|
||||
async with tractor.open_nursery(
|
||||
arbiter_addr=arb_addr,
|
||||
registry_addrs=[reg_addr],
|
||||
enable_modules=[__name__],
|
||||
) as n:
|
||||
|
||||
|
|
|
@ -34,7 +34,6 @@ def test_resource_only_entered_once(key_on):
|
|||
global _resource
|
||||
_resource = 0
|
||||
|
||||
kwargs = {}
|
||||
key = None
|
||||
if key_on == 'key_value':
|
||||
key = 'some_common_key'
|
||||
|
@ -139,7 +138,7 @@ def test_open_local_sub_to_stream():
|
|||
N local tasks using ``trionics.maybe_open_context():``.
|
||||
|
||||
'''
|
||||
timeout = 3 if platform.system() != "Windows" else 10
|
||||
timeout: float = 3.6 if platform.system() != "Windows" else 10
|
||||
|
||||
async def main():
|
||||
|
||||
|
|
|
@ -13,9 +13,19 @@ async def sleep_back_actor(
|
|||
func_name,
|
||||
func_defined,
|
||||
exposed_mods,
|
||||
*,
|
||||
reg_addr: tuple,
|
||||
):
|
||||
if actor_name:
|
||||
async with tractor.find_actor(actor_name) as portal:
|
||||
async with tractor.find_actor(
|
||||
actor_name,
|
||||
# NOTE: must be set manually since
|
||||
# the subactor doesn't have the reg_addr
|
||||
# fixture code run in it!
|
||||
# TODO: maybe we should just set this once in the
|
||||
# _state mod and derive to all children?
|
||||
registry_addrs=[reg_addr],
|
||||
) as portal:
|
||||
try:
|
||||
await portal.run(__name__, func_name)
|
||||
except tractor.RemoteActorError as err:
|
||||
|
@ -45,11 +55,17 @@ async def short_sleep():
|
|||
ids=['no_mods', 'this_mod', 'this_mod_bad_func', 'fail_to_import',
|
||||
'fail_on_syntax'],
|
||||
)
|
||||
def test_rpc_errors(arb_addr, to_call, testdir):
|
||||
"""Test errors when making various RPC requests to an actor
|
||||
def test_rpc_errors(
|
||||
reg_addr,
|
||||
to_call,
|
||||
testdir,
|
||||
):
|
||||
'''
|
||||
Test errors when making various RPC requests to an actor
|
||||
that either doesn't have the requested module exposed or doesn't define
|
||||
the named function.
|
||||
"""
|
||||
|
||||
'''
|
||||
exposed_mods, funcname, inside_err = to_call
|
||||
subactor_exposed_mods = []
|
||||
func_defined = globals().get(funcname, False)
|
||||
|
@ -77,8 +93,13 @@ def test_rpc_errors(arb_addr, to_call, testdir):
|
|||
|
||||
# spawn a subactor which calls us back
|
||||
async with tractor.open_nursery(
|
||||
arbiter_addr=arb_addr,
|
||||
registry_addrs=[reg_addr],
|
||||
enable_modules=exposed_mods.copy(),
|
||||
|
||||
# NOTE: will halt test in REPL if uncommented, so only
|
||||
# do that if actually debugging subactor but keep it
|
||||
# disabled for the test.
|
||||
# debug_mode=True,
|
||||
) as n:
|
||||
|
||||
actor = tractor.current_actor()
|
||||
|
@ -95,6 +116,7 @@ def test_rpc_errors(arb_addr, to_call, testdir):
|
|||
exposed_mods=exposed_mods,
|
||||
func_defined=True if func_defined else False,
|
||||
enable_modules=subactor_exposed_mods,
|
||||
reg_addr=reg_addr,
|
||||
)
|
||||
|
||||
def run():
|
||||
|
@ -112,7 +134,7 @@ def test_rpc_errors(arb_addr, to_call, testdir):
|
|||
value = err.value
|
||||
|
||||
# might get multiple `trio.Cancelled`s as well inside an inception
|
||||
if isinstance(value, trio.MultiError):
|
||||
if isinstance(value, ExceptionGroup):
|
||||
value = next(itertools.dropwhile(
|
||||
lambda exc: not isinstance(exc, tractor.RemoteActorError),
|
||||
value.exceptions
|
||||
|
|
|
@ -0,0 +1,167 @@
|
|||
"""
|
||||
Shared mem primitives and APIs.
|
||||
|
||||
"""
|
||||
import uuid
|
||||
|
||||
# import numpy
|
||||
import pytest
|
||||
import trio
|
||||
import tractor
|
||||
from tractor._shm import (
|
||||
open_shm_list,
|
||||
attach_shm_list,
|
||||
)
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def child_attach_shml_alot(
|
||||
ctx: tractor.Context,
|
||||
shm_key: str,
|
||||
) -> None:
|
||||
|
||||
await ctx.started(shm_key)
|
||||
|
||||
# now try to attach a boatload of times in a loop..
|
||||
for _ in range(1000):
|
||||
shml = attach_shm_list(
|
||||
key=shm_key,
|
||||
readonly=False,
|
||||
)
|
||||
assert shml.shm.name == shm_key
|
||||
await trio.sleep(0.001)
|
||||
|
||||
|
||||
def test_child_attaches_alot():
|
||||
async def main():
|
||||
async with tractor.open_nursery() as an:
|
||||
|
||||
# allocate writeable list in parent
|
||||
key = f'shml_{uuid.uuid4()}'
|
||||
shml = open_shm_list(
|
||||
key=key,
|
||||
)
|
||||
|
||||
portal = await an.start_actor(
|
||||
'shm_attacher',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
async with (
|
||||
portal.open_context(
|
||||
child_attach_shml_alot,
|
||||
shm_key=shml.key,
|
||||
) as (ctx, start_val),
|
||||
):
|
||||
assert start_val == key
|
||||
await ctx.result()
|
||||
|
||||
await portal.cancel_actor()
|
||||
|
||||
trio.run(main)
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def child_read_shm_list(
|
||||
ctx: tractor.Context,
|
||||
shm_key: str,
|
||||
use_str: bool,
|
||||
frame_size: int,
|
||||
) -> None:
|
||||
|
||||
# attach in child
|
||||
shml = attach_shm_list(
|
||||
key=shm_key,
|
||||
# dtype=str if use_str else float,
|
||||
)
|
||||
await ctx.started(shml.key)
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
async for i in stream:
|
||||
print(f'(child): reading shm list index: {i}')
|
||||
|
||||
if use_str:
|
||||
expect = str(float(i))
|
||||
else:
|
||||
expect = float(i)
|
||||
|
||||
if frame_size == 1:
|
||||
val = shml[i]
|
||||
assert expect == val
|
||||
print(f'(child): reading value: {val}')
|
||||
else:
|
||||
frame = shml[i - frame_size:i]
|
||||
print(f'(child): reading frame: {frame}')
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'use_str',
|
||||
[False, True],
|
||||
ids=lambda i: f'use_str_values={i}',
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
'frame_size',
|
||||
[1, 2**6, 2**10],
|
||||
ids=lambda i: f'frame_size={i}',
|
||||
)
|
||||
def test_parent_writer_child_reader(
|
||||
use_str: bool,
|
||||
frame_size: int,
|
||||
):
|
||||
|
||||
async def main():
|
||||
async with tractor.open_nursery(
|
||||
# debug_mode=True,
|
||||
) as an:
|
||||
|
||||
portal = await an.start_actor(
|
||||
'shm_reader',
|
||||
enable_modules=[__name__],
|
||||
debug_mode=True,
|
||||
)
|
||||
|
||||
# allocate writeable list in parent
|
||||
key = 'shm_list'
|
||||
seq_size = int(2 * 2 ** 10)
|
||||
shml = open_shm_list(
|
||||
key=key,
|
||||
size=seq_size,
|
||||
dtype=str if use_str else float,
|
||||
readonly=False,
|
||||
)
|
||||
|
||||
async with (
|
||||
portal.open_context(
|
||||
child_read_shm_list,
|
||||
shm_key=key,
|
||||
use_str=use_str,
|
||||
frame_size=frame_size,
|
||||
) as (ctx, sent),
|
||||
|
||||
ctx.open_stream() as stream,
|
||||
):
|
||||
|
||||
assert sent == key
|
||||
|
||||
for i in range(seq_size):
|
||||
|
||||
val = float(i)
|
||||
if use_str:
|
||||
val = str(val)
|
||||
|
||||
# print(f'(parent): writing {val}')
|
||||
shml[i] = val
|
||||
|
||||
# only on frame fills do we
|
||||
# signal to the child that a frame's
|
||||
# worth is ready.
|
||||
if (i % frame_size) == 0:
|
||||
print(f'(parent): signalling frame full on {val}')
|
||||
await stream.send(i)
|
||||
else:
|
||||
print(f'(parent): signalling final frame on {val}')
|
||||
await stream.send(i)
|
||||
|
||||
await portal.cancel_actor()
|
||||
|
||||
trio.run(main)
|
|
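The new `tractor._shm` primitives exercised above appear to wrap the stdlib's named shared memory support; for orientation, here is the same allocate-by-key/attach-by-key round trip using only `multiprocessing.shared_memory.ShareableList` (the key name below is illustrative):

from multiprocessing.shared_memory import ShareableList

# "writer" side: allocate a named shared list
sl = ShareableList([0.0] * 8, name='demo_shml')

# "reader" side (normally another process): attach by name
attached = ShareableList(name='demo_shml')

sl[0] = 3.14               # writes land in the shared buffer..
assert attached[0] == 3.14  # ..and are visible through any attachment

# cleanup: close every handle, unlink the segment once
attached.shm.close()
sl.shm.close()
sl.shm.unlink()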
@ -16,14 +16,14 @@ data_to_pass_down = {'doggy': 10, 'kitty': 4}
 async def spawn(
     is_arbiter: bool,
     data: dict,
-    arb_addr: tuple[str, int],
+    reg_addr: tuple[str, int],
 ):
     namespaces = [__name__]

     await trio.sleep(0.1)

     async with tractor.open_root_actor(
-        arbiter_addr=arb_addr,
+        arbiter_addr=reg_addr,
     ):

         actor = tractor.current_actor()

@ -32,8 +32,7 @@ async def spawn(

     if actor.is_arbiter:

-        async with tractor.open_nursery(
-        ) as nursery:
+        async with tractor.open_nursery() as nursery:

             # forks here
             portal = await nursery.run_in_actor(

@ -41,7 +40,7 @@ async def spawn(
                 is_arbiter=False,
                 name='sub-actor',
                 data=data,
-                arb_addr=arb_addr,
+                reg_addr=reg_addr,
                 enable_modules=namespaces,
             )

@ -55,12 +54,14 @@ async def spawn(
         return 10


-def test_local_arbiter_subactor_global_state(arb_addr):
+def test_local_arbiter_subactor_global_state(
+    reg_addr,
+):
    result = trio.run(
        spawn,
        True,
        data_to_pass_down,
-        arb_addr,
+        reg_addr,
    )
    assert result == 10

@ -140,7 +141,7 @@ async def check_loglevel(level):
 def test_loglevel_propagated_to_subactor(
     start_method,
     capfd,
-    arb_addr,
+    reg_addr,
 ):
     if start_method == 'mp_forkserver':
         pytest.skip(

@ -152,7 +153,7 @@ def test_loglevel_propagated_to_subactor(
         async with tractor.open_nursery(
             name='arbiter',
             start_method=start_method,
-            arbiter_addr=arb_addr,
+            arbiter_addr=reg_addr,

         ) as tn:
             await tn.run_in_actor(
@ -66,13 +66,13 @@ async def ensure_sequence(
 async def open_sequence_streamer(

     sequence: list[int],
-    arb_addr: tuple[str, int],
+    reg_addr: tuple[str, int],
     start_method: str,

 ) -> tractor.MsgStream:

     async with tractor.open_nursery(
-        arbiter_addr=arb_addr,
+        arbiter_addr=reg_addr,
         start_method=start_method,
     ) as tn:

@ -93,7 +93,7 @@ async def open_sequence_streamer(


 def test_stream_fan_out_to_local_subscriptions(
-    arb_addr,
+    reg_addr,
     start_method,
 ):

@ -103,7 +103,7 @@ def test_stream_fan_out_to_local_subscriptions(

     async with open_sequence_streamer(
         sequence,
-        arb_addr,
+        reg_addr,
         start_method,
     ) as stream:

@ -138,7 +138,7 @@ def test_stream_fan_out_to_local_subscriptions(
     ]
 )
 def test_consumer_and_parent_maybe_lag(
-    arb_addr,
+    reg_addr,
     start_method,
     task_delays,
 ):

@ -150,7 +150,7 @@ def test_consumer_and_parent_maybe_lag(

     async with open_sequence_streamer(
         sequence,
-        arb_addr,
+        reg_addr,
         start_method,
     ) as stream:

@ -211,7 +211,7 @@ def test_consumer_and_parent_maybe_lag(


 def test_faster_task_to_recv_is_cancelled_by_slower(
-    arb_addr,
+    reg_addr,
     start_method,
 ):
     '''

@ -225,7 +225,7 @@ def test_faster_task_to_recv_is_cancelled_by_slower(

     async with open_sequence_streamer(
         sequence,
-        arb_addr,
+        reg_addr,
         start_method,

     ) as stream:

@ -302,7 +302,7 @@ def test_subscribe_errors_after_close():


 def test_ensure_slow_consumers_lag_out(
-    arb_addr,
+    reg_addr,
     start_method,
 ):
     '''This is a pure local task test; no tractor
|
|||
tractor: structured concurrent ``trio``-"actors".
|
||||
|
||||
"""
|
||||
from exceptiongroup import BaseExceptionGroup
|
||||
from exceptiongroup import BaseExceptionGroup as BaseExceptionGroup
|
||||
|
||||
from ._clustering import open_actor_cluster
|
||||
from ._ipc import Channel
|
||||
from ._clustering import (
|
||||
open_actor_cluster as open_actor_cluster,
|
||||
)
|
||||
from ._context import (
|
||||
Context,
|
||||
context,
|
||||
Context as Context, # the type
|
||||
context as context, # a func-decorator
|
||||
)
|
||||
from ._streaming import (
|
||||
MsgStream,
|
||||
stream,
|
||||
MsgStream as MsgStream,
|
||||
stream as stream,
|
||||
)
|
||||
from ._discovery import (
|
||||
get_arbiter,
|
||||
find_actor,
|
||||
wait_for_actor,
|
||||
query_actor,
|
||||
get_arbiter as get_arbiter,
|
||||
find_actor as find_actor,
|
||||
wait_for_actor as wait_for_actor,
|
||||
query_actor as query_actor,
|
||||
)
|
||||
from ._supervise import (
|
||||
open_nursery as open_nursery,
|
||||
ActorNursery as ActorNursery,
|
||||
)
|
||||
from ._supervise import open_nursery
|
||||
from ._state import (
|
||||
current_actor,
|
||||
is_root_process,
|
||||
current_actor as current_actor,
|
||||
is_root_process as is_root_process,
|
||||
)
|
||||
from ._exceptions import (
|
||||
RemoteActorError,
|
||||
ModuleNotExposed,
|
||||
ContextCancelled,
|
||||
RemoteActorError as RemoteActorError,
|
||||
ModuleNotExposed as ModuleNotExposed,
|
||||
ContextCancelled as ContextCancelled,
|
||||
)
|
||||
from ._debug import (
|
||||
breakpoint,
|
||||
post_mortem,
|
||||
from .devx import (
|
||||
breakpoint as breakpoint,
|
||||
pause as pause,
|
||||
pause_from_sync as pause_from_sync,
|
||||
post_mortem as post_mortem,
|
||||
)
|
||||
from . import msg
|
||||
from . import msg as msg
|
||||
from ._root import (
|
||||
run_daemon,
|
||||
open_root_actor,
|
||||
run_daemon as run_daemon,
|
||||
open_root_actor as open_root_actor,
|
||||
)
|
||||
from ._portal import Portal
|
||||
from ._runtime import Actor
|
||||
|
||||
|
||||
__all__ = [
|
||||
'Actor',
|
||||
'Channel',
|
||||
'Context',
|
||||
'ContextCancelled',
|
||||
'ModuleNotExposed',
|
||||
'MsgStream',
|
||||
'BaseExceptionGroup',
|
||||
'Portal',
|
||||
'RemoteActorError',
|
||||
'breakpoint',
|
||||
'context',
|
||||
'current_actor',
|
||||
'find_actor',
|
||||
'get_arbiter',
|
||||
'is_root_process',
|
||||
'msg',
|
||||
'open_actor_cluster',
|
||||
'open_nursery',
|
||||
'open_root_actor',
|
||||
'post_mortem',
|
||||
'query_actor',
|
||||
'run_daemon',
|
||||
'stream',
|
||||
'to_asyncio',
|
||||
'wait_for_actor',
|
||||
]
|
||||
from ._ipc import Channel as Channel
|
||||
from ._portal import Portal as Portal
|
||||
from ._runtime import Actor as Actor
|
||||
|
|
|
@ -18,8 +18,6 @@
|
|||
This is the "bootloader" for actors started using the native trio backend.
|
||||
|
||||
"""
|
||||
import sys
|
||||
import trio
|
||||
import argparse
|
||||
|
||||
from ast import literal_eval
|
||||
|
@ -37,8 +35,6 @@ def parse_ipaddr(arg):
|
|||
return (str(host), int(port))
|
||||
|
||||
|
||||
from ._entry import _trio_main
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
|
|
tractor/_context.py (1594 lines changed): file diff suppressed because it is too large
@ -15,16 +15,20 @@
 # along with this program.  If not, see <https://www.gnu.org/licenses/>.

 """
-Actor discovery API.
+Discovery (protocols) API for automatic addressing and location
+management of (service) actors.

 """
+from __future__ import annotations
 from typing import (
-    Optional,
-    Union,
     AsyncGenerator,
+    AsyncContextManager,
+    TYPE_CHECKING,
 )
 from contextlib import asynccontextmanager as acm
+import warnings

+from .trionics import gather_contexts
 from ._ipc import _connect_chan, Channel
 from ._portal import (
     Portal,

@ -34,13 +38,19 @@ from ._portal import (
 from ._state import current_actor, _runtime_vars


-@acm
-async def get_arbiter(
+if TYPE_CHECKING:
+    from ._runtime import Actor
+
+
+@acm
+async def get_registry(
     host: str,
     port: int,

-) -> AsyncGenerator[Union[Portal, LocalPortal], None]:
+) -> AsyncGenerator[
+    Portal | LocalPortal | None,
+    None,
+]:
     '''
     Return a portal instance connected to a local or remote
     arbiter.

@ -51,16 +61,33 @@ async def get_arbiter(
     if not actor:
         raise RuntimeError("No actor instance has been defined yet?")

-    if actor.is_arbiter:
+    if actor.is_registrar:
         # we're already the arbiter
         # (likely a re-entrant call from the arbiter actor)
-        yield LocalPortal(actor, Channel((host, port)))
+        yield LocalPortal(
+            actor,
+            Channel((host, port))
+        )
     else:
-        async with _connect_chan(host, port) as chan:
-
-            async with open_portal(chan) as arb_portal:
-
-                yield arb_portal
+        async with (
+            _connect_chan(host, port) as chan,
+            open_portal(chan) as regstr_ptl,
+        ):
+            yield regstr_ptl
+
+
+# TODO: deprecate and this remove _arbiter form!
+@acm
+async def get_arbiter(*args, **kwargs):
+    warnings.warn(
+        '`tractor.get_arbiter()` is now deprecated!\n'
+        'Use `.get_registry()` instead!',
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    async with get_registry(*args, **kwargs) as to_yield:
+        yield to_yield


 @acm
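The `get_arbiter()` shim above shows the standard deprecation-forwarding pattern: warn with `stacklevel=2` so the warning is attributed to the *caller's* frame, then delegate wholesale to the replacement. A generic sketch of the same idiom (the `old_api`/`new_api` names are made up):

import warnings
from contextlib import asynccontextmanager as acm

@acm
async def new_api(host: str, port: int):
    # stand-in for the real replacement resource
    yield (host, port)

@acm
async def old_api(*args, **kwargs):
    # `stacklevel=2` points the warning at the caller, not this shim
    warnings.warn(
        '`old_api()` is deprecated, use `new_api()`!',
        DeprecationWarning,
        stacklevel=2,
    )
    async with new_api(*args, **kwargs) as value:
        yield value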
@ -68,51 +95,80 @@ async def get_root(
     **kwargs,
 ) -> AsyncGenerator[Portal, None]:

     # TODO: rename mailbox to `_root_maddr` when we finally
     # add and impl libp2p multi-addrs?
     host, port = _runtime_vars['_root_mailbox']
     assert host is not None

-    async with _connect_chan(host, port) as chan:
-        async with open_portal(chan, **kwargs) as portal:
-            yield portal
+    async with (
+        _connect_chan(host, port) as chan,
+        open_portal(chan, **kwargs) as portal,
+    ):
+        yield portal


 @acm
 async def query_actor(
     name: str,
-    arbiter_sockaddr: Optional[tuple[str, int]] = None,
+    arbiter_sockaddr: tuple[str, int] | None = None,
+    regaddr: tuple[str, int] | None = None,

-) -> AsyncGenerator[tuple[str, int], None]:
+) -> AsyncGenerator[
+    tuple[str, int] | None,
+    None,
+]:
     '''
-    Simple address lookup for a given actor name.
+    Make a transport address lookup for an actor name to a specific
+    registrar.

-    Returns the (socket) address or ``None``.
+    Returns the (socket) address or ``None`` if no entry under that
+    name exists for the given registrar listening @ `regaddr`.

     '''
-    actor = current_actor()
-    async with get_arbiter(
-        *arbiter_sockaddr or actor._arb_addr
-    ) as arb_portal:
+    actor: Actor = current_actor()
+    if (
+        name == 'registrar'
+        and actor.is_registrar
+    ):
+        raise RuntimeError(
+            'The current actor IS the registry!?'
+        )

-        sockaddr = await arb_portal.run_from_ns(
+    if arbiter_sockaddr is not None:
+        warnings.warn(
+            '`tractor.query_actor(regaddr=<blah>)` is deprecated.\n'
+            'Use `registry_addrs: list[tuple]` instead!',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        regaddr: list[tuple[str, int]] = arbiter_sockaddr
+
+    reg_portal: Portal
+    regaddr: tuple[str, int] = regaddr or actor.reg_addrs[0]
+    async with get_registry(*regaddr) as reg_portal:
+        # TODO: return portals to all available actors - for now
+        # just the last one that registered
+        sockaddr: tuple[str, int] = await reg_portal.run_from_ns(
             'self',
             'find_actor',
             name=name,
         )

-        # TODO: return portals to all available actors - for now just
-        # the last one that registered
-        if name == 'arbiter' and actor.is_arbiter:
-            raise RuntimeError("The current actor is the arbiter")
-
-        yield sockaddr if sockaddr else None
+        yield sockaddr


 @acm
 async def find_actor(
     name: str,
-    arbiter_sockaddr: tuple[str, int] | None = None
+    arbiter_sockaddr: tuple[str, int] | None = None,
+    registry_addrs: list[tuple[str, int]] | None = None,

-) -> AsyncGenerator[Optional[Portal], None]:
+    only_first: bool = True,
+    raise_on_none: bool = False,
+
+) -> AsyncGenerator[
+    Portal | list[Portal] | None,
+    None,
+]:
     '''
     Ask the arbiter to find actor(s) by name.

@ -120,24 +176,79 @@ async def find_actor(
     known to the arbiter.

     '''
-    async with query_actor(
-        name=name,
-        arbiter_sockaddr=arbiter_sockaddr,
-    ) as sockaddr:
+    if arbiter_sockaddr is not None:
+        warnings.warn(
+            '`tractor.find_actor(arbiter_sockaddr=<blah>)` is deprecated.\n'
+            'Use `registry_addrs: list[tuple]` instead!',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        registry_addrs: list[tuple[str, int]] = [arbiter_sockaddr]

-        if sockaddr:
-            async with _connect_chan(*sockaddr) as chan:
-                async with open_portal(chan) as portal:
-                    yield portal
-        else:
+    @acm
+    async def maybe_open_portal_from_reg_addr(
+        addr: tuple[str, int],
+    ):
+        async with query_actor(
+            name=name,
+            regaddr=addr,
+        ) as sockaddr:
+            if sockaddr:
+                async with _connect_chan(*sockaddr) as chan:
+                    async with open_portal(chan) as portal:
+                        yield portal
+            else:
+                yield None
+
+    if not registry_addrs:
+        # XXX NOTE: make sure to dynamically read the value on
+        # every call since something may change it globally (eg.
+        # like in our discovery test suite)!
+        from . import _root
+        registry_addrs = _root._default_lo_addrs
+
+    maybe_portals: list[
+        AsyncContextManager[tuple[str, int]]
+    ] = list(
+        maybe_open_portal_from_reg_addr(addr)
+        for addr in registry_addrs
+    )
+
+    async with gather_contexts(
+        mngrs=maybe_portals,
+    ) as portals:
+        # log.runtime(
+        #     'Gathered portals:\n'
+        #     f'{portals}'
+        # )
+        # NOTE: `gather_contexts()` will return a
+        # `tuple[None, None, ..., None]` if no contact
+        # can be made with any regstrar at any of the
+        # N provided addrs!
+        if not any(portals):
+            if raise_on_none:
+                raise RuntimeError(
+                    f'No actor "{name}" found registered @ {registry_addrs}'
+                )
             yield None
+            return
+
+        portals: list[Portal] = list(portals)
+        if only_first:
+            yield portals[0]
+
+        else:
+            # TODO: currently this may return multiple portals
+            # given there are multi-homed or multiple registrars..
+            # SO, we probably need de-duplication logic?
+            yield portals


 @acm
 async def wait_for_actor(
     name: str,
     arbiter_sockaddr: tuple[str, int] | None = None,
-    # registry_addr: tuple[str, int] | None = None,
+    registry_addr: tuple[str, int] | None = None,

 ) -> AsyncGenerator[Portal, None]:
     '''

@ -146,17 +257,31 @@ async def wait_for_actor(
     A portal to the first registered actor is returned.

     '''
-    actor = current_actor()
+    actor: Actor = current_actor()

-    async with get_arbiter(
-        *arbiter_sockaddr or actor._arb_addr,
-    ) as arb_portal:
-        sockaddrs = await arb_portal.run_from_ns(
+    if arbiter_sockaddr is not None:
+        warnings.warn(
+            '`tractor.wait_for_actor(arbiter_sockaddr=<foo>)` is deprecated.\n'
+            'Use `registry_addr: tuple` instead!',
+            DeprecationWarning,
+            stacklevel=2,
+        )
+        registry_addr: tuple[str, int] = arbiter_sockaddr
+
+    # TODO: use `.trionics.gather_contexts()` like
+    # above in `find_actor()` as well?
+    reg_portal: Portal
+    regaddr: tuple[str, int] = registry_addr or actor.reg_addrs[0]
+    async with get_registry(*regaddr) as reg_portal:
+        sockaddrs = await reg_portal.run_from_ns(
             'self',
             'wait_for_actor',
             name=name,
         )
-        sockaddr = sockaddrs[-1]
+
+        # get latest registered addr by default?
+        # TODO: offer multi-portal yields in multi-homed case?
+        sockaddr: tuple[str, int] = sockaddrs[-1]

         async with _connect_chan(*sockaddr) as chan:
             async with open_portal(chan) as portal:
|
|||
|
||||
def _mp_main(
|
||||
|
||||
actor: Actor, # type: ignore
|
||||
accept_addr: tuple[str, int],
|
||||
actor: Actor,
|
||||
accept_addrs: list[tuple[str, int]],
|
||||
forkserver_info: tuple[Any, Any, Any, Any, Any],
|
||||
start_method: SpawnMethodKey,
|
||||
parent_addr: tuple[str, int] | None = None,
|
||||
|
@ -77,8 +77,8 @@ def _mp_main(
|
|||
log.debug(f"parent_addr is {parent_addr}")
|
||||
trio_main = partial(
|
||||
async_main,
|
||||
actor,
|
||||
accept_addr,
|
||||
actor=actor,
|
||||
accept_addrs=accept_addrs,
|
||||
parent_addr=parent_addr
|
||||
)
|
||||
try:
|
||||
|
@ -96,7 +96,7 @@ def _mp_main(
|
|||
|
||||
def _trio_main(
|
||||
|
||||
actor: Actor, # type: ignore
|
||||
actor: Actor,
|
||||
*,
|
||||
parent_addr: tuple[str, int] | None = None,
|
||||
infect_asyncio: bool = False,
|
||||
|
@ -106,25 +106,29 @@ def _trio_main(
|
|||
Entry point for a `trio_run_in_process` subactor.
|
||||
|
||||
'''
|
||||
log.info(f"Started new trio process for {actor.uid}")
|
||||
|
||||
if actor.loglevel is not None:
|
||||
log.info(
|
||||
f"Setting loglevel for {actor.uid} to {actor.loglevel}")
|
||||
get_console_log(actor.loglevel)
|
||||
|
||||
log.info(
|
||||
f"Started {actor.uid}")
|
||||
|
||||
_state._current_actor = actor
|
||||
|
||||
log.debug(f"parent_addr is {parent_addr}")
|
||||
trio_main = partial(
|
||||
async_main,
|
||||
actor,
|
||||
parent_addr=parent_addr
|
||||
)
|
||||
|
||||
if actor.loglevel is not None:
|
||||
get_console_log(actor.loglevel)
|
||||
import os
|
||||
actor_info: str = (
|
||||
f'|_{actor}\n'
|
||||
f' uid: {actor.uid}\n'
|
||||
f' pid: {os.getpid()}\n'
|
||||
f' parent_addr: {parent_addr}\n'
|
||||
f' loglevel: {actor.loglevel}\n'
|
||||
)
|
||||
log.info(
|
||||
'Started new trio process:\n'
|
||||
+
|
||||
actor_info
|
||||
)
|
||||
|
||||
try:
|
||||
if infect_asyncio:
|
||||
actor._infected_aio = True
|
||||
|
@ -132,7 +136,15 @@ def _trio_main(
|
|||
else:
|
||||
trio.run(trio_main)
|
||||
except KeyboardInterrupt:
|
||||
log.cancel(f"Actor {actor.uid} received KBI")
|
||||
log.cancel(
|
||||
'Actor received KBI\n'
|
||||
+
|
||||
actor_info
|
||||
)
|
||||
|
||||
finally:
|
||||
log.info(f"Actor {actor.uid} terminated")
|
||||
log.info(
|
||||
'Actor terminated\n'
|
||||
+
|
||||
actor_info
|
||||
)
|
||||
|
|
|
@ -14,22 +14,35 @@
|
|||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
'''
|
||||
Our classy exception set.
|
||||
|
||||
"""
|
||||
'''
|
||||
from __future__ import annotations
|
||||
import builtins
|
||||
import importlib
|
||||
from pprint import pformat
|
||||
from typing import (
|
||||
Any,
|
||||
Type,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
import textwrap
|
||||
import traceback
|
||||
|
||||
import exceptiongroup as eg
|
||||
import trio
|
||||
|
||||
from ._state import current_actor
|
||||
from .log import get_logger
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ._context import Context
|
||||
from .log import StackLevelAdapter
|
||||
from ._stream import MsgStream
|
||||
from ._ipc import Channel
|
||||
|
||||
log = get_logger('tractor')
|
||||
|
||||
_this_mod = importlib.import_module(__name__)
|
||||
|
||||
|
@ -38,9 +51,40 @@ class ActorFailure(Exception):
|
|||
"General actor failure"
|
||||
|
||||
|
||||
class InternalError(RuntimeError):
|
||||
'''
|
||||
Entirely unexpected internal machinery error indicating
|
||||
a completely invalid state or interface.
|
||||
|
||||
'''
|
||||
|
||||
_body_fields: list[str] = [
|
||||
'src_actor_uid',
|
||||
'canceller',
|
||||
'sender',
|
||||
]
|
||||
|
||||
_msgdata_keys: list[str] = [
|
||||
'type_str',
|
||||
] + _body_fields
|
||||
|
||||
|
||||
|
||||
# TODO: rename to just `RemoteError`?
|
||||
class RemoteActorError(Exception):
|
||||
# TODO: local recontruction of remote exception deats
|
||||
"Remote actor exception bundled locally"
|
||||
'''
|
||||
A box(ing) type which bundles a remote actor `BaseException` for
|
||||
(near identical, and only if possible,) local object/instance
|
||||
re-construction in the local process memory domain.
|
||||
|
||||
Normally each instance is expected to be constructed from
|
||||
a special "error" IPC msg sent by some remote actor-runtime.
|
||||
|
||||
'''
|
||||
reprol_fields: list[str] = [
|
||||
'src_actor_uid',
|
||||
]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
message: str,
|
||||
|
@ -50,18 +94,101 @@ class RemoteActorError(Exception):
|
|||
) -> None:
|
||||
super().__init__(message)
|
||||
|
||||
self.type = suberror_type
|
||||
self.msgdata = msgdata
|
||||
# TODO: maybe a better name?
|
||||
# - .errtype
|
||||
# - .retype
|
||||
# - .boxed_errtype
|
||||
# - .boxed_type
|
||||
# - .remote_type
|
||||
# also pertains to our long long oustanding issue XD
|
||||
# https://github.com/goodboy/tractor/issues/5
|
||||
self.boxed_type: str = suberror_type
|
||||
self.msgdata: dict[str, Any] = msgdata
|
||||
|
||||
@property
|
||||
def src_actor_uid(self) -> tuple[str, str] | None:
|
||||
def type(self) -> str:
|
||||
return self.boxed_type
|
||||
|
||||
@property
|
||||
def type_str(self) -> str:
|
||||
return str(type(self.boxed_type).__name__)
|
||||
|
||||
@property
|
||||
def src_actor_uid(self) -> tuple[str, str]|None:
|
||||
return self.msgdata.get('src_actor_uid')
|
||||
|
||||
@property
|
||||
def tb_str(
|
||||
self,
|
||||
indent: str = ' '*3,
|
||||
) -> str:
|
||||
if remote_tb := self.msgdata.get('tb_str'):
|
||||
return textwrap.indent(
|
||||
remote_tb,
|
||||
prefix=indent,
|
||||
)
|
||||
|
||||
return ''
|
||||
|
||||
def reprol(self) -> str:
|
||||
'''
|
||||
Represent this error for "one line" display, like in
|
||||
a field of our `Context.__repr__()` output.
|
||||
|
||||
'''
|
||||
_repr: str = f'{type(self).__name__}('
|
||||
for key in self.reprol_fields:
|
||||
val: Any|None = self.msgdata.get(key)
|
||||
if val:
|
||||
_repr += f'{key}={repr(val)} '
|
||||
|
||||
return _repr
|
||||
|
||||
def __repr__(self) -> str:
|
||||
|
||||
fields: str = ''
|
||||
for key in _body_fields:
|
||||
val: str|None = self.msgdata.get(key)
|
||||
if val:
|
||||
fields += f'{key}={val}\n'
|
||||
|
||||
fields: str = textwrap.indent(
|
||||
fields,
|
||||
# prefix=' '*2,
|
||||
prefix=' |_',
|
||||
)
|
||||
indent: str = ''*1
|
||||
body: str = (
|
||||
f'{fields}'
|
||||
f' |\n'
|
||||
f' ------ - ------\n\n'
|
||||
f'{self.tb_str}\n'
|
||||
f' ------ - ------\n'
|
||||
f' _|\n'
|
||||
)
|
||||
# f'|\n'
|
||||
# f' |\n'
|
||||
if indent:
|
||||
body: str = textwrap.indent(
|
||||
body,
|
||||
prefix=indent,
|
||||
)
|
||||
return (
|
||||
f'<{type(self).__name__}(\n'
|
||||
f'{body}'
|
||||
')>'
|
||||
)
|
||||
|
||||
# TODO: local recontruction of remote exception deats
|
||||
# def unbox(self) -> BaseException:
|
||||
# ...
|
||||
|
||||
|
||||
class InternalActorError(RemoteActorError):
|
||||
'''
|
||||
Remote internal ``tractor`` error indicating
|
||||
failure of some primitive or machinery.
|
||||
(Remote) internal `tractor` error indicating failure of some
|
||||
primitive, machinery state or lowlevel task that should never
|
||||
occur.
|
||||
|
||||
'''
|
||||
|
||||
|
@ -72,12 +199,43 @@ class ContextCancelled(RemoteActorError):
|
|||
``Portal.cancel_actor()`` or ``Context.cancel()``.
|
||||
|
||||
'''
|
||||
reprol_fields: list[str] = [
|
||||
'canceller',
|
||||
]
|
||||
@property
|
||||
def canceller(self) -> tuple[str, str] | None:
|
||||
def canceller(self) -> tuple[str, str]|None:
|
||||
'''
|
||||
Return the (maybe) `Actor.uid` for the requesting-author
|
||||
of this ctxc.
|
||||
|
||||
Emit a warning msg when `.canceller` has not been set,
|
||||
which usually idicates that a `None` msg-loop setinel was
|
||||
sent before expected in the runtime. This can happen in
|
||||
a few situations:
|
||||
|
||||
- (simulating) an IPC transport network outage
|
||||
- a (malicious) pkt sent specifically to cancel an actor's
|
||||
runtime non-gracefully without ensuring ongoing RPC tasks are
|
||||
incrementally cancelled as is done with:
|
||||
`Actor`
|
||||
|_`.cancel()`
|
||||
|_`.cancel_soon()`
|
||||
|_`._cancel_task()`
|
||||
|
||||
'''
|
||||
value = self.msgdata.get('canceller')
|
||||
if value:
|
||||
return tuple(value)
|
||||
|
||||
log.warning(
|
||||
'IPC Context cancelled without a requesting actor?\n'
|
||||
'Maybe the IPC transport ended abruptly?\n\n'
|
||||
f'{self}'
|
||||
)
|
||||
|
||||
# to make `.__repr__()` work uniformly
|
||||
# src_actor_uid = canceller
|
||||
|
||||
|
||||
class TransportClosed(trio.ClosedResourceError):
|
||||
"Underlying channel transport was closed prior to use"
|
||||
|
@ -95,8 +253,22 @@ class NoRuntime(RuntimeError):
|
|||
"The root actor has not been initialized yet"
|
||||
|
||||
|
||||
class StreamOverrun(trio.TooSlowError):
|
||||
"This stream was overrun by sender"
|
||||
class StreamOverrun(
|
||||
RemoteActorError,
|
||||
trio.TooSlowError,
|
||||
):
|
||||
reprol_fields: list[str] = [
|
||||
'sender',
|
||||
]
|
||||
'''
|
||||
This stream was overrun by sender
|
||||
|
||||
'''
|
||||
@property
|
||||
def sender(self) -> tuple[str, str] | None:
|
||||
value = self.msgdata.get('sender')
|
||||
if value:
|
||||
return tuple(value)
|
||||
|
||||
|
||||
class AsyncioCancelled(Exception):
|
||||
|
@ -107,55 +279,95 @@ class AsyncioCancelled(Exception):
|
|||
|
||||
'''
|
||||
|
||||
class MessagingError(Exception):
|
||||
'Some kind of unexpected SC messaging dialog issue'
|
||||
|
||||
|
||||
def pack_error(
|
||||
exc: BaseException,
|
||||
tb=None,
|
||||
tb: str|None = None,
|
||||
cid: str|None = None,
|
||||
|
||||
) -> dict[str, Any]:
|
||||
"""Create an "error message" for tranmission over
|
||||
a channel (aka the wire).
|
||||
"""
|
||||
) -> dict[str, dict]:
|
||||
'''
|
||||
Create an "error message" which boxes a locally caught
|
||||
exception's meta-data and encodes it for wire transport via an
|
||||
IPC `Channel`; expected to be unpacked (and thus unboxed) on
|
||||
the receiver side using `unpack_error()` below.
|
||||
|
||||
'''
|
||||
if tb:
|
||||
tb_str = ''.join(traceback.format_tb(tb))
|
||||
else:
|
||||
tb_str = traceback.format_exc()
|
||||
|
||||
error_msg = {
|
||||
error_msg: dict[
|
||||
str,
|
||||
str | tuple[str, str]
|
||||
] = {
|
||||
'tb_str': tb_str,
|
||||
'type_str': type(exc).__name__,
|
||||
'boxed_type': type(exc).__name__,
|
||||
'src_actor_uid': current_actor().uid,
|
||||
}
|
||||
|
||||
if isinstance(exc, ContextCancelled):
|
||||
# TODO: ?just wholesale proxy `.msgdata: dict`?
|
||||
# XXX WARNING, when i swapped these ctx-semantics
|
||||
# tests started hanging..???!!!???
|
||||
# if msgdata := exc.getattr('msgdata', {}):
|
||||
# error_msg.update(msgdata)
|
||||
if (
|
||||
isinstance(exc, ContextCancelled)
|
||||
or isinstance(exc, StreamOverrun)
|
||||
):
|
||||
error_msg.update(exc.msgdata)
|
||||
|
||||
return {'error': error_msg}
|
||||
pkt: dict = {'error': error_msg}
|
||||
if cid:
|
||||
pkt['cid'] = cid
|
||||
|
||||
return pkt
|
||||
|
||||
|
||||
def unpack_error(
|
||||
|
||||
msg: dict[str, Any],
|
||||
chan=None,
|
||||
err_type=RemoteActorError
|
||||
|
||||
) -> Exception:
|
||||
chan: Channel|None = None,
|
||||
box_type: RemoteActorError = RemoteActorError,
|
||||
|
||||
hide_tb: bool = True,
|
||||
|
||||
) -> None|Exception:
|
||||
'''
|
||||
Unpack an 'error' message from the wire
|
||||
into a local ``RemoteActorError``.
|
||||
into a local `RemoteActorError` (subtype).
|
||||
|
||||
NOTE: this routine DOES not RAISE the embedded remote error,
|
||||
which is the responsibilitiy of the caller.
|
||||
|
||||
'''
|
||||
__tracebackhide__ = True
|
||||
error = msg['error']
|
||||
__tracebackhide__: bool = hide_tb
|
||||
|
||||
tb_str = error.get('tb_str', '')
|
||||
message = f"{chan.uid}\n" + tb_str
|
||||
type_name = error['type_str']
|
||||
error_dict: dict[str, dict] | None
|
||||
if (
|
||||
error_dict := msg.get('error')
|
||||
) is None:
|
||||
# no error field, nothing to unpack.
|
||||
return None
|
||||
|
||||
# retrieve the remote error's msg encoded details
|
||||
tb_str: str = error_dict.get('tb_str', '')
|
||||
message: str = f'{chan.uid}\n' + tb_str
|
||||
type_name: str = (
|
||||
error_dict.get('type_str')
|
||||
or error_dict['boxed_type']
|
||||
)
|
||||
suberror_type: Type[BaseException] = Exception
|
||||
|
||||
if type_name == 'ContextCancelled':
|
||||
err_type = ContextCancelled
|
||||
suberror_type = RemoteActorError
|
||||
box_type = ContextCancelled
|
||||
suberror_type = box_type
|
||||
|
||||
else: # try to lookup a suitable local error type
|
||||
for ns in [
|
||||
|
@ -164,18 +376,19 @@ def unpack_error(
|
|||
eg,
|
||||
trio,
|
||||
]:
|
||||
try:
|
||||
suberror_type = getattr(ns, type_name)
|
||||
if suberror_type := getattr(
|
||||
ns,
|
||||
type_name,
|
||||
False,
|
||||
):
|
||||
break
|
||||
except AttributeError:
|
||||
continue
|
||||
|
||||
exc = err_type(
|
||||
exc = box_type(
|
||||
message,
|
||||
suberror_type=suberror_type,
|
||||
|
||||
# unpack other fields into error type init
|
||||
**msg['error'],
|
||||
**error_dict,
|
||||
)
|
||||
|
||||
return exc
|
||||
|
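As a minimal sketch of the box/unbox round-trip implemented by `pack_error()`/`unpack_error()` above (the private `tractor._exceptions` import path and the stub channel are assumptions for illustration; only `chan.uid` is read during unpacking):

import trio
import tractor
from tractor._exceptions import pack_error, unpack_error

async def main():
    # a runtime must be up since `pack_error()` reads `current_actor().uid`
    async with tractor.open_root_actor(name='boxing_demo'):
        try:
            raise ValueError('kaboom')
        except ValueError as err:
            # box the locally caught exc for IPC transit
            msg: dict = pack_error(err, cid='example-cid')
            assert msg['error']['boxed_type'] == 'ValueError'

        # normally run on the *receiver* side; a stub stands in for
        # the `Channel` since only `.uid` is accessed here
        class StubChan:
            uid = ('some-actor', 'some-uuid')

        boxed = unpack_error(msg, chan=StubChan())
        assert isinstance(boxed, tractor.RemoteActorError)

trio.run(main)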
@@ -194,3 +407,115 @@ def is_multi_cancelled(exc: BaseException) -> bool:
    ) is not None

    return False


+def _raise_from_no_key_in_msg(
+    ctx: Context,
+    msg: dict,
+    src_err: KeyError,
+    log: StackLevelAdapter,  # caller specific `log` obj
+
+    expect_key: str = 'yield',
+    stream: MsgStream | None = None,
+
+    # allow "deeper" tbs when debugging B^o
+    hide_tb: bool = True,
+
+) -> bool:
+    '''
+    Raise an appropriate local error when a
+    `MsgStream` msg arrives which does not
+    contain the expected (at least under normal
+    operation) `'yield'` field.
+
+    `Context` and any embedded `MsgStream` termination,
+    as well as remote task errors are handled in order
+    of priority as:
+
+    - any 'error' msg is re-boxed and raised locally as
+      -> `RemoteActorError`|`ContextCancelled`
+
+    - a `MsgStream` 'stop' msg is constructed, assigned
+      and raised locally as -> `trio.EndOfChannel`
+
+    - All other mis-keyed msgs (like say a "final result"
+      'return' msg, normally delivered from `Context.result()`)
+      are re-boxed inside a `MessagingError` with an explicit
+      exc content describing the missing IPC-msg-key.
+
+    '''
+    __tracebackhide__: bool = hide_tb
+
+    # an internal error should never get here
+    try:
+        cid: str = msg['cid']
+    except KeyError as src_err:
+        raise MessagingError(
+            f'IPC `Context` rx-ed msg without a ctx-id (cid)!?\n'
+            f'cid: {msg.get("cid")}\n\n'
+            f'{pformat(msg)}\n'
+        ) from src_err
+
+    # TODO: test that shows stream raising an expected error!!!
+
+    # raise the error message in a boxed exception type!
+    if msg.get('error'):
+        raise unpack_error(
+            msg,
+            ctx.chan,
+            hide_tb=hide_tb,
+
+        ) from None
+
+    # `MsgStream` termination msg.
+    # TODO: does it make more sense to pack
+    # the stream._eoc outside this in the caller always?
+    elif (
+        msg.get('stop')
+        or (
+            stream
+            and stream._eoc
+        )
+    ):
+        log.debug(
+            f'Context[{cid}] stream was stopped by remote side\n'
+            f'cid: {cid}\n'
+        )
+
+        # TODO: if a local task is already blocking on
+        # a `Context.result()` and thus a `.receive()` on the
+        # rx-chan, we close the chan and set state ensuring that
+        # an eoc is raised!
+
+        # XXX: this causes ``ReceiveChannel.__anext__()`` to
+        # raise a ``StopAsyncIteration`` **and** in our catch
+        # block below it will trigger ``.aclose()``.
+        eoc = trio.EndOfChannel(
+            f'Context stream ended due to msg:\n\n'
+            f'{pformat(msg)}\n'
+        )
+        # XXX: important to set so that a new `.receive()`
+        # call (likely by another task using a broadcast receiver)
+        # doesn't accidentally pull the `return` message
+        # value out of the underlying feed mem chan which is
+        # destined for the `Context.result()` call during ctx-exit!
+        stream._eoc: Exception = eoc
+
+        raise eoc from src_err
+
+    if (
+        stream
+        and stream._closed
+    ):
+        raise trio.ClosedResourceError('This stream was closed')
+
+    # always re-raise the source error if no translation error case
+    # is activated above.
+    _type: str = 'Stream' if stream else 'Context'
+    raise MessagingError(
+        f"{_type} was expecting a '{expect_key}' message"
+        " BUT received a non-error msg:\n"
+        f'{pformat(msg)}'
+    ) from src_err
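A hedged sketch of how a receive-loop might drive this helper on a mis-keyed msg (the `ctx` stub and the private module path are assumptions for illustration; in the runtime a real `Context` is passed):

from types import SimpleNamespace
from tractor._exceptions import (
    _raise_from_no_key_in_msg,
    MessagingError,
)
from tractor.log import get_logger

log = get_logger(__name__)
ctx = SimpleNamespace(chan=SimpleNamespace(uid=('peer', 'uuid')))

msg = {'cid': 'abc', 'return': 42}  # mis-keyed: no 'yield' field
try:
    msg['yield']
except KeyError as src_err:
    try:
        _raise_from_no_key_in_msg(ctx, msg, src_err, log)
    except MessagingError as me:
        # "Context was expecting a 'yield' message BUT received
        # a non-error msg: ..."
        print(me)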
tractor/_ipc.py (112 changed lines)
@@ -19,17 +19,17 @@ Inter-process comms abstractions

"""
from __future__ import annotations
-import platform
import struct
-import typing
+import platform
+from pprint import pformat
from collections.abc import (
    AsyncGenerator,
    AsyncIterator,
)
+import typing
from typing import (
    Any,
    runtime_checkable,
-    Optional,
    Protocol,
    Type,
    TypeVar,
@@ -112,6 +112,13 @@ class MsgpackTCPStream(MsgTransport):
    using the ``msgspec`` codec lib.

    '''
+    layer_key: int = 4
+    name_key: str = 'tcp'
+
+    # TODO: better naming for this?
+    # -[ ] check how libp2p does naming for such things?
+    codec_key: str = 'msgpack'
+
    def __init__(
        self,
        stream: trio.SocketStream,
@@ -267,7 +274,7 @@ class Channel:
    def __init__(

        self,
-        destaddr: Optional[tuple[str, int]],
+        destaddr: tuple[str, int]|None,

        msg_transport_type_key: tuple[str, str] = ('msgpack', 'tcp'),
@@ -285,18 +292,29 @@ class Channel:

        # Either created in ``.connect()`` or passed in by
        # user in ``.from_stream()``.
-        self._stream: Optional[trio.SocketStream] = None
-        self.msgstream: Optional[MsgTransport] = None
+        self._stream: trio.SocketStream|None = None
+        self._transport: MsgTransport|None = None

        # set after handshake - always uid of far end
-        self.uid: Optional[tuple[str, str]] = None
+        self.uid: tuple[str, str]|None = None

        self._agen = self._aiter_recv()
-        self._exc: Optional[Exception] = None  # set if far end actor errors
+        self._exc: Exception|None = None  # set if far end actor errors
        self._closed: bool = False
-        # flag set on ``Portal.cancel_actor()`` indicating
-        # remote (peer) cancellation of the far end actor runtime.
-        self._cancel_called: bool = False  # set on ``Portal.cancel_actor()``
+
+        # flag set by ``Portal.cancel_actor()`` indicating remote
+        # (possibly peer) cancellation of the far end actor
+        # runtime.
+        self._cancel_called: bool = False
+
+    @property
+    def msgstream(self) -> MsgTransport:
+        log.info('`Channel.msgstream` is an old name, use `._transport`')
+        return self._transport
+
+    @property
+    def transport(self) -> MsgTransport:
+        return self._transport

    @classmethod
    def from_stream(
@@ -307,37 +325,44 @@ class Channel:
    ) -> Channel:

        src, dst = get_stream_addrs(stream)
-        chan = Channel(destaddr=dst, **kwargs)
+        chan = Channel(
+            destaddr=dst,
+            **kwargs,
+        )

        # set immediately here from provided instance
-        chan._stream = stream
+        chan._stream: trio.SocketStream = stream
        chan.set_msg_transport(stream)
        return chan

    def set_msg_transport(
        self,
        stream: trio.SocketStream,
-        type_key: Optional[tuple[str, str]] = None,
+        type_key: tuple[str, str]|None = None,

    ) -> MsgTransport:
        type_key = type_key or self._transport_key
-        self.msgstream = get_msg_transport(type_key)(stream)
-        return self.msgstream
+        self._transport = get_msg_transport(type_key)(stream)
+        return self._transport

    def __repr__(self) -> str:
-        if self.msgstream:
-            return repr(
-                self.msgstream.stream.socket._sock).replace(  # type: ignore
-                "socket.socket", "Channel")
-        return object.__repr__(self)
+        if not self._transport:
+            return '<Channel with inactive transport?>'
+
+        return repr(
+            self._transport.stream.socket._sock
+        ).replace(  # type: ignore
+            "socket.socket",
+            "Channel",
+        )

    @property
-    def laddr(self) -> Optional[tuple[str, int]]:
-        return self.msgstream.laddr if self.msgstream else None
+    def laddr(self) -> tuple[str, int]|None:
+        return self._transport.laddr if self._transport else None

    @property
-    def raddr(self) -> Optional[tuple[str, int]]:
-        return self.msgstream.raddr if self.msgstream else None
+    def raddr(self) -> tuple[str, int]|None:
+        return self._transport.raddr if self._transport else None

    async def connect(
        self,
@@ -356,26 +381,29 @@ class Channel:
            *destaddr,
            **kwargs
        )
-        msgstream = self.set_msg_transport(stream)
+        transport = self.set_msg_transport(stream)

        log.transport(
-            f'Opened channel[{type(msgstream)}]: {self.laddr} -> {self.raddr}'
+            f'Opened channel[{type(transport)}]: {self.laddr} -> {self.raddr}'
        )
-        return msgstream
+        return transport

    async def send(self, item: Any) -> None:

-        log.transport(f"send `{item}`")  # type: ignore
-        assert self.msgstream
+        log.transport(
+            '=> send IPC msg:\n\n'
+            f'{pformat(item)}\n'
+        )  # type: ignore
+        assert self._transport

-        await self.msgstream.send(item)
+        await self._transport.send(item)

    async def recv(self) -> Any:
-        assert self.msgstream
-        return await self.msgstream.recv()
+        assert self._transport
+        return await self._transport.recv()

        # try:
-        #     return await self.msgstream.recv()
+        #     return await self._transport.recv()
        # except trio.BrokenResourceError:
        #     if self._autorecon:
        #         await self._reconnect()
@@ -388,8 +416,8 @@ class Channel:
            f'Closing channel to {self.uid} '
            f'{self.laddr} -> {self.raddr}'
        )
-        assert self.msgstream
-        await self.msgstream.stream.aclose()
+        assert self._transport
+        await self._transport.stream.aclose()
        self._closed = True

    async def __aenter__(self):
@@ -440,16 +468,16 @@ class Channel:
        Async iterate items from underlying stream.

        '''
-        assert self.msgstream
+        assert self._transport
        while True:
            try:
-                async for item in self.msgstream:
+                async for item in self._transport:
                    yield item
                    # sent = yield item
                    # if sent is not None:
                    #     # optimization, passing None through all the
                    #     # time is pointless
-                    #     await self.msgstream.send(sent)
+                    #     await self._transport.send(sent)
            except trio.BrokenResourceError:

                # if not self._autorecon:
@@ -462,12 +490,14 @@ class Channel:
            # continue

    def connected(self) -> bool:
-        return self.msgstream.connected() if self.msgstream else False
+        return self._transport.connected() if self._transport else False


@asynccontextmanager
async def _connect_chan(
-    host: str, port: int
+    host: str,
+    port: int

) -> typing.AsyncGenerator[Channel, None]:
    '''
    Create and connect a channel with disconnect on context manager
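For reference, a small sketch of dialing a peer with the renamed `._transport` API via `_connect_chan()` (assumes some tractor peer, eg. a registry/root actor, is already listening on the given addr):

import trio
from tractor._ipc import _connect_chan

async def main():
    async with _connect_chan('127.0.0.1', 1616) as chan:
        print(f'connected: {chan.laddr} -> {chan.raddr}')
        # the old `.msgstream` name still works but logs a hint
        print('transport:', type(chan.transport).__name__)

trio.run(main)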
@@ -0,0 +1,151 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

'''
Multiaddress parser and utils according to the spec(s) defined by
`libp2p` and used in dependent projects such as `ipfs`:

- https://docs.libp2p.io/concepts/fundamentals/addressing/
- https://github.com/libp2p/specs/blob/master/addressing/README.md

'''
from typing import Iterator

from bidict import bidict

# TODO: see if we can leverage libp2p ecosys projects instead of
# rolling our own (parser) impls of the above addressing specs:
# - https://github.com/libp2p/py-libp2p
# - https://docs.libp2p.io/concepts/nat/circuit-relay/#relay-addresses
# prots: bidict[int, str] = bidict({
prots: bidict[int, str] = {
    'ipv4': 3,
    'ipv6': 3,
    'wg': 3,

    'tcp': 4,
    'udp': 4,

    # TODO: support the next-gen shite Bo
    # 'quic': 4,
    # 'ssh': 7,  # via rsyscall bootstrapping
}

prot_params: dict[str, tuple[str]] = {
    'ipv4': ('addr',),
    'ipv6': ('addr',),
    'wg': ('addr', 'port', 'pubkey'),

    'tcp': ('port',),
    'udp': ('port',),

    # 'quic': ('port',),
    # 'ssh': ('port',),
}


def iter_prot_layers(
    multiaddr: str,
) -> Iterator[
    tuple[
        int,
        list[str]
    ]
]:
    '''
    Unpack a libp2p style "multiaddress" into multiple "segments"
    for each "layer" of the protocol stack (in OSI terms).

    '''
    tokens: list[str] = multiaddr.split('/')
    root, tokens = tokens[0], tokens[1:]
    assert not root  # there is a root '/' on LHS
    itokens = iter(tokens)

    prot: str | None = None
    params: list[str] = []
    for token in itokens:
        # every prot path should start with a known
        # key-str.
        if token in prots:
            if prot is None:
                prot: str = token
            else:
                yield prot, params
                prot = token

            params = []

        elif token not in prots:
            params.append(token)

    else:
        yield prot, params


def parse_maddr(
    multiaddr: str,
) -> dict[str, str | int | dict]:
    '''
    Parse a libp2p style "multiaddress" into its distinct protocol
    segments where each segment is of the form:

        `../<protocol>/<param0>/<param1>/../<paramN>`

    and is loaded into a (order preserving) `layers: dict[str,
    dict[str, Any]]` which holds each protocol-layer-segment of the
    original `str` path as a separate entry according to its approx
    OSI "layer number".

    Any `paramN` in the path must be distinctly defined by a str-token in the
    (module global) `prot_params` table.

    For eg. for wireguard which requires an address, port number and publickey
    the protocol params are specified as the entry:

        'wg': ('addr', 'port', 'pubkey'),

    and are thus parsed from a maddr in that order:
        `'/wg/1.1.1.1/51820/<pubkey>'`

    '''
    layers: dict[str, str | int | dict] = {}
    for (
        prot_key,
        params,
    ) in iter_prot_layers(multiaddr):

        layer: int = prots[prot_key]  # OSI layer used for sorting
        ep: dict[str, int | str] = {'layer': layer}
        layers[prot_key] = ep

        # TODO; validation and resolving of names:
        # - each param via a validator provided as part of the
        #   prot_params def? (also see `"port"` case below..)
        # - do a resolv step that will check addrs against
        #   any loaded network.resolv: dict[str, str]
        rparams: list = list(reversed(params))
        for key in prot_params[prot_key]:
            val: str | int = rparams.pop()

            # TODO: UGHH, dunno what we should do for validation
            # here, put it in the params spec somehow?
            if key == 'port':
                val = int(val)

            ep[key] = val

    return layers
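A usage sketch of the parser above (the import path is an assumption since the new file's name isn't shown in this view):

from tractor._multiaddr import parse_maddr  # assumed module path

# stacked ipv4 + tcp layers
print(parse_maddr('/ipv4/127.0.0.1/tcp/1616'))
# -> {'ipv4': {'layer': 3, 'addr': '127.0.0.1'},
#     'tcp': {'layer': 4, 'port': 1616}}

# the wireguard example from the docstring
print(parse_maddr('/wg/1.1.1.1/51820/mypubkey'))
# -> {'wg': {'layer': 3, 'addr': '1.1.1.1',
#            'port': 51820, 'pubkey': 'mypubkey'}}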
(File diff suppressed because it is too large.)

tractor/_root.py (226 changed lines)
@@ -25,7 +25,6 @@ import logging
import signal
import sys
import os
-import typing
import warnings
@@ -37,7 +36,7 @@ from ._runtime import (
    Arbiter,
    async_main,
)
-from . import _debug
+from .devx import _debug
from . import _spawn
from . import _state
from . import log
@@ -46,8 +45,14 @@ from ._exceptions import is_multi_cancelled


# set at startup and after forks
-_default_arbiter_host: str = '127.0.0.1'
-_default_arbiter_port: int = 1616
+_default_host: str = '127.0.0.1'
+_default_port: int = 1616
+
+# default registry always on localhost
+_default_lo_addrs: list[tuple[str, int]] = [(
+    _default_host,
+    _default_port,
+)]


logger = log.get_logger('tractor')
@@ -58,10 +63,10 @@ async def open_root_actor(

    *,
    # defaults are above
-    arbiter_addr: tuple[str, int] | None = None,
+    registry_addrs: list[tuple[str, int]] | None = None,

    # defaults are above
-    registry_addr: tuple[str, int] | None = None,
+    arbiter_addr: tuple[str, int] | None = None,

    name: str | None = 'root',
@@ -79,7 +84,11 @@ async def open_root_actor(
    enable_modules: list | None = None,
    rpc_module_paths: list | None = None,

-) -> typing.Any:
+    # NOTE: allow caller to ensure that only one registry exists
+    # and that this call creates it.
+    ensure_registry: bool = False,
+
+) -> Actor:
    '''
    Runtime init entry point for ``tractor``.

@@ -89,7 +98,7 @@ async def open_root_actor(
    # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018
    builtin_bp_handler = sys.breakpointhook
    orig_bp_path: str | None = os.environ.get('PYTHONBREAKPOINT', None)
-    os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace'
+    os.environ['PYTHONBREAKPOINT'] = 'tractor.devx._debug.pause_from_sync'

    # attempt to retrieve ``trio``'s sigint handler and stash it
    # on our debugger lock state.
@@ -115,29 +124,34 @@ async def open_root_actor(

    if arbiter_addr is not None:
        warnings.warn(
-            '`arbiter_addr` is now deprecated and has been renamed to'
-            '`registry_addr`.\nUse that instead..',
+            '`arbiter_addr` is now deprecated\n'
+            'Use `registry_addrs: list[tuple]` instead..',
            DeprecationWarning,
            stacklevel=2,
        )
+        registry_addrs = [arbiter_addr]

-    registry_addr = (host, port) = (
-        registry_addr
-        or arbiter_addr
-        or (
-            _default_arbiter_host,
-            _default_arbiter_port,
-        )
+    registry_addrs: list[tuple[str, int]] = (
+        registry_addrs
+        or
+        _default_lo_addrs
    )
+    assert registry_addrs

-    loglevel = (loglevel or log._default_loglevel).upper()
+    loglevel = (
+        loglevel
+        or log._default_loglevel
+    ).upper()

-    if debug_mode and _spawn._spawn_method == 'trio':
+    if (
+        debug_mode
+        and _spawn._spawn_method == 'trio'
+    ):
        _state._runtime_vars['_debug_mode'] = True

-        # expose internal debug module to every actor allowing
-        # for use of ``await tractor.breakpoint()``
-        enable_modules.append('tractor._debug')
+        # expose internal debug module to every actor allowing for
+        # use of ``await tractor.pause()``
+        enable_modules.append('tractor.devx._debug')

    # if debug mode gets enabled *at least* use that level of
    # logging for some informative console prompts.
@@ -155,75 +169,146 @@ async def open_root_actor(
            "Debug mode is only supported for the `trio` backend!"
        )

-    log.get_console_log(loglevel)
+    assert loglevel
+    _log = log.get_console_log(loglevel)
+    assert _log

-    try:
-        # make a temporary connection to see if an arbiter exists,
-        # if one can't be made quickly we assume none exists.
-        arbiter_found = False
+    # TODO: factor this into `.devx._stackscope`!!
+    if debug_mode:
+        try:
+            logger.info('Enabling `stackscope` traces on SIGUSR1')
+            from .devx import enable_stack_on_sig
+            enable_stack_on_sig()
+        except ImportError:
+            logger.warning(
+                '`stackscope` not installed for use in debug mode!'
+            )

-        # TODO: this connect-and-bail forces us to have to carefully
-        # rewrap TCP 104-connection-reset errors as EOF so as to avoid
-        # propagating cancel-causing errors to the channel-msg loop
-        # machinery. Likely it would be better to eventually have
-        # a "discovery" protocol with basic handshake instead.
-        with trio.move_on_after(1):
-            async with _connect_chan(host, port):
-                arbiter_found = True
+    # closed into below ping task-func
+    ponged_addrs: list[tuple[str, int]] = []

-    except OSError:
-        # TODO: make this a "discovery" log level?
-        logger.warning(f"No actor registry found @ {host}:{port}")
+    async def ping_tpt_socket(
+        addr: tuple[str, int],
+        timeout: float = 1,
+    ) -> None:
+        '''
+        Attempt temporary connection to see if a registry is
+        listening at the requested address by a transport layer
+        ping.

-    # create a local actor and start up its main routine/task
-    if arbiter_found:
+        If a connection can't be made quickly we assume no
+        server is listening at that addr.
+
+        '''
+        try:
+            # TODO: this connect-and-bail forces us to have to
+            # carefully rewrap TCP 104-connection-reset errors as
+            # EOF so as to avoid propagating cancel-causing errors
+            # to the channel-msg loop machinery. Likely it would
+            # be better to eventually have a "discovery" protocol
+            # with basic handshake instead?
+            with trio.move_on_after(timeout):
+                async with _connect_chan(*addr):
+                    ponged_addrs.append(addr)
+
+        except OSError:
+            # TODO: make this a "discovery" log level?
+            logger.warning(f'No actor registry found @ {addr}')
+
+    async with trio.open_nursery() as tn:
+        for addr in registry_addrs:
+            tn.start_soon(
+                ping_tpt_socket,
+                tuple(addr),  # TODO: just drop this requirement?
+            )
+
+    trans_bind_addrs: list[tuple[str, int]] = []
+
+    # Create a new local root-actor instance which IS NOT THE
+    # REGISTRAR
+    if ponged_addrs:
+
+        if ensure_registry:
+            raise RuntimeError(
+                f'Failed to open `{name}`@{ponged_addrs}: '
+                'registry socket(s) already bound'
+            )

        # we were able to connect to an arbiter
-        logger.info(f"Arbiter seems to exist @ {host}:{port}")
+        logger.info(
+            f'Registry(s) seem(s) to exist @ {ponged_addrs}'
+        )

        actor = Actor(
-            name or 'anonymous',
-            arbiter_addr=registry_addr,
+            name=name or 'anonymous',
+            registry_addrs=ponged_addrs,
            loglevel=loglevel,
            enable_modules=enable_modules,
        )
-        host, port = (host, 0)
+        # DO NOT use the registry_addrs as the transport server
+        # addrs for this new non-registrar, root-actor.
+        for host, port in ponged_addrs:
+            # NOTE: zero triggers dynamic OS port allocation
+            trans_bind_addrs.append((host, 0))

+    # Start this local actor as the "registrar", aka a regular
+    # actor who manages the local registry of "mailboxes" of
+    # other process-tree-local sub-actors.
    else:
-        # start this local actor as the arbiter (aka a regular actor who
-        # manages the local registry of "mailboxes")

-        # Note that if the current actor is the arbiter it is desirable
-        # for it to stay up indefinitely until a re-election process has
-        # taken place - which is not implemented yet FYI).
+        # NOTE that if the current actor IS THE REGISTRAR, the
+        # following init steps are taken:
+        # - the transport layer server is bound to each (host, port)
+        #   pair defined in provided registry_addrs, or the default.
+        trans_bind_addrs = registry_addrs
+
+        # - it is normally desirable for any registrar to stay up
+        #   indefinitely until either all registered (child/sub)
+        #   actors are terminated (via SC supervision) or,
+        #   a re-election process has taken place.
+        # NOTE: all of ^ which is not implemented yet - see:
+        # https://github.com/goodboy/tractor/issues/216
+        # https://github.com/goodboy/tractor/pull/348
+        # https://github.com/goodboy/tractor/issues/296

        actor = Arbiter(
-            name or 'arbiter',
-            arbiter_addr=registry_addr,
+            name or 'registrar',
+            registry_addrs=registry_addrs,
            loglevel=loglevel,
            enable_modules=enable_modules,
        )

+    # Start up main task set via core actor-runtime nurseries.
    try:
        # assign process-local actor
        _state._current_actor = actor

        # start local channel-server and fake the portal API
        # NOTE: this won't block since we provide the nursery
-        logger.info(f"Starting local {actor} @ {host}:{port}")
+        ml_addrs_str: str = '\n'.join(
+            f'@{addr}' for addr in trans_bind_addrs
+        )
+        logger.info(
+            f'Starting local {actor.uid} on the following transport addrs:\n'
+            f'{ml_addrs_str}'
+        )

        # start the actor runtime in a new task
        async with trio.open_nursery() as nursery:

-            # ``_runtime.async_main()`` creates an internal nursery and
-            # thus blocks here until the entire underlying actor tree has
-            # terminated thereby conducting structured concurrency.
-
+            # ``_runtime.async_main()`` creates an internal nursery
+            # and blocks here until any underlying actor(-process)
+            # tree has terminated thereby conducting so called
+            # "end-to-end" structured concurrency throughout an
+            # entire hierarchical python sub-process set; all
+            # "actor runtime" primitives are SC-compat and thus all
+            # transitively spawned actors/processes must be as
+            # well.
            await nursery.start(
                partial(
                    async_main,
                    actor,
-                    accept_addr=(host, port),
+                    accept_addrs=trans_bind_addrs,
                    parent_addr=None
                )
            )
@@ -235,12 +320,16 @@ async def open_root_actor(
            BaseExceptionGroup,
        ) as err:

-            entered = await _debug._maybe_enter_pm(err)
+            entered: bool = await _debug._maybe_enter_pm(err)

-            if not entered and not is_multi_cancelled(err):
-                logger.exception("Root actor crashed:")
+            if (
+                not entered
+                and not is_multi_cancelled(err)
+            ):
+                logger.exception('Root actor crashed:\n')

-            # always re-raise
+            # ALWAYS re-raise any error bubbled up from the
+            # runtime!
            raise

        finally:
@@ -253,14 +342,14 @@ async def open_root_actor(
            # for an in nurseries:
            #     tempn.start_soon(an.exited.wait)

-            logger.cancel("Shutting down root actor")
-            await actor.cancel(
-                requesting_uid=actor.uid,
+            logger.info(
+                'Closing down root actor'
            )
+            await actor.cancel(None)  # self cancel
    finally:
        _state._current_actor = None

-        # restore breakpoint hook state
+        # restore built-in `breakpoint()` hook state
        sys.breakpointhook = builtin_bp_handler
        if orig_bp_path is not None:
            os.environ['PYTHONBREAKPOINT'] = orig_bp_path
@@ -276,10 +365,7 @@ def run_daemon(

    # runtime kwargs
    name: str | None = 'root',
-    registry_addr: tuple[str, int] = (
-        _default_arbiter_host,
-        _default_arbiter_port,
-    ),
+    registry_addrs: list[tuple[str, int]] = _default_lo_addrs,

    start_method: str | None = None,
    debug_mode: bool = False,
@@ -303,7 +389,7 @@ def run_daemon(
    async def _main():

        async with open_root_actor(
-            registry_addr=registry_addr,
+            registry_addrs=registry_addrs,
            name=name,
            start_method=start_method,
            debug_mode=debug_mode,
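A sketch of the new list-based `registry_addrs` API these hunks introduce (replacing the deprecated single `arbiter_addr`/`registry_addr` tuple):

import trio
import tractor

async def main():
    async with tractor.open_root_actor(
        name='demo_root',
        # each addr is pinged; if none respond this actor
        # becomes the registrar bound to all of them
        registry_addrs=[
            ('127.0.0.1', 1616),
            ('127.0.0.1', 1617),
        ],
    ):
        await trio.sleep(0.1)  # runtime is up; do work here

trio.run(main)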
tractor/_runtime.py (2165 changed lines)
(File diff suppressed because it is too large.)
@@ -0,0 +1,833 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
SC friendly shared memory management geared at real-time
processing.

Support for ``numpy`` compatible array-buffers is provided but is
considered optional within the context of this runtime-library.

"""
from __future__ import annotations
from sys import byteorder
import time
from typing import Optional
from multiprocessing import shared_memory as shm
from multiprocessing.shared_memory import (
    SharedMemory,
    ShareableList,
)

from msgspec import Struct
import tractor

from .log import get_logger


_USE_POSIX = getattr(shm, '_USE_POSIX', False)
if _USE_POSIX:
    from _posixshmem import shm_unlink


try:
    import numpy as np
    from numpy.lib import recfunctions as rfn
    import nptyping
except ImportError:
    pass


log = get_logger(__name__)


def disable_mantracker():
    '''
    Disable all ``multiprocessing`` "resource tracking" machinery since
    it's an absolute multi-threaded mess of non-SC madness.

    '''
    from multiprocessing import resource_tracker as mantracker

    # Tell the "resource tracker" thing to fuck off.
    class ManTracker(mantracker.ResourceTracker):
        def register(self, name, rtype):
            pass

        def unregister(self, name, rtype):
            pass

        def ensure_running(self):
            pass

    # "know your land and know your prey"
    # https://www.dailymotion.com/video/x6ozzco
    mantracker._resource_tracker = ManTracker()
    mantracker.register = mantracker._resource_tracker.register
    mantracker.ensure_running = mantracker._resource_tracker.ensure_running
    mantracker.unregister = mantracker._resource_tracker.unregister
    mantracker.getfd = mantracker._resource_tracker.getfd


disable_mantracker()
class SharedInt:
    '''
    Wrapper around a single entry shared memory array which
    holds an ``int`` value used as an index counter.

    '''
    def __init__(
        self,
        shm: SharedMemory,
    ) -> None:
        self._shm = shm

    @property
    def value(self) -> int:
        return int.from_bytes(self._shm.buf, byteorder)

    @value.setter
    def value(self, value) -> None:
        self._shm.buf[:] = value.to_bytes(self._shm.size, byteorder)

    def destroy(self) -> None:
        if _USE_POSIX:
            # We manually unlink to bypass all the "resource tracker"
            # nonsense meant for non-SC systems.
            name = self._shm.name
            try:
                shm_unlink(name)
            except FileNotFoundError:
                # might be a teardown race here?
                log.warning(f'Shm for {name} already unlinked?')


class NDToken(Struct, frozen=True):
    '''
    Internal representation of a shared memory ``numpy`` array "token"
    which can be used to key and load a system (OS) wide shm entry
    and correctly read the array by type signature.

    This type is msg safe.

    '''
    shm_name: str  # this serves as a "key" value
    shm_first_index_name: str
    shm_last_index_name: str
    dtype_descr: tuple
    size: int  # in struct-array index / row terms

    # TODO: use nptyping here on dtypes
    @property
    def dtype(self) -> list[tuple[str, str, tuple[int, ...]]]:
        return np.dtype(
            list(
                map(tuple, self.dtype_descr)
            )
        ).descr

    def as_msg(self):
        return self.to_dict()

    @classmethod
    def from_msg(cls, msg: dict) -> NDToken:
        if isinstance(msg, NDToken):
            return msg

        # TODO: native struct decoding
        # return _token_dec.decode(msg)

        msg['dtype_descr'] = tuple(map(tuple, msg['dtype_descr']))
        return NDToken(**msg)
# _token_dec = msgspec.msgpack.Decoder(NDToken)

# TODO: this api?
# _known_tokens = tractor.ActorVar('_shm_tokens', {})
# _known_tokens = tractor.ContextStack('_known_tokens', )
# _known_tokens = trio.RunVar('shms', {})

# TODO: this should maybe be provided via
# a `.trionics.maybe_open_context()` wrapper factory?
# process-local store of keys to tokens
_known_tokens: dict[str, NDToken] = {}


def get_shm_token(key: str) -> NDToken | None:
    '''
    Convenience func to check if a token
    for the provided key is known by this process.

    Returns either the ``numpy`` token or a string for a shared list.

    '''
    return _known_tokens.get(key)


def _make_token(
    key: str,
    size: int,
    dtype: np.dtype,

) -> NDToken:
    '''
    Create a serializable token that can be used
    to access a shared array.

    '''
    return NDToken(
        shm_name=key,
        shm_first_index_name=key + "_first",
        shm_last_index_name=key + "_last",
        dtype_descr=tuple(np.dtype(dtype).descr),
        size=size,
    )
class ShmArray:
    '''
    A shared memory ``numpy.ndarray`` API.

    An underlying shared memory buffer is allocated based on
    a user specified ``numpy.ndarray``. This fixed size array
    can be read and written to by pushing data both onto the "front"
    or "back" of a set index range. The indexes for the "first" and
    "last" index are themselves stored in shared memory (accessed via
    ``SharedInt`` interfaces) values such that multiple processes can
    interact with the same array using a synchronized-index.

    '''
    def __init__(
        self,
        shmarr: np.ndarray,
        first: SharedInt,
        last: SharedInt,
        shm: SharedMemory,
        # readonly: bool = True,
    ) -> None:
        self._array = shmarr

        # indexes for first and last indices corresponding
        # to filled data
        self._first = first
        self._last = last

        self._len = len(shmarr)
        self._shm = shm
        self._post_init: bool = False

        # pushing data does not write the index (aka primary key)
        self._write_fields: list[str] | None = None
        dtype = shmarr.dtype
        if dtype.fields:
            self._write_fields = list(shmarr.dtype.fields.keys())[1:]

    # TODO: ringbuf api?

    @property
    def _token(self) -> NDToken:
        return NDToken(
            shm_name=self._shm.name,
            shm_first_index_name=self._first._shm.name,
            shm_last_index_name=self._last._shm.name,
            dtype_descr=tuple(self._array.dtype.descr),
            size=self._len,
        )

    @property
    def token(self) -> dict:
        """Shared memory token that can be serialized and used by
        another process to attach to this array.
        """
        return self._token.as_msg()

    @property
    def index(self) -> int:
        return self._last.value % self._len

    @property
    def array(self) -> np.ndarray:
        '''
        Return an up-to-date ``np.ndarray`` view of the
        so-far-written data to the underlying shm buffer.

        '''
        a = self._array[self._first.value:self._last.value]

        # first, last = self._first.value, self._last.value
        # a = self._array[first:last]

        # TODO: eventually comment this once we've not seen it in the
        # wild in a long time..
        # XXX: race where first/last indexes cause a reader
        # to load an empty array..
        if len(a) == 0 and self._post_init:
            raise RuntimeError('Empty array race condition hit!?')
            # breakpoint()

        return a

    def ustruct(
        self,
        fields: Optional[list[str]] = None,

        # type that all field values will be cast to
        # in the returned view.
        common_dtype: np.dtype = float,

    ) -> np.ndarray:

        array = self._array

        if fields:
            selection = array[fields]
            # fcount = len(fields)
        else:
            selection = array
            # fcount = len(array.dtype.fields)

        # XXX: manual ``.view()`` attempt that also doesn't work.
        # uview = selection.view(
        #     dtype='<f16',
        # ).reshape(-1, 4, order='A')

        # assert len(selection) == len(uview)

        u = rfn.structured_to_unstructured(
            selection,
            # dtype=float,
            copy=True,
        )

        # unstruct = np.ndarray(u.shape, dtype=a.dtype, buffer=shm.buf)
        # array[:] = a[:]
        return u
        # return ShmArray(
        #     shmarr=u,
        #     first=self._first,
        #     last=self._last,
        #     shm=self._shm
        # )

    def last(
        self,
        length: int = 1,

    ) -> np.ndarray:
        '''
        Return the last ``length``'s worth of ("row") entries from the
        array.

        '''
        return self.array[-length:]

    def push(
        self,
        data: np.ndarray,

        field_map: Optional[dict[str, str]] = None,
        prepend: bool = False,
        update_first: bool = True,
        start: int | None = None,

    ) -> int:
        '''
        Ring buffer like "push" to append data
        into the buffer and return updated "last" index.

        NB: no actual ring logic yet to give a "loop around" on overflow
        condition, lel.

        '''
        length = len(data)

        if prepend:
            index = (start or self._first.value) - length

            if index < 0:
                raise ValueError(
                    f'Array size of {self._len} was overrun during prepend.\n'
                    f'You have passed {abs(index)} too many datums.'
                )

        else:
            index = start if start is not None else self._last.value

        end = index + length

        if field_map:
            src_names, dst_names = zip(*field_map.items())
        else:
            dst_names = src_names = self._write_fields

        try:
            self._array[
                list(dst_names)
            ][index:end] = data[list(src_names)][:]

            # NOTE: there was a race here between updating
            # the first and last indices and when the next reader
            # tries to access ``.array`` (which due to the index
            # overlap will be empty). Pretty sure we've fixed it now
            # but leaving this here as a reminder.
            if (
                prepend
                and update_first
                and length
            ):
                assert index < self._first.value

            if (
                index < self._first.value
                and update_first
            ):
                assert prepend, 'prepend=True not passed but index decreased?'
                self._first.value = index

            elif not prepend:
                self._last.value = end

            self._post_init = True
            return end

        except ValueError as err:
            if field_map:
                raise

            # should raise if diff detected
            self.diff_err_fields(data)
            raise err

    def diff_err_fields(
        self,
        data: np.ndarray,
    ) -> None:
        # reraise with any field discrepancy
        our_fields, their_fields = (
            set(self._array.dtype.fields),
            set(data.dtype.fields),
        )

        only_in_ours = our_fields - their_fields
        only_in_theirs = their_fields - our_fields

        if only_in_ours:
            raise TypeError(
                f"Input array is missing field(s): {only_in_ours}"
            )
        elif only_in_theirs:
            raise TypeError(
                f"Input array has unknown field(s): {only_in_theirs}"
            )

    # TODO: support "silent" prepends that don't update ._first.value?
    def prepend(
        self,
        data: np.ndarray,
    ) -> int:
        end = self.push(data, prepend=True)
        assert end

    def close(self) -> None:
        self._first._shm.close()
        self._last._shm.close()
        self._shm.close()

    def destroy(self) -> None:
        if _USE_POSIX:
            # We manually unlink to bypass all the "resource tracker"
            # nonsense meant for non-SC systems.
            shm_unlink(self._shm.name)

        self._first.destroy()
        self._last.destroy()

    def flush(self) -> None:
        # TODO: flush to storage backend like marketstore?
        ...
def open_shm_ndarray(
    size: int,
    key: str | None = None,
    dtype: np.dtype | None = None,
    append_start_index: int | None = None,
    readonly: bool = False,

) -> ShmArray:
    '''
    Open a shared memory ``numpy`` array using the standard library.

    This call unlinks (aka permanently destroys) the buffer on teardown
    and thus should be used from the parent-most accessor (process).

    '''
    # create new shared mem segment for which we
    # have write permission
    a = np.zeros(size, dtype=dtype)
    a['index'] = np.arange(len(a))

    shm = SharedMemory(
        name=key,
        create=True,
        size=a.nbytes
    )
    array = np.ndarray(
        a.shape,
        dtype=a.dtype,
        buffer=shm.buf
    )
    array[:] = a[:]
    array.setflags(write=int(not readonly))

    token = _make_token(
        key=key,
        size=size,
        dtype=dtype,
    )

    # create single entry arrays for storing the first and last indices
    first = SharedInt(
        shm=SharedMemory(
            name=token.shm_first_index_name,
            create=True,
            size=4,  # std int
        )
    )

    last = SharedInt(
        shm=SharedMemory(
            name=token.shm_last_index_name,
            create=True,
            size=4,  # std int
        )
    )

    # Start the "real-time" append-updated (or "pushed-to") section
    # after some start index: ``append_start_index``. This allows appending
    # from a start point in the array which isn't the 0 index and looks
    # something like,
    # -------------------------
    # |               |     i
    # _________________________
    # <-------------> <------->
    #  history         real-time
    #
    # Once fully "prepended", the history section will leave the
    # ``ShmArray._start.value: int = 0`` and the yet-to-be written
    # real-time section will start at ``ShmArray.index: int``.

    # this sets the index to nearly 2/3rds into the length of
    # the buffer leaving at least a "days worth of second samples"
    # for the real-time section.
    if append_start_index is None:
        append_start_index = round(size * 0.616)

    last.value = first.value = append_start_index

    shmarr = ShmArray(
        array,
        first,
        last,
        shm,
    )

    assert shmarr._token == token
    _known_tokens[key] = shmarr.token

    # "unlink" created shm on process teardown by
    # pushing teardown calls onto actor context stack
    stack = tractor.current_actor().lifetime_stack
    stack.callback(shmarr.close)
    stack.callback(shmarr.destroy)

    return shmarr


def attach_shm_ndarray(
    token: tuple[str, str, tuple[str, str]],
    readonly: bool = True,

) -> ShmArray:
    '''
    Attach to an existing shared memory array previously
    created by another process using ``open_shared_array``.

    No new shared mem is allocated but wrapper types for read/write
    access are constructed.

    '''
    token = NDToken.from_msg(token)
    key = token.shm_name

    if key in _known_tokens:
        assert NDToken.from_msg(_known_tokens[key]) == token, "WTF"

    # XXX: ugh, looks like due to the ``shm_open()`` C api we can't
    # actually place files in a subdir, see discussion here:
    # https://stackoverflow.com/a/11103289

    # attach to array buffer and view as per dtype
    _err: Optional[Exception] = None
    for _ in range(3):
        try:
            shm = SharedMemory(
                name=key,
                create=False,
            )
            break
        except OSError as oserr:
            _err = oserr
            time.sleep(0.1)
    else:
        if _err:
            raise _err

    shmarr = np.ndarray(
        (token.size,),
        dtype=token.dtype,
        buffer=shm.buf
    )
    shmarr.setflags(write=int(not readonly))

    first = SharedInt(
        shm=SharedMemory(
            name=token.shm_first_index_name,
            create=False,
            size=4,  # std int
        ),
    )
    last = SharedInt(
        shm=SharedMemory(
            name=token.shm_last_index_name,
            create=False,
            size=4,  # std int
        ),
    )

    # make sure we can read
    first.value

    sha = ShmArray(
        shmarr,
        first,
        last,
        shm,
    )
    # read test
    sha.array

    # Stash key -> token knowledge for future queries
    # via `maybe_open_shm_array()` but only after we know
    # we can attach.
    if key not in _known_tokens:
        _known_tokens[key] = token

    # "close" attached shm on actor teardown
    tractor.current_actor().lifetime_stack.callback(sha.close)

    return sha


def maybe_open_shm_ndarray(
    key: str,  # unique identifier for segment
    size: int,
    dtype: np.dtype | None = None,
    append_start_index: int = 0,
    readonly: bool = True,

) -> tuple[ShmArray, bool]:
    '''
    Attempt to attach to a shared memory block using a "key" lookup
    to registered blocks in the user's overall "system" registry
    (presumes you don't have the block's explicit token).

    This function is meant to solve the problem of discovering whether
    a shared array token has been allocated or discovered by the actor
    running in **this** process. Systems where multiple actors may seek
    to access a common block can use this function to attempt to acquire
    a token as discovered by the actors who have previously stored
    a "key" -> ``NDToken`` map in an actor local (aka python global)
    variable.

    If you know the explicit ``NDToken`` for your memory segment instead
    use ``attach_shm_array``.

    '''
    try:
        # see if we already know this key
        token = _known_tokens[key]
        return (
            attach_shm_ndarray(
                token=token,
                readonly=readonly,
            ),
            False,  # not newly opened
        )
    except KeyError:
        log.warning(f"Could not find {key} in shms cache")
        if dtype:
            token = _make_token(
                key,
                size=size,
                dtype=dtype,
            )
        else:

            try:
                return (
                    attach_shm_ndarray(
                        token=token,
                        readonly=readonly,
                    ),
                    False,
                )
            except FileNotFoundError:
                log.warning(f"Could not attach to shm with token {token}")

        # This actor does not know about memory
        # associated with the provided "key".
        # Attempt to open a block and expect
        # to fail if a block has been allocated
        # on the OS by someone else.
        return (
            open_shm_ndarray(
                key=key,
                size=size,
                dtype=dtype,
                append_start_index=append_start_index,
                readonly=readonly,
            ),
            True,
        )
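An illustrative (hedged) sketch of the ndarray API above; the `tractor._shm` import path is an assumption since the new file's name isn't shown here, and `numpy` plus a running actor runtime are required:

import numpy as np
import trio
import tractor
from tractor._shm import (  # assumed module path
    open_shm_ndarray,
    attach_shm_ndarray,
)

async def main():
    async with tractor.open_root_actor(name='shm_demo'):
        # note the first field is treated as the (auto-filled)
        # primary key and is excluded from `.push()` writes
        dtype = np.dtype([('index', int), ('value', float)])
        shma = open_shm_ndarray(
            size=32,
            key='demo.shm_array',
            dtype=dtype,
        )

        data = np.zeros(4, dtype=dtype)
        data['value'] = [1., 2., 3., 4.]
        shma.push(data)

        # normally done from another process via the msg-safe token
        shma2 = attach_shm_ndarray(shma.token, readonly=True)
        print(shma2.last(2))  # the last 2 pushed rows

trio.run(main)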
class ShmList(ShareableList):
    '''
    Carbon copy of ``.shared_memory.ShareableList`` with a few
    enhancements:

    - readonly mode via instance var flag `._readonly: bool`
    - ``.__getitem__()`` accepts ``slice`` inputs
    - exposes the underlying buffer "name" as a ``.key: str``

    '''
    def __init__(
        self,
        sequence: list | None = None,
        *,
        name: str | None = None,
        readonly: bool = True

    ) -> None:
        self._readonly = readonly
        self._key = name
        return super().__init__(
            sequence=sequence,
            name=name,
        )

    @property
    def key(self) -> str:
        return self._key

    @property
    def readonly(self) -> bool:
        return self._readonly

    def __setitem__(
        self,
        position,
        value,

    ) -> None:

        # mimic ``numpy`` error
        if self._readonly:
            raise ValueError('assignment destination is read-only')

        return super().__setitem__(position, value)

    def __getitem__(
        self,
        indexish,
    ) -> list:

        # NOTE: this is a non-writeable view (copy?) of the buffer
        # in a new list instance.
        if isinstance(indexish, slice):
            return list(self)[indexish]

        return super().__getitem__(indexish)

    # TODO: should we offer a `.array` and `.push()` equivalent
    # to the `ShmArray`?
    # currently we have the following limitations:
    # - can't write slices of input using traditional slice-assign
    #   syntax due to the ``ShareableList.__setitem__()`` implementation.
    # - ``list(shmlist)`` returns a non-mutable copy instead of
    #   a writeable view which would be handier for numpy-style ops.


def open_shm_list(
    key: str,
    sequence: list | None = None,
    size: int = int(2 ** 10),
    dtype: float | int | bool | str | bytes | None = float,
    readonly: bool = True,

) -> ShmList:

    if sequence is None:
        default = {
            float: 0.,
            int: 0,
            bool: True,
            str: 'doggy',
            None: None,
        }[dtype]
        sequence = [default] * size

    shml = ShmList(
        sequence=sequence,
        name=key,
        readonly=readonly,
    )

    # "close" attached shm on actor teardown
    try:
        actor = tractor.current_actor()
        actor.lifetime_stack.callback(shml.shm.close)
        actor.lifetime_stack.callback(shml.shm.unlink)
    except RuntimeError:
        log.warning('tractor runtime not active, skipping teardown steps')

    return shml


def attach_shm_list(
    key: str,
    readonly: bool = False,

) -> ShmList:

    return ShmList(
        name=key,
        readonly=readonly,
    )
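And a matching sketch for the `ShmList` wrapper (same assumed module path; runs without an active tractor runtime, in which case the teardown hooks are skipped and cleanup is manual):

from tractor._shm import (  # assumed module path
    open_shm_list,
    attach_shm_list,
)

shml = open_shm_list(
    key='demo.shm_list',
    size=16,
    dtype=float,
    readonly=False,
)
shml[0] = 3.14

# read-only attachment, eg. from another process
ro = attach_shm_list(key='demo.shm_list', readonly=True)
assert ro[0] == 3.14
assert ro[0:2] == [3.14, 0.]  # slice support added by `ShmList`
try:
    ro[1] = 2.72
except ValueError:
    print('read-only, as expected')

# manual cleanup (normally hooked on the actor lifetime stack)
ro.shm.close()
shml.shm.close()
shml.shm.unlink()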
@@ -35,7 +35,7 @@ from exceptiongroup import BaseExceptionGroup
import trio
from trio_typing import TaskStatus

-from ._debug import (
+from .devx import (
    maybe_wait_for_debugger,
    acquire_debug_lock,
)
@@ -144,7 +144,7 @@ async def exhaust_portal(

    # XXX: streams should never be reaped here since they should
    # always be established and shutdown using a context manager api
-    final = await portal.result()
+    final: Any = await portal.result()

    except (
        Exception,
@@ -152,13 +152,23 @@ async def exhaust_portal(
    ) as err:
        # we reraise in the parent task via a ``BaseExceptionGroup``
        return err

    except trio.Cancelled as err:
        # lol, of course we need this too ;P
        # TODO: merge with above?
-        log.warning(f"Cancelled result waiter for {portal.actor.uid}")
+        log.warning(
+            'Cancelled portal result waiter task:\n'
+            f'uid: {portal.channel.uid}\n'
+            f'error: {err}\n'
+        )
        return err

    else:
-        log.debug(f"Returning final result: {final}")
+        log.debug(
+            f'Returning final result from portal:\n'
+            f'uid: {portal.channel.uid}\n'
+            f'result: {final}\n'
+        )
        return final
@@ -170,41 +180,71 @@ async def cancel_on_completion(

) -> None:
    '''
-    Cancel actor gracefully once it's "main" portal's
+    Cancel actor gracefully once its "main" portal's
    result arrives.

-    Should only be called for actors spawned with `run_in_actor()`.
+    Should only be called for actors spawned via the
+    `Portal.run_in_actor()` API.
+
+    => and really this API will be deprecated and should be
+    re-implemented as a `.hilevel.one_shot_task_nursery()`..)

    '''
    # if this call errors we store the exception for later
    # in ``errors`` which will be reraised inside
    # an exception group and we still send out a cancel request
-    result = await exhaust_portal(portal, actor)
+    result: Any|Exception = await exhaust_portal(portal, actor)
    if isinstance(result, Exception):
-        errors[actor.uid] = result
-        log.warning(
-            f"Cancelling {portal.channel.uid} after error {result}"
+        errors[actor.uid]: Exception = result
+        log.cancel(
+            'Cancelling subactor runtime due to error:\n\n'
+            f'Portal.cancel_actor() => {portal.channel.uid}\n\n'
+            f'error: {result}\n'
        )

    else:
        log.runtime(
-            f"Cancelling {portal.channel.uid} gracefully "
-            f"after result {result}")
+            'Cancelling subactor gracefully:\n\n'
+            f'Portal.cancel_actor() => {portal.channel.uid}\n\n'
+            f'result: {result}\n'
+        )

    # cancel the process now that we have a final result
    await portal.cancel_actor()
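These helpers back the one-shot `run_in_actor()` pattern; roughly, from the user side:

import trio
import tractor

async def echo(msg: str) -> str:
    return msg

async def main():
    async with tractor.open_nursery() as an:
        portal = await an.run_in_actor(echo, msg='yo')
        # once the result arrives `cancel_on_completion()`
        # gracefully cancels the subactor for us
        print(await portal.result())

if __name__ == '__main__':
    trio.run(main)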
async def do_hard_kill(
|
||||
async def hard_kill(
|
||||
proc: trio.Process,
|
||||
terminate_after: int = 3,
|
||||
|
||||
# NOTE: for mucking with `.pause()`-ing inside the runtime
|
||||
# whilst also hacking on it XD
|
||||
# terminate_after: int = 99999,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Un-gracefully terminate an OS level `trio.Process` after timeout.
|
||||
|
||||
Used in 2 main cases:
|
||||
|
||||
- "unknown remote runtime state": a hanging/stalled actor that
|
||||
isn't responding after sending a (graceful) runtime cancel
|
||||
request via an IPC msg.
|
||||
- "cancelled during spawn": a process who's actor runtime was
|
||||
cancelled before full startup completed (such that
|
||||
cancel-request-handling machinery was never fully
|
||||
initialized) and thus a "cancel request msg" is never going
|
||||
to be handled.
|
||||
|
||||
'''
|
||||
log.cancel(
|
||||
'Terminating sub-proc:\n'
|
||||
f'|_{proc}\n'
|
||||
)
|
||||
# NOTE: this timeout used to do nothing since we were shielding
|
||||
# the ``.wait()`` inside ``new_proc()`` which will pretty much
|
||||
# never release until the process exits, now it acts as
|
||||
# a hard-kill time ultimatum.
|
||||
log.debug(f"Terminating {proc}")
|
||||
with trio.move_on_after(terminate_after) as cs:
|
||||
|
||||
# NOTE: code below was copied verbatim from the now deprecated
|
||||
|
@ -215,6 +255,9 @@ async def do_hard_kill(
|
|||
# and wait for it to exit. If cancelled, kills the process and
|
||||
# waits for it to finish exiting before propagating the
|
||||
# cancellation.
|
||||
#
|
||||
# This code was originally triggered by ``proc.__aexit__()``
|
||||
# but now must be called manually.
|
||||
with trio.CancelScope(shield=True):
|
||||
if proc.stdin is not None:
|
||||
await proc.stdin.aclose()
|
||||
|
@ -230,15 +273,22 @@ async def do_hard_kill(
|
|||
with trio.CancelScope(shield=True):
|
||||
await proc.wait()
|
||||
|
||||
# XXX NOTE XXX: zombie squad dispatch:
|
||||
# (should ideally never, but) If we do get here it means
|
||||
# graceful termination of a process failed and we need to
|
||||
# resort to OS level signalling to interrupt and cancel the
|
||||
# (presumably stalled or hung) actor. Since we never allow
|
||||
# zombies (as a feature) we ask the OS to send in the
|
||||
# removal squad as the last resort.
|
||||
if cs.cancelled_caught:
|
||||
# XXX: should pretty much never get here unless we have
|
||||
# to move the bits from ``proc.__aexit__()`` out and
|
||||
# into here.
|
||||
log.critical(f"#ZOMBIE_LORD_IS_HERE: {proc}")
|
||||
log.critical(
|
||||
'Well, the #ZOMBIE_LORD_IS_HERE# to collect\n'
|
||||
f'|_{proc}\n'
|
||||
)
|
||||
proc.kill()
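Stripped of runtime details, the "graceful wait with a hard-kill ultimatum" pattern above reduces to a few lines of plain ``trio``; a minimal sketch (the helper name is illustrative, not part of ``tractor``'s API):

.. code:: python

    import trio

    async def reap_with_ultimatum(
        proc: trio.Process,
        timeout: float = 3,
    ) -> None:
        # give the child `timeout` seconds to exit on its own..
        with trio.move_on_after(timeout) as cs:
            await proc.wait()

        if cs.cancelled_caught:
            # ..the ultimatum expired: resort to OS-level SIGKILL
            # and shield the final reap from surrounding cancellation.
            proc.kill()
            with trio.CancelScope(shield=True):
                await proc.wait()

Note the first ``.wait()`` is deliberately *unshielded* so the timeout can actually interrupt it, per the "timeout used to do nothing" note above.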
|
||||
|
||||
|
||||
async def soft_wait(
|
||||
async def soft_kill(
|
||||
|
||||
proc: ProcessType,
|
||||
wait_func: Callable[
|
||||
|
@ -248,14 +298,26 @@ async def soft_wait(
|
|||
portal: Portal,
|
||||
|
||||
) -> None:
|
||||
# Wait for proc termination but **dont' yet** call
|
||||
# ``trio.Process.__aexit__()`` (it tears down stdio
|
||||
# which will kill any waiting remote pdb trace).
|
||||
# This is a "soft" (cancellable) join/reap.
|
||||
uid = portal.channel.uid
|
||||
'''
|
||||
Wait for proc termination but **don't yet** teardown
|
||||
std-streams since it will clobber any ongoing pdb REPL
|
||||
session.
|
||||
|
||||
This is our "soft"/graceful, and thus itself also cancellable,
|
||||
join/reap on an actor-runtime-in-process shutdown; it is
|
||||
**not** the same as a "hard kill" via an OS signal (for that
|
||||
see `.hard_kill()`).
|
||||
|
||||
'''
|
||||
uid: tuple[str, str] = portal.channel.uid
|
||||
try:
|
||||
log.cancel(f'Soft waiting on actor:\n{uid}')
|
||||
log.cancel(
|
||||
'Soft killing sub-actor via `Portal.cancel_actor()`\n'
|
||||
f'|_{proc}\n'
|
||||
)
|
||||
# wait on sub-proc to signal termination
|
||||
await wait_func(proc)
|
||||
|
||||
except trio.Cancelled:
|
||||
# if cancelled during a soft wait, cancel the child
|
||||
# actor before entering the hard reap sequence
|
||||
|
@ -267,22 +329,29 @@ async def soft_wait(
|
|||
|
||||
async def cancel_on_proc_death():
|
||||
'''
|
||||
Cancel the actor cancel request if we detect that
|
||||
that the process terminated.
|
||||
"Cancel-the-cancel" request: if we detect that the
|
||||
underlying sub-process exited prior to
|
||||
a `Portal.cancel_actor()` call completing.
|
||||
|
||||
'''
|
||||
await wait_func(proc)
|
||||
n.cancel_scope.cancel()
|
||||
|
||||
# start a task to wait on the termination of the
|
||||
# process by itself waiting on a (caller provided) wait
|
||||
# function which should unblock when the target process
|
||||
# has terminated.
|
||||
n.start_soon(cancel_on_proc_death)
|
||||
|
||||
# send the actor-runtime a cancel request.
|
||||
await portal.cancel_actor()
|
||||
|
||||
if proc.poll() is None: # type: ignore
|
||||
log.warning(
|
||||
'Actor still alive after cancel request:\n'
|
||||
f'{uid}'
|
||||
'Subactor still alive after cancel request?\n\n'
|
||||
f'uid: {uid}\n'
|
||||
f'|_{proc}\n'
|
||||
)
|
||||
|
||||
n.cancel_scope.cancel()
|
||||
raise
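In isolation the "cancel-the-cancel" race above is a small ``trio`` nursery pattern; a hedged sketch where ``send_cancel_request`` stands in for the IPC call (names are illustrative):

.. code:: python

    import trio

    async def soft_reap(
        proc: trio.Process,
        send_cancel_request,  # a ``Portal.cancel_actor()``-like coro fn
    ) -> None:
        async with trio.open_nursery() as n:

            async def cancel_the_cancel() -> None:
                # unblocks when the child exits on its own..
                await proc.wait()
                # ..so abandon the (now pointless) cancel request
                n.cancel_scope.cancel()

            n.start_soon(cancel_the_cancel)
            await send_cancel_request()
            n.cancel_scope.cancel()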
|
||||
|
||||
|
@ -294,7 +363,7 @@ async def new_proc(
|
|||
errors: dict[tuple[str, str], Exception],
|
||||
|
||||
# passed through to actor main
|
||||
bind_addr: tuple[str, int],
|
||||
bind_addrs: list[tuple[str, int]],
|
||||
parent_addr: tuple[str, int],
|
||||
_runtime_vars: dict[str, Any], # serialized and sent to _child
|
||||
|
||||
|
@ -306,7 +375,7 @@ async def new_proc(
|
|||
) -> None:
|
||||
|
||||
# lookup backend spawning target
|
||||
target = _methods[_spawn_method]
|
||||
target: Callable = _methods[_spawn_method]
|
||||
|
||||
# mark the new actor with the global spawn method
|
||||
subactor._spawn_method = _spawn_method
|
||||
|
@ -316,7 +385,7 @@ async def new_proc(
|
|||
actor_nursery,
|
||||
subactor,
|
||||
errors,
|
||||
bind_addr,
|
||||
bind_addrs,
|
||||
parent_addr,
|
||||
_runtime_vars, # run time vars
|
||||
infect_asyncio=infect_asyncio,
|
||||
|
@ -331,7 +400,7 @@ async def trio_proc(
|
|||
errors: dict[tuple[str, str], Exception],
|
||||
|
||||
# passed through to actor main
|
||||
bind_addr: tuple[str, int],
|
||||
bind_addrs: list[tuple[str, int]],
|
||||
parent_addr: tuple[str, int],
|
||||
_runtime_vars: dict[str, Any], # serialized and sent to _child
|
||||
*,
|
||||
|
@ -374,19 +443,22 @@ async def trio_proc(
|
|||
spawn_cmd.append("--asyncio")
|
||||
|
||||
cancelled_during_spawn: bool = False
|
||||
proc: trio.Process | None = None
|
||||
proc: trio.Process|None = None
|
||||
try:
|
||||
try:
|
||||
# TODO: needs ``trio_typing`` patch?
|
||||
proc = await trio.lowlevel.open_process(spawn_cmd)
|
||||
|
||||
log.runtime(f"Started {proc}")
|
||||
log.runtime(
|
||||
'Started new sub-proc\n'
|
||||
f'|_{proc}\n'
|
||||
)
|
||||
|
||||
# wait for actor to spawn and connect back to us
|
||||
# channel should have handshake completed by the
|
||||
# local actor by the time we get a ref to it
|
||||
event, chan = await actor_nursery._actor.wait_for_peer(
|
||||
subactor.uid)
|
||||
subactor.uid
|
||||
)
|
||||
|
||||
except trio.Cancelled:
|
||||
cancelled_during_spawn = True
|
||||
|
@ -417,12 +489,11 @@ async def trio_proc(
|
|||
|
||||
# send additional init params
|
||||
await chan.send({
|
||||
"_parent_main_data": subactor._parent_main_data,
|
||||
"enable_modules": subactor.enable_modules,
|
||||
"_arb_addr": subactor._arb_addr,
|
||||
"bind_host": bind_addr[0],
|
||||
"bind_port": bind_addr[1],
|
||||
"_runtime_vars": _runtime_vars,
|
||||
'_parent_main_data': subactor._parent_main_data,
|
||||
'enable_modules': subactor.enable_modules,
|
||||
'reg_addrs': subactor.reg_addrs,
|
||||
'bind_addrs': bind_addrs,
|
||||
'_runtime_vars': _runtime_vars,
|
||||
})
|
||||
|
||||
# track subactor in current nursery
|
||||
|
@ -448,7 +519,7 @@ async def trio_proc(
|
|||
# This is a "soft" (cancellable) join/reap which
|
||||
# will remote cancel the actor on a ``trio.Cancelled``
|
||||
# condition.
|
||||
await soft_wait(
|
||||
await soft_kill(
|
||||
proc,
|
||||
trio.Process.wait,
|
||||
portal
|
||||
|
@ -457,8 +528,9 @@ async def trio_proc(
|
|||
# cancel result waiter that may have been spawned in
|
||||
# tandem if not done already
|
||||
log.cancel(
|
||||
"Cancelling existing result waiter task for "
|
||||
f"{subactor.uid}")
|
||||
'Cancelling existing result waiter task for '
|
||||
f'{subactor.uid}'
|
||||
)
|
||||
nursery.cancel_scope.cancel()
|
||||
|
||||
finally:
|
||||
|
@ -476,22 +548,40 @@ async def trio_proc(
|
|||
with trio.move_on_after(0.5):
|
||||
await proc.wait()
|
||||
|
||||
if is_root_process():
|
||||
# TODO: solve the following issue where we need
|
||||
# to do a similar wait like this but in an
|
||||
# "intermediary" parent actor that itself isn't
|
||||
# in debug but has a child that is, and we need
|
||||
# to hold off on relaying SIGINT until that child
|
||||
# is complete.
|
||||
# https://github.com/goodboy/tractor/issues/320
|
||||
await maybe_wait_for_debugger(
|
||||
child_in_debug=_runtime_vars.get(
|
||||
'_debug_mode', False),
|
||||
)
|
||||
await maybe_wait_for_debugger(
|
||||
child_in_debug=_runtime_vars.get(
|
||||
'_debug_mode', False
|
||||
),
|
||||
header_msg=(
|
||||
'Delaying subproc reaper while debugger locked..\n'
|
||||
),
|
||||
|
||||
# TODO: need a diff value then default?
|
||||
# poll_steps=9999999,
|
||||
)
|
||||
# TODO: solve the following issue where we need
|
||||
# to do a similar wait like this but in an
|
||||
# "intermediary" parent actor that itself isn't
|
||||
# in debug but has a child that is, and we need
|
||||
# to hold off on relaying SIGINT until that child
|
||||
# is complete.
|
||||
# https://github.com/goodboy/tractor/issues/320
|
||||
# -[ ] we need to handle non-root parent-actors specially
|
||||
# by somehow determining if a child is in debug and then
|
||||
# avoiding cancel/kill of said child by this
|
||||
# (intermediary) parent until such a time as the root says
|
||||
# the pdb lock is released and we are good to tear down
|
||||
# (our children)..
|
||||
#
|
||||
# -[ ] so maybe something like this where we try to
|
||||
# acquire the lock and get notified of who has it,
|
||||
# check that uid against our known children?
|
||||
# this_uid: tuple[str, str] = current_actor().uid
|
||||
# await acquire_debug_lock(this_uid)
|
||||
|
||||
if proc.poll() is None:
|
||||
log.cancel(f"Attempting to hard kill {proc}")
|
||||
await do_hard_kill(proc)
|
||||
await hard_kill(proc)
|
||||
|
||||
log.debug(f"Joined {proc}")
|
||||
else:
|
||||
|
@ -509,7 +599,7 @@ async def mp_proc(
|
|||
subactor: Actor,
|
||||
errors: dict[tuple[str, str], Exception],
|
||||
# passed through to actor main
|
||||
bind_addr: tuple[str, int],
|
||||
bind_addrs: list[tuple[str, int]],
|
||||
parent_addr: tuple[str, int],
|
||||
_runtime_vars: dict[str, Any], # serialized and sent to _child
|
||||
*,
|
||||
|
@ -567,7 +657,7 @@ async def mp_proc(
|
|||
target=_mp_main,
|
||||
args=(
|
||||
subactor,
|
||||
bind_addr,
|
||||
bind_addrs,
|
||||
fs_info,
|
||||
_spawn_method,
|
||||
parent_addr,
|
||||
|
@ -635,7 +725,7 @@ async def mp_proc(
|
|||
# This is a "soft" (cancellable) join/reap which
|
||||
# will remote cancel the actor on a ``trio.Cancelled``
|
||||
# condition.
|
||||
await soft_wait(
|
||||
await soft_kill(
|
||||
proc,
|
||||
proc_waiter,
|
||||
portal
|
||||
|
|
|
@ -21,8 +21,9 @@ The machinery and types behind ``Context.open_stream()``
|
|||
|
||||
'''
|
||||
from __future__ import annotations
|
||||
import inspect
|
||||
from contextlib import asynccontextmanager as acm
|
||||
import inspect
|
||||
from pprint import pformat
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
|
@ -34,7 +35,8 @@ import warnings
|
|||
import trio
|
||||
|
||||
from ._exceptions import (
|
||||
unpack_error,
|
||||
_raise_from_no_key_in_msg,
|
||||
ContextCancelled,
|
||||
)
|
||||
from .log import get_logger
|
||||
from .trionics import (
|
||||
|
@ -54,7 +56,6 @@ log = get_logger(__name__)
|
|||
# messages? class ReceiveChannel(AsyncResource, Generic[ReceiveType]):
|
||||
# - use __slots__ on ``Context``?
|
||||
|
||||
|
||||
class MsgStream(trio.abc.Channel):
|
||||
'''
|
||||
A bidirectional message stream for receiving logically sequenced
|
||||
|
@ -85,74 +86,78 @@ class MsgStream(trio.abc.Channel):
|
|||
self._broadcaster = _broadcaster
|
||||
|
||||
# flag to denote end of stream
|
||||
self._eoc: bool = False
|
||||
self._closed: bool = False
|
||||
self._eoc: bool|trio.EndOfChannel = False
|
||||
self._closed: bool|trio.ClosedResourceError = False
|
||||
|
||||
# delegate directly to underlying mem channel
|
||||
def receive_nowait(self):
|
||||
msg = self._rx_chan.receive_nowait()
|
||||
return msg['yield']
|
||||
try:
|
||||
return msg['yield']
|
||||
except KeyError as kerr:
|
||||
_raise_from_no_key_in_msg(
|
||||
ctx=self._ctx,
|
||||
msg=msg,
|
||||
src_err=kerr,
|
||||
log=log,
|
||||
expect_key='yield',
|
||||
stream=self,
|
||||
)
|
||||
|
||||
async def receive(self):
|
||||
'''Async receive a single msg from the IPC transport, the next
|
||||
in sequence for this stream.
|
||||
'''
|
||||
Receive a single msg from the IPC transport, the next in
|
||||
sequence sent by the far end task (possibly in order as
|
||||
determined by the underlying protocol).
|
||||
|
||||
'''
|
||||
# NOTE: `trio.ReceiveChannel` implements
|
||||
# EOC handling as follows (aka uses it
|
||||
# to gracefully exit async for loops):
|
||||
#
|
||||
# async def __anext__(self) -> ReceiveType:
|
||||
# try:
|
||||
# return await self.receive()
|
||||
# except trio.EndOfChannel:
|
||||
# raise StopAsyncIteration
|
||||
|
||||
# see ``.aclose()`` for notes on the old behaviour prior to
|
||||
# introducing this
|
||||
if self._eoc:
|
||||
raise trio.EndOfChannel
|
||||
raise self._eoc
|
||||
|
||||
if self._closed:
|
||||
raise trio.ClosedResourceError('This stream was closed')
|
||||
raise self._closed
|
||||
|
||||
src_err: Exception|None = None
|
||||
try:
|
||||
msg = await self._rx_chan.receive()
|
||||
return msg['yield']
|
||||
try:
|
||||
msg = await self._rx_chan.receive()
|
||||
return msg['yield']
|
||||
|
||||
except KeyError as err:
|
||||
# internal error should never get here
|
||||
assert msg.get('cid'), ("Received internal error at portal?")
|
||||
except KeyError as kerr:
|
||||
# log.exception('GOT KEYERROR')
|
||||
src_err = kerr
|
||||
|
||||
# TODO: handle 2 cases with 3.10 match syntax
|
||||
# - 'stop'
|
||||
# - 'error'
|
||||
# possibly just handle msg['stop'] here!
|
||||
|
||||
if self._closed:
|
||||
raise trio.ClosedResourceError('This stream was closed')
|
||||
|
||||
if msg.get('stop') or self._eoc:
|
||||
log.debug(f"{self} was stopped at remote end")
|
||||
|
||||
# XXX: important to set so that a new ``.receive()``
|
||||
# call (likely by another task using a broadcast receiver)
|
||||
# doesn't accidentally pull the ``return`` message
|
||||
# value out of the underlying feed mem chan!
|
||||
self._eoc = True
|
||||
|
||||
# # when the send is closed we assume the stream has
|
||||
# # terminated and signal this local iterator to stop
|
||||
# await self.aclose()
|
||||
|
||||
# XXX: this causes ``ReceiveChannel.__anext__()`` to
|
||||
# raise a ``StopAsyncIteration`` **and** in our catch
|
||||
# block below it will trigger ``.aclose()``.
|
||||
raise trio.EndOfChannel from err
|
||||
|
||||
# TODO: test that shows stream raising an expected error!!!
|
||||
elif msg.get('error'):
|
||||
# raise the error message
|
||||
raise unpack_error(msg, self._ctx.chan)
|
||||
|
||||
else:
|
||||
raise
|
||||
# NOTE: may raise any of the below error types
|
||||
# including EoC when a 'stop' msg is found.
|
||||
_raise_from_no_key_in_msg(
|
||||
ctx=self._ctx,
|
||||
msg=msg,
|
||||
src_err=kerr,
|
||||
log=log,
|
||||
expect_key='yield',
|
||||
stream=self,
|
||||
)
|
||||
|
||||
# XXX: we close the stream on any of these error conditions:
|
||||
except (
|
||||
trio.ClosedResourceError, # by self._rx_chan
|
||||
# trio.ClosedResourceError, # by self._rx_chan
|
||||
trio.EndOfChannel, # by self._rx_chan or `stop` msg from far end
|
||||
):
|
||||
# XXX: we close the stream on any of these error conditions:
|
||||
) as eoc:
|
||||
# log.exception('GOT EOC')
|
||||
src_err = eoc
|
||||
self._eoc = eoc
|
||||
|
||||
# a ``ClosedResourceError`` indicates that the internal
|
||||
# feeder memory receive channel was closed likely by the
|
||||
|
@ -175,32 +180,124 @@ class MsgStream(trio.abc.Channel):
|
|||
# closing this stream and not flushing a final value to
|
||||
# remaining (clone) consumers who may not have been
|
||||
# scheduled to receive it yet.
|
||||
# try:
|
||||
# maybe_err_msg_or_res: dict = self._rx_chan.receive_nowait()
|
||||
# if maybe_err_msg_or_res:
|
||||
# log.warning(
|
||||
# 'Discarding un-processed msg:\n'
|
||||
# f'{maybe_err_msg_or_res}'
|
||||
# )
|
||||
# except trio.WouldBlock:
|
||||
# # no queued msgs that might be another remote
|
||||
# # error, so just raise the original EoC
|
||||
# pass
|
||||
|
||||
# when the send is closed we assume the stream has
|
||||
# terminated and signal this local iterator to stop
|
||||
await self.aclose()
|
||||
# raise eoc
|
||||
|
||||
raise # propagate
|
||||
except trio.ClosedResourceError as cre: # by self._rx_chan
|
||||
# log.exception('GOT CRE')
|
||||
src_err = cre
|
||||
log.warning(
|
||||
'`Context._rx_chan` was already closed?'
|
||||
)
|
||||
self._closed = cre
|
||||
|
||||
async def aclose(self):
|
||||
# when the send is closed we assume the stream has
|
||||
# terminated and signal this local iterator to stop
|
||||
drained: list[Exception|dict] = await self.aclose()
|
||||
if drained:
|
||||
# from .devx import pause
|
||||
# await pause()
|
||||
log.warning(
|
||||
'Drained context msgs during closure:\n'
|
||||
f'{drained}'
|
||||
)
|
||||
# TODO: pass these to the `._ctx._drained_msgs: deque`
|
||||
# and then iterate them as part of any `.result()` call?
|
||||
|
||||
# NOTE XXX: if the context was cancelled or remote-errored
|
||||
# but we received the stream close msg first, we
|
||||
# probably want to instead raise the remote error
|
||||
# over the end-of-stream connection error since likely
|
||||
# the remote error was the source cause?
|
||||
ctx: Context = self._ctx
|
||||
if re := ctx._remote_error:
|
||||
ctx._maybe_raise_remote_err(
|
||||
re,
|
||||
raise_ctxc_from_self_call=True,
|
||||
)
|
||||
|
||||
raise src_err # propagate
|
||||
|
||||
async def aclose(self) -> list[Exception|dict]:
|
||||
'''
|
||||
Cancel associated remote actor task and local memory channel on
|
||||
close.
|
||||
|
||||
Notes:
|
||||
- REMEMBER that this is also called by `.__aexit__()` so
|
||||
careful consideration must be made to handle whatever
|
||||
internal state is mutated, particularly in terms of
|
||||
draining IPC msgs!
|
||||
|
||||
- more or less we try to maintain adherence to trio's `.aclose()` semantics:
|
||||
https://trio.readthedocs.io/en/stable/reference-io.html#trio.abc.AsyncResource.aclose
|
||||
'''
|
||||
# XXX: keep proper adherence to trio's `.aclose()` semantics:
|
||||
# https://trio.readthedocs.io/en/stable/reference-io.html#trio.abc.AsyncResource.aclose
|
||||
rx_chan = self._rx_chan
|
||||
|
||||
if rx_chan._closed:
|
||||
log.cancel(f"{self} is already closed")
|
||||
# rx_chan = self._rx_chan
|
||||
|
||||
# XXX NOTE XXX
|
||||
# it's SUPER IMPORTANT that we ensure we don't DOUBLE
|
||||
# DRAIN msgs on closure so as to avoid getting stuck hanging on
|
||||
# the `._rx_chan` since we call this method on
|
||||
# `.__aexit__()` as well!!!
|
||||
# => SO ENSURE WE CATCH ALL TERMINATION STATES in this
|
||||
# block including the EoC..
|
||||
if self.closed:
|
||||
# this stream has already been closed so silently succeed as
|
||||
# per ``trio.AsyncResource`` semantics.
|
||||
# https://trio.readthedocs.io/en/stable/reference-io.html#trio.abc.AsyncResource.aclose
|
||||
return
|
||||
return []
|
||||
|
||||
self._eoc = True
|
||||
ctx: Context = self._ctx
|
||||
drained: list[Exception|dict] = []
|
||||
while not drained:
|
||||
try:
|
||||
maybe_final_msg = self.receive_nowait()
|
||||
if maybe_final_msg:
|
||||
log.debug(
|
||||
'Drained un-processed stream msg:\n'
|
||||
f'{pformat(maybe_final_msg)}'
|
||||
)
|
||||
# TODO: inject into parent `Context` buf?
|
||||
drained.append(maybe_final_msg)
|
||||
|
||||
# NOTE: we only need these handlers due to the
|
||||
# `.receive_nowait()` call above which may re-raise
|
||||
# one of these errors on a msg key error!
|
||||
|
||||
except trio.WouldBlock as be:
|
||||
drained.append(be)
|
||||
break
|
||||
|
||||
except trio.EndOfChannel as eoc:
|
||||
self._eoc: Exception = eoc
|
||||
drained.append(eoc)
|
||||
break
|
||||
|
||||
except trio.ClosedResourceError as cre:
|
||||
self._closed = cre
|
||||
drained.append(cre)
|
||||
break
|
||||
|
||||
except ContextCancelled as ctxc:
|
||||
# log.exception('GOT CTXC')
|
||||
log.cancel(
|
||||
'Context was cancelled during stream closure:\n'
|
||||
f'canceller: {ctxc.canceller}\n'
|
||||
f'{pformat(ctxc.msgdata)}'
|
||||
)
|
||||
break
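The closure-drain loop above amounts to non-blockingly flushing a ``trio`` memory channel; a minimal standalone sketch of the same idea:

.. code:: python

    import trio

    def drain_nowait(rx: trio.MemoryReceiveChannel) -> list:
        # flush any already-queued msgs without blocking, stopping
        # on empty-buffer, end-of-channel or closure conditions.
        drained: list = []
        while True:
            try:
                drained.append(rx.receive_nowait())
            except (
                trio.WouldBlock,  # nothing left buffered
                trio.EndOfChannel,  # sender closed
                trio.ClosedResourceError,  # receiver closed
            ):
                break
        return drained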
|
||||
|
||||
# NOTE: this is super subtle IPC messaging stuff:
|
||||
# Relay stop iteration to far end **iff** we're
|
||||
|
@ -231,26 +328,36 @@ class MsgStream(trio.abc.Channel):
|
|||
except (
|
||||
trio.BrokenResourceError,
|
||||
trio.ClosedResourceError
|
||||
):
|
||||
) as re:
|
||||
# the underlying channel may already have been pulled
|
||||
# in which case our stop message is meaningless since
|
||||
# it can't traverse the transport.
|
||||
ctx = self._ctx
|
||||
log.warning(
|
||||
f'Stream was already destroyed?\n'
|
||||
f'actor: {ctx.chan.uid}\n'
|
||||
f'ctx id: {ctx.cid}'
|
||||
)
|
||||
drained.append(re)
|
||||
self._closed = re
|
||||
|
||||
self._closed = True
|
||||
# if caught_eoc:
|
||||
# # from .devx import _debug
|
||||
# # await _debug.pause()
|
||||
# with trio.CancelScope(shield=True):
|
||||
# await rx_chan.aclose()
|
||||
|
||||
# Do we close the local mem chan ``self._rx_chan`` ??!?
|
||||
|
||||
# NO, DEFINITELY NOT if we're a bi-dir ``MsgStream``!
|
||||
# BECAUSE this same core-msg-loop mem recv-chan is used to deliver
|
||||
# the potential final result from the surrounding inter-actor
|
||||
# `Context` so we don't want to close it until that context has
|
||||
# run to completion.
|
||||
if not self._eoc:
|
||||
self._eoc: bool = trio.EndOfChannel(
|
||||
f'Context stream closed by {self._ctx.side}\n'
|
||||
f'|_{self}\n'
|
||||
)
|
||||
# ?XXX WAIT, why do we not close the local mem chan `._rx_chan` XXX?
|
||||
# => NO, DEFINITELY NOT! <=
|
||||
# if we're a bi-dir ``MsgStream`` BECAUSE this same
|
||||
# core-msg-loop mem recv-chan is used to deliver the
|
||||
# potential final result from the surrounding inter-actor
|
||||
# `Context` so we don't want to close it until that
|
||||
# context has run to completion.
|
||||
|
||||
# XXX: Notes on old behaviour:
|
||||
# await rx_chan.aclose()
|
||||
|
@ -279,6 +386,28 @@ class MsgStream(trio.abc.Channel):
|
|||
# runtime's closure of ``rx_chan`` in the case where we may
|
||||
# still need to consume msgs that are "in transit" from the far
|
||||
# end (eg. for ``Context.result()``).
|
||||
# self._closed = True
|
||||
return drained
|
||||
|
||||
@property
|
||||
def closed(self) -> bool:
|
||||
if (
|
||||
(rxc := self._rx_chan._closed)
|
||||
or
|
||||
(_closed := self._closed)
|
||||
or
|
||||
(_eoc := self._eoc)
|
||||
):
|
||||
log.runtime(
|
||||
f'`MsgStream` is already closed\n'
|
||||
f'{self}\n'
|
||||
f' |_cid: {self._ctx.cid}\n'
|
||||
f' |_rx_chan._closed: {type(rxc)} = {rxc}\n'
|
||||
f' |_closed: {type(_closed)} = {_closed}\n'
|
||||
f' |_eoc: {type(_eoc)} = {_eoc}'
|
||||
)
|
||||
return True
|
||||
return False
|
||||
|
||||
@acm
|
||||
async def subscribe(
|
||||
|
@ -344,9 +473,13 @@ class MsgStream(trio.abc.Channel):
|
|||
raise self._ctx._remote_error # from None
|
||||
|
||||
if self._closed:
|
||||
raise trio.ClosedResourceError('This stream was already closed')
|
||||
raise self._closed
|
||||
# raise trio.ClosedResourceError('This stream was already closed')
|
||||
|
||||
await self._ctx.chan.send({'yield': data, 'cid': self._ctx.cid})
|
||||
await self._ctx.chan.send({
|
||||
'yield': data,
|
||||
'cid': self._ctx.cid,
|
||||
})
|
||||
|
||||
|
||||
def stream(func: Callable) -> Callable:
|
||||
|
|
|
@ -21,22 +21,23 @@
|
|||
from contextlib import asynccontextmanager as acm
|
||||
from functools import partial
|
||||
import inspect
|
||||
from typing import (
|
||||
Optional,
|
||||
TYPE_CHECKING,
|
||||
)
|
||||
from pprint import pformat
|
||||
from typing import TYPE_CHECKING
|
||||
import typing
|
||||
import warnings
|
||||
|
||||
from exceptiongroup import BaseExceptionGroup
|
||||
import trio
|
||||
|
||||
from ._debug import maybe_wait_for_debugger
|
||||
from .devx._debug import maybe_wait_for_debugger
|
||||
from ._state import current_actor, is_main_process
|
||||
from .log import get_logger, get_loglevel
|
||||
from ._runtime import Actor
|
||||
from ._portal import Portal
|
||||
from ._exceptions import is_multi_cancelled
|
||||
from ._exceptions import (
|
||||
is_multi_cancelled,
|
||||
ContextCancelled,
|
||||
)
|
||||
from ._root import open_root_actor
|
||||
from . import _state
|
||||
from . import _spawn
|
||||
|
@ -94,7 +95,7 @@ class ActorNursery:
|
|||
tuple[
|
||||
Actor,
|
||||
trio.Process | mp.Process,
|
||||
Optional[Portal],
|
||||
Portal | None,
|
||||
]
|
||||
] = {}
|
||||
# portals spawned with ``run_in_actor()`` are
|
||||
|
@ -106,16 +107,24 @@ class ActorNursery:
|
|||
self.errors = errors
|
||||
self.exited = trio.Event()
|
||||
|
||||
# NOTE: when no explicit call is made to
|
||||
# `.open_root_actor()` by application code,
|
||||
# `.open_nursery()` will implicitly call it to start the
|
||||
# actor-tree runtime. In this case we mark ourselves as
|
||||
# such so that runtime components can be aware for logging
|
||||
# and syncing purposes to any actor opened nurseries.
|
||||
self._implicit_runtime_started: bool = False
|
||||
|
||||
async def start_actor(
|
||||
self,
|
||||
name: str,
|
||||
*,
|
||||
bind_addr: tuple[str, int] = _default_bind_addr,
|
||||
bind_addrs: list[tuple[str, int]] = [_default_bind_addr],
|
||||
rpc_module_paths: list[str] | None = None,
|
||||
enable_modules: list[str] | None = None,
|
||||
loglevel: str | None = None, # set log level per subactor
|
||||
nursery: trio.Nursery | None = None,
|
||||
debug_mode: Optional[bool] | None = None,
|
||||
debug_mode: bool | None = None,
|
||||
infect_asyncio: bool = False,
|
||||
) -> Portal:
|
||||
'''
|
||||
|
@ -150,14 +159,16 @@ class ActorNursery:
|
|||
# modules allowed to invoked funcs from
|
||||
enable_modules=enable_modules,
|
||||
loglevel=loglevel,
|
||||
arbiter_addr=current_actor()._arb_addr,
|
||||
|
||||
# verbatim relay this actor's registrar addresses
|
||||
registry_addrs=current_actor().reg_addrs,
|
||||
)
|
||||
parent_addr = self._actor.accept_addr
|
||||
assert parent_addr
|
||||
|
||||
# start a task to spawn a process
|
||||
# blocks until process has been started and a portal setup
|
||||
nursery = nursery or self._da_nursery
|
||||
nursery: trio.Nursery = nursery or self._da_nursery
|
||||
|
||||
# XXX: the type ignore is actually due to a `mypy` bug
|
||||
return await nursery.start( # type: ignore
|
||||
|
@ -167,7 +178,7 @@ class ActorNursery:
|
|||
self,
|
||||
subactor,
|
||||
self.errors,
|
||||
bind_addr,
|
||||
bind_addrs,
|
||||
parent_addr,
|
||||
_rtv, # run time vars
|
||||
infect_asyncio=infect_asyncio,
|
||||
|
@ -180,8 +191,8 @@ class ActorNursery:
|
|||
fn: typing.Callable,
|
||||
*,
|
||||
|
||||
name: Optional[str] = None,
|
||||
bind_addr: tuple[str, int] = _default_bind_addr,
|
||||
name: str | None = None,
|
||||
bind_addrs: tuple[str, int] = [_default_bind_addr],
|
||||
rpc_module_paths: list[str] | None = None,
|
||||
enable_modules: list[str] | None = None,
|
||||
loglevel: str | None = None, # set log level per subactor
|
||||
|
@ -190,14 +201,16 @@ class ActorNursery:
|
|||
**kwargs, # explicit args to ``fn``
|
||||
|
||||
) -> Portal:
|
||||
"""Spawn a new actor, run a lone task, then terminate the actor and
|
||||
'''
|
||||
Spawn a new actor, run a lone task, then terminate the actor and
|
||||
return its result.
|
||||
|
||||
Actors spawned using this method are kept alive at nursery teardown
|
||||
until the task spawned by executing ``fn`` completes at which point
|
||||
the actor is terminated.
|
||||
"""
|
||||
mod_path = fn.__module__
|
||||
|
||||
'''
|
||||
mod_path: str = fn.__module__
|
||||
|
||||
if name is None:
|
||||
# use the explicit function name if not provided
|
||||
|
@ -208,7 +221,7 @@ class ActorNursery:
|
|||
enable_modules=[mod_path] + (
|
||||
enable_modules or rpc_module_paths or []
|
||||
),
|
||||
bind_addr=bind_addr,
|
||||
bind_addrs=bind_addrs,
|
||||
loglevel=loglevel,
|
||||
# use the run_in_actor nursery
|
||||
nursery=self._ria_nursery,
|
||||
|
@ -232,21 +245,37 @@ class ActorNursery:
|
|||
)
|
||||
return portal
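For reference, a hedged usage sketch of this one-shot API (slated for deprecation per the notes earlier in this changeset):

.. code:: python

    import tractor
    import trio

    def cube(x: int) -> int:
        return x ** 3

    async def main() -> None:
        async with tractor.open_nursery() as an:
            # spawns a subactor, runs `cube(x=3)` in it, then reaps it
            portal = await an.run_in_actor(cube, x=3)
            assert await portal.result() == 27

    if __name__ == '__main__':
        trio.run(main)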
|
||||
|
||||
async def cancel(self, hard_kill: bool = False) -> None:
|
||||
"""Cancel this nursery by instructing each subactor to cancel
|
||||
async def cancel(
|
||||
self,
|
||||
hard_kill: bool = False,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Cancel this nursery by instructing each subactor to cancel
|
||||
itself and wait for all subactors to terminate.
|
||||
|
||||
If ``hard_kill`` is set to ``True`` then kill the processes
|
||||
directly without any far end graceful ``trio`` cancellation.
|
||||
"""
|
||||
|
||||
'''
|
||||
self.cancelled = True
|
||||
|
||||
log.cancel(f"Cancelling nursery in {self._actor.uid}")
|
||||
# TODO: impl a repr for spawn more compact
|
||||
# then `._children`..
|
||||
children: dict = self._children
|
||||
child_count: int = len(children)
|
||||
msg: str = f'Cancelling actor nursery with {child_count} children\n'
|
||||
with trio.move_on_after(3) as cs:
|
||||
async with trio.open_nursery() as tn:
|
||||
|
||||
async with trio.open_nursery() as nursery:
|
||||
|
||||
for subactor, proc, portal in self._children.values():
|
||||
subactor: Actor
|
||||
proc: trio.Process
|
||||
portal: Portal
|
||||
for (
|
||||
subactor,
|
||||
proc,
|
||||
portal,
|
||||
) in children.values():
|
||||
|
||||
# TODO: are we ever even going to use this or
|
||||
# is the spawning backend responsible for such
|
||||
|
@ -258,12 +287,13 @@ class ActorNursery:
|
|||
if portal is None: # actor hasn't fully spawned yet
|
||||
event = self._actor._peer_connected[subactor.uid]
|
||||
log.warning(
|
||||
f"{subactor.uid} wasn't finished spawning?")
|
||||
f"{subactor.uid} never 't finished spawning?"
|
||||
)
|
||||
|
||||
await event.wait()
|
||||
|
||||
# channel/portal should now be up
|
||||
_, _, portal = self._children[subactor.uid]
|
||||
_, _, portal = children[subactor.uid]
|
||||
|
||||
# XXX should be impossible to get here
|
||||
# unless method was called from within
|
||||
|
@ -280,14 +310,24 @@ class ActorNursery:
|
|||
# spawn cancel tasks for each sub-actor
|
||||
assert portal
|
||||
if portal.channel.connected():
|
||||
nursery.start_soon(portal.cancel_actor)
|
||||
tn.start_soon(portal.cancel_actor)
|
||||
|
||||
log.cancel(msg)
|
||||
# if we cancelled the cancel (we hung cancelling remote actors)
|
||||
# then hard kill all sub-processes
|
||||
if cs.cancelled_caught:
|
||||
log.error(
|
||||
f"Failed to cancel {self}\nHard killing process tree!")
|
||||
for subactor, proc, portal in self._children.values():
|
||||
f'Failed to cancel {self}?\n'
|
||||
'Hard killing underlying subprocess tree!\n'
|
||||
)
|
||||
subactor: Actor
|
||||
proc: trio.Process
|
||||
portal: Portal
|
||||
for (
|
||||
subactor,
|
||||
proc,
|
||||
portal,
|
||||
) in children.values():
|
||||
log.warning(f"Hard killing process {proc}")
|
||||
proc.terminate()
|
||||
|
||||
|
@ -327,7 +367,7 @@ async def _open_and_supervise_one_cancels_all_nursery(
|
|||
# the above "daemon actor" nursery will be notified.
|
||||
async with trio.open_nursery() as ria_nursery:
|
||||
|
||||
anursery = ActorNursery(
|
||||
an = ActorNursery(
|
||||
actor,
|
||||
ria_nursery,
|
||||
da_nursery,
|
||||
|
@ -336,16 +376,16 @@ async def _open_and_supervise_one_cancels_all_nursery(
|
|||
try:
|
||||
# spawning of actors happens in the caller's scope
|
||||
# after we yield upwards
|
||||
yield anursery
|
||||
yield an
|
||||
|
||||
# When we didn't error in the caller's scope,
|
||||
# signal all process-monitor-tasks to conduct
|
||||
# the "hard join phase".
|
||||
log.runtime(
|
||||
f"Waiting on subactors {anursery._children} "
|
||||
"to complete"
|
||||
'Waiting on subactors to complete:\n'
|
||||
f'{pformat(an._children)}\n'
|
||||
)
|
||||
anursery._join_procs.set()
|
||||
an._join_procs.set()
|
||||
|
||||
except BaseException as inner_err:
|
||||
errors[actor.uid] = inner_err
|
||||
|
@ -357,37 +397,60 @@ async def _open_and_supervise_one_cancels_all_nursery(
|
|||
# Instead try to wait for pdb to be released before
|
||||
# tearing down.
|
||||
await maybe_wait_for_debugger(
|
||||
child_in_debug=anursery._at_least_one_child_in_debug
|
||||
child_in_debug=an._at_least_one_child_in_debug
|
||||
)
|
||||
|
||||
# if the caller's scope errored then we activate our
|
||||
# one-cancels-all supervisor strategy (don't
|
||||
# worry more are coming).
|
||||
anursery._join_procs.set()
|
||||
an._join_procs.set()
|
||||
|
||||
# XXX: hypothetically an error could be
|
||||
# raised and then a cancel signal shows up
|
||||
# XXX NOTE XXX: hypothetically an error could
|
||||
# be raised and then a cancel signal shows up
|
||||
# slightly after in which case the `else:`
|
||||
# block here might not complete? For now,
|
||||
# shield both.
|
||||
with trio.CancelScope(shield=True):
|
||||
etype = type(inner_err)
|
||||
etype: type = type(inner_err)
|
||||
if etype in (
|
||||
trio.Cancelled,
|
||||
KeyboardInterrupt
|
||||
KeyboardInterrupt,
|
||||
) or (
|
||||
is_multi_cancelled(inner_err)
|
||||
):
|
||||
log.cancel(
|
||||
f"Nursery for {current_actor().uid} "
|
||||
f"was cancelled with {etype}")
|
||||
f'Actor-nursery cancelled by {etype}\n\n'
|
||||
|
||||
f'{current_actor().uid}\n'
|
||||
f' |_{an}\n\n'
|
||||
|
||||
# TODO: show tb str?
|
||||
# f'{tb_str}'
|
||||
)
|
||||
elif etype in {
|
||||
ContextCancelled,
|
||||
}:
|
||||
log.cancel(
|
||||
'Actor-nursery caught remote cancellation\n\n'
|
||||
|
||||
f'{inner_err.tb_str}'
|
||||
)
|
||||
else:
|
||||
log.exception(
|
||||
f"Nursery for {current_actor().uid} "
|
||||
f"errored with")
|
||||
'Nursery errored with:\n'
|
||||
|
||||
# TODO: same thing as in
|
||||
# `._invoke()` to compute how to
|
||||
# place this div-line in the
|
||||
# middle of the above msg
|
||||
# content..
|
||||
# -[ ] prolly helper-func it too
|
||||
# in our `.log` module..
|
||||
# '------ - ------'
|
||||
)
|
||||
|
||||
# cancel all subactors
|
||||
await anursery.cancel()
|
||||
await an.cancel()
|
||||
|
||||
# ria_nursery scope end
|
||||
|
||||
|
@ -408,18 +471,22 @@ async def _open_and_supervise_one_cancels_all_nursery(
|
|||
# XXX: yet another guard before allowing the cancel
|
||||
# sequence in case a (single) child is in debug.
|
||||
await maybe_wait_for_debugger(
|
||||
child_in_debug=anursery._at_least_one_child_in_debug
|
||||
child_in_debug=an._at_least_one_child_in_debug
|
||||
)
|
||||
|
||||
# If actor-local error was raised while waiting on
|
||||
# ".run_in_actor()" actors then we also want to cancel all
|
||||
# remaining sub-actors (due to our lone strategy:
|
||||
# one-cancels-all).
|
||||
log.cancel(f"Nursery cancelling due to {err}")
|
||||
if anursery._children:
|
||||
if an._children:
|
||||
log.cancel(
|
||||
'Actor-nursery cancelling due to error type:\n'
|
||||
f'{err}\n'
|
||||
)
|
||||
with trio.CancelScope(shield=True):
|
||||
await anursery.cancel()
|
||||
await an.cancel()
|
||||
raise
|
||||
|
||||
finally:
|
||||
# No errors were raised while awaiting ".run_in_actor()"
|
||||
# actors but those actors may have returned remote errors as
|
||||
|
@ -428,9 +495,9 @@ async def _open_and_supervise_one_cancels_all_nursery(
|
|||
# collected in ``errors`` so cancel all actors, summarize
|
||||
# all errors and re-raise.
|
||||
if errors:
|
||||
if anursery._children:
|
||||
if an._children:
|
||||
with trio.CancelScope(shield=True):
|
||||
await anursery.cancel()
|
||||
await an.cancel()
|
||||
|
||||
# use `BaseExceptionGroup` as needed
|
||||
if len(errors) > 1:
|
||||
|
@ -465,7 +532,7 @@ async def open_nursery(
|
|||
which cancellation scopes correspond to each spawned subactor set.
|
||||
|
||||
'''
|
||||
implicit_runtime = False
|
||||
implicit_runtime: bool = False
|
||||
|
||||
actor = current_actor(err_on_no_runtime=False)
|
||||
|
||||
|
@ -477,7 +544,7 @@ async def open_nursery(
|
|||
log.info("Starting actor runtime!")
|
||||
|
||||
# mark us for teardown on exit
|
||||
implicit_runtime = True
|
||||
implicit_runtime: bool = True
|
||||
|
||||
async with open_root_actor(**kwargs) as actor:
|
||||
assert actor is current_actor()
|
||||
|
@ -485,24 +552,42 @@ async def open_nursery(
|
|||
try:
|
||||
async with _open_and_supervise_one_cancels_all_nursery(
|
||||
actor
|
||||
) as anursery:
|
||||
yield anursery
|
||||
) as an:
|
||||
|
||||
# NOTE: mark this nursery as having
|
||||
# implicitly started the root actor so
|
||||
# that `._runtime` machinery can avoid
|
||||
# certain teardown synchronization
|
||||
# blocking/waits and any associated (warn)
|
||||
# logging when it's known that this
|
||||
# nursery shouldn't be exited before the
|
||||
# root actor is.
|
||||
an._implicit_runtime_started = True
|
||||
yield an
|
||||
finally:
|
||||
anursery.exited.set()
|
||||
# XXX: this event will be set after the root actor
|
||||
# runtime is already torn down, so we want to
|
||||
# avoid any blocking on it.
|
||||
an.exited.set()
|
||||
|
||||
else: # sub-nursery case
|
||||
|
||||
try:
|
||||
async with _open_and_supervise_one_cancels_all_nursery(
|
||||
actor
|
||||
) as anursery:
|
||||
yield anursery
|
||||
) as an:
|
||||
yield an
|
||||
finally:
|
||||
anursery.exited.set()
|
||||
an.exited.set()
|
||||
|
||||
finally:
|
||||
log.debug("Nursery teardown complete")
|
||||
msg: str = (
|
||||
'Actor-nursery exited\n'
|
||||
f'|_{an}\n\n'
|
||||
)
|
||||
|
||||
# shutdown runtime if it was started
|
||||
if implicit_runtime:
|
||||
log.info("Shutting down actor tree")
|
||||
msg += '=> Shutting down actor runtime <=\n'
|
||||
|
||||
log.info(msg)
|
||||
|
|
|
@ -0,0 +1,37 @@
|
|||
# tractor: structured concurrent "actors".
|
||||
# Copyright 2018-eternity Tyler Goodlet.
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
Runtime "developer experience" utils and addons to aid our
|
||||
(advanced) users and core devs in building distributed applications
|
||||
and working with/on the actor runtime.
|
||||
|
||||
"""
|
||||
from ._debug import (
|
||||
maybe_wait_for_debugger as maybe_wait_for_debugger,
|
||||
acquire_debug_lock as acquire_debug_lock,
|
||||
breakpoint as breakpoint,
|
||||
pause as pause,
|
||||
pause_from_sync as pause_from_sync,
|
||||
shield_sigint_handler as shield_sigint_handler,
|
||||
MultiActorPdb as MultiActorPdb,
|
||||
open_crash_handler as open_crash_handler,
|
||||
maybe_open_crash_handler as maybe_open_crash_handler,
|
||||
post_mortem as post_mortem,
|
||||
)
|
||||
from ._stackscope import (
|
||||
enable_stack_on_sig as enable_stack_on_sig,
|
||||
)
|
File diff suppressed because it is too large
|
@ -0,0 +1,84 @@
|
|||
# tractor: structured concurrent "actors".
|
||||
# Copyright eternity Tyler Goodlet.
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
The fundamental cross process SC abstraction: an inter-actor,
|
||||
cancel-scope linked task "context".
|
||||
|
||||
A ``Context`` is very similar to the ``trio.Nursery.cancel_scope`` built
|
||||
into each ``trio.Nursery`` except it links the lifetimes of memory space
|
||||
disjoint, parallel executing tasks in separate actors.
|
||||
|
||||
'''
|
||||
from signal import (
|
||||
signal,
|
||||
SIGUSR1,
|
||||
)
|
||||
|
||||
import trio
|
||||
|
||||
@trio.lowlevel.disable_ki_protection
|
||||
def dump_task_tree() -> None:
|
||||
import stackscope
|
||||
from tractor.log import get_console_log
|
||||
|
||||
tree_str: str = str(
|
||||
stackscope.extract(
|
||||
trio.lowlevel.current_root_task(),
|
||||
recurse_child_tasks=True
|
||||
)
|
||||
)
|
||||
log = get_console_log('cancel')
|
||||
log.pdb(
|
||||
f'Dumping `stackscope` tree:\n\n'
|
||||
f'{tree_str}\n'
|
||||
)
|
||||
# import logging
|
||||
# try:
|
||||
# with open("/dev/tty", "w") as tty:
|
||||
# tty.write(tree_str)
|
||||
# except BaseException:
|
||||
# logging.getLogger(
|
||||
# "task_tree"
|
||||
# ).exception("Error printing task tree")
|
||||
|
||||
|
||||
def signal_handler(sig: int, frame: object) -> None:
|
||||
import traceback
|
||||
try:
|
||||
trio.lowlevel.current_trio_token(
|
||||
).run_sync_soon(dump_task_tree)
|
||||
except RuntimeError:
|
||||
# not in async context -- print a normal traceback
|
||||
traceback.print_stack()
|
||||
|
||||
|
||||
|
||||
def enable_stack_on_sig(
|
||||
sig: int = SIGUSR1
|
||||
) -> None:
|
||||
'''
|
||||
Enable `stackscope` tracing on reception of a signal; by
|
||||
default this is SIGUSR1.
|
||||
|
||||
'''
|
||||
signal(
|
||||
sig,
|
||||
signal_handler,
|
||||
)
|
||||
# NOTE: the above can be triggered from
|
||||
# a (xonsh) shell using:
|
||||
# kill -SIGUSR1 @$(pgrep -f '<cmd>')
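A hedged usage sketch, assuming the ``stackscope`` package is installed and using the ``tractor.devx`` export added in this changeset:

.. code:: python

    import os

    import trio
    from tractor.devx import enable_stack_on_sig

    async def main() -> None:
        enable_stack_on_sig()  # SIGUSR1 by default
        print(f'trigger a task-tree dump via: kill -SIGUSR1 {os.getpid()}')
        await trio.sleep_forever()

    if __name__ == '__main__':
        trio.run(main)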
|
|
@ -0,0 +1,129 @@
|
|||
# tractor: structured concurrent "actors".
|
||||
# Copyright 2018-eternity Tyler Goodlet.
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
"""
|
||||
CLI framework extensions for hacking on the actor runtime.
|
||||
|
||||
Currently popular frameworks supported are:
|
||||
|
||||
- `typer` via the `@callback` API
|
||||
|
||||
"""
|
||||
from __future__ import annotations
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
)
|
||||
from typing_extensions import Annotated
|
||||
|
||||
import typer
|
||||
|
||||
|
||||
_runtime_vars: dict[str, Any] = {}
|
||||
|
||||
|
||||
def load_runtime_vars(
|
||||
ctx: typer.Context,
|
||||
callback: Callable,
|
||||
pdb: bool = False, # --pdb
|
||||
ll: Annotated[
|
||||
str,
|
||||
typer.Option(
|
||||
'--loglevel',
|
||||
'-l',
|
||||
help='BigD logging level',
|
||||
),
|
||||
] = 'cancel', # -l info
|
||||
):
|
||||
'''
|
||||
Maybe engage crash handling with `pdbp` when code inside
|
||||
a `typer` CLI endpoint cmd raises.
|
||||
|
||||
To use this callback simply take your `app = typer.Typer()` instance
|
||||
and decorate this function with it like so:
|
||||
|
||||
.. code:: python
|
||||
|
||||
from tractor.devx import cli
|
||||
|
||||
app = typer.Typer()
|
||||
|
||||
# manual decoration to hook into `click`'s context system!
|
||||
cli.load_runtime_vars = app.callback(
|
||||
invoke_without_command=True,
|
||||
)
|
||||
|
||||
And then you can use the now augmented `click` CLI context as so,
|
||||
|
||||
.. code:: python
|
||||
|
||||
@app.command(
|
||||
context_settings={
|
||||
"allow_extra_args": True,
|
||||
"ignore_unknown_options": True,
|
||||
}
|
||||
)
|
||||
def my_cli_cmd(
|
||||
ctx: typer.Context,
|
||||
):
|
||||
rtvars: dict = ctx.runtime_vars
|
||||
pdb: bool = rtvars['pdb']
|
||||
|
||||
with tractor.devx.cli.maybe_open_crash_handler(pdb=pdb):
|
||||
trio.run(
|
||||
partial(
|
||||
my_tractor_main_task_func,
|
||||
debug_mode=pdb,
|
||||
loglevel=rtvars['ll'],
|
||||
)
|
||||
)
|
||||
|
||||
which will enable log level and debug mode globally for the entire
|
||||
`tractor` + `trio` runtime thereafter!
|
||||
|
||||
Bo
|
||||
|
||||
'''
|
||||
global _runtime_vars
|
||||
_runtime_vars |= {
|
||||
'pdb': pdb,
|
||||
'll': ll,
|
||||
}
|
||||
|
||||
ctx.runtime_vars: dict[str, Any] = _runtime_vars
|
||||
print(
|
||||
f'`typer` sub-cmd: {ctx.invoked_subcommand}\n'
|
||||
f'`tractor` runtime vars: {_runtime_vars}'
|
||||
)
|
||||
|
||||
# XXX NOTE XXX: hackzone.. if no sub-cmd is specified (the
|
||||
# default if the user just invokes `bigd`) then we simply
|
||||
# invoke the sole `_bigd()` cmd passing in the "parent"
|
||||
# typer.Context directly to that call since we're treating it
|
||||
# as a "non sub-command" or wtv..
|
||||
# TODO: ideally typer would have some kinda built-in way to get
|
||||
# this behaviour without having to construct and manually
|
||||
# invoke our own cmd..
|
||||
if (
|
||||
ctx.invoked_subcommand is None
|
||||
or ctx.invoked_subcommand == callback.__name__
|
||||
):
|
||||
cmd: typer.core.TyperCommand = typer.core.TyperCommand(
|
||||
name='bigd',
|
||||
callback=callback,
|
||||
)
|
||||
ctx.params = {'ctx': ctx}
|
||||
cmd.invoke(ctx)
|
tractor/log.py
|
@ -48,12 +48,15 @@ LOG_FORMAT = (
|
|||
|
||||
DATE_FORMAT = '%b %d %H:%M:%S'
|
||||
|
||||
LEVELS = {
|
||||
LEVELS: dict[str, int] = {
|
||||
'TRANSPORT': 5,
|
||||
'RUNTIME': 15,
|
||||
'CANCEL': 16,
|
||||
'PDB': 500,
|
||||
}
|
||||
# _custom_levels: set[str] = {
|
||||
# lvlname.lower for lvlname in LEVELS.keys()
|
||||
# }
|
||||
|
||||
STD_PALETTE = {
|
||||
'CRITICAL': 'red',
|
||||
|
@ -102,7 +105,11 @@ class StackLevelAdapter(logging.LoggerAdapter):
|
|||
Cancellation logging, mostly for runtime reporting.
|
||||
|
||||
'''
|
||||
return self.log(16, msg)
|
||||
return self.log(
|
||||
level=16,
|
||||
msg=msg,
|
||||
# stacklevel=4,
|
||||
)
|
||||
|
||||
def pdb(
|
||||
self,
|
||||
|
@ -114,14 +121,37 @@ class StackLevelAdapter(logging.LoggerAdapter):
|
|||
'''
|
||||
return self.log(500, msg)
|
||||
|
||||
def log(self, level, msg, *args, **kwargs):
|
||||
"""
|
||||
def log(
|
||||
self,
|
||||
level,
|
||||
msg,
|
||||
*args,
|
||||
**kwargs,
|
||||
):
|
||||
'''
|
||||
Delegate a log call to the underlying logger, after adding
|
||||
contextual information from this adapter instance.
|
||||
"""
|
||||
|
||||
'''
|
||||
if self.isEnabledFor(level):
|
||||
stacklevel: int = 3
|
||||
if (
|
||||
level in LEVELS.values()
|
||||
# or level in _custom_levels
|
||||
):
|
||||
stacklevel: int = 4
|
||||
|
||||
# msg, kwargs = self.process(msg, kwargs)
|
||||
self._log(level, msg, args, **kwargs)
|
||||
self._log(
|
||||
level=level,
|
||||
msg=msg,
|
||||
args=args,
|
||||
# NOTE: not sure how this worked before but, it
|
||||
# seems with our custom level methods defined above
|
||||
# we do indeed (now) require another stack level??
|
||||
stacklevel=stacklevel,
|
||||
**kwargs,
|
||||
)
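The ``stacklevel`` bump matters because each adapter method adds a frame between the log call and the real caller; a minimal stdlib-only demonstration (``stacklevel`` is supported since Python 3.8):

.. code:: python

    import logging

    logging.basicConfig(format='%(filename)s:%(lineno)d %(message)s')
    log = logging.getLogger('demo')

    def cancel(msg: str) -> None:
        # 1 extra wrapper frame -> bump stacklevel so the record is
        # attributed to *our* caller, not this module.
        log.warning(msg, stacklevel=2)

    cancel('who called me?')  # reports the call site's file:lineno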
|
||||
|
||||
# LOL, the stdlib doesn't allow passing through ``stacklevel``..
|
||||
def _log(
|
||||
|
@ -134,12 +164,15 @@ class StackLevelAdapter(logging.LoggerAdapter):
|
|||
stack_info=False,
|
||||
|
||||
# XXX: bit we added to show fileinfo from actual caller.
|
||||
# this level then ``.log()`` then finally the caller's level..
|
||||
stacklevel=3,
|
||||
# - this level
|
||||
# - then ``.log()``
|
||||
# - then finally the caller's level..
|
||||
stacklevel=4,
|
||||
):
|
||||
"""
|
||||
'''
|
||||
Low-level log implementation, proxied to allow nested logger adapters.
|
||||
"""
|
||||
|
||||
'''
|
||||
return self.logger._log(
|
||||
level,
|
||||
msg,
|
||||
|
@ -193,15 +226,39 @@ def get_logger(
|
|||
'''
|
||||
log = rlog = logging.getLogger(_root_name)
|
||||
|
||||
if name and name != _proj_name:
|
||||
if (
|
||||
name
|
||||
and name != _proj_name
|
||||
):
|
||||
|
||||
# handling for modules that use ``get_logger(__name__)`` to
|
||||
# avoid duplicate project-package token in msg output
|
||||
rname, _, tail = name.partition('.')
|
||||
if rname == _root_name:
|
||||
name = tail
|
||||
# NOTE: for handling for modules that use ``get_logger(__name__)``
|
||||
# we make the following stylistic choice:
|
||||
# - always avoid duplicate project-package token
|
||||
# in msg output: i.e. tractor.tractor _ipc.py in header
|
||||
# looks ridiculous XD
|
||||
# - never show the leaf module name in the {name} part
|
||||
# since in python the {filename} is always this same
|
||||
# module-file.
|
||||
|
||||
sub_name: None | str = None
|
||||
rname, _, sub_name = name.partition('.')
|
||||
pkgpath, _, modfilename = sub_name.rpartition('.')
|
||||
|
||||
# NOTE: for tractor itself never include the last level
|
||||
# module key in the name such that something like: eg.
|
||||
# 'tractor.trionics._broadcast` only includes the first
|
||||
# 2 tokens in the (coloured) name part.
|
||||
if rname == 'tractor':
|
||||
sub_name = pkgpath
|
||||
|
||||
if _root_name in sub_name:
|
||||
duplicate, _, sub_name = sub_name.partition('.')
|
||||
|
||||
if not sub_name:
|
||||
log = rlog
|
||||
else:
|
||||
log = rlog.getChild(sub_name)
|
||||
|
||||
log = rlog.getChild(name)
|
||||
log.level = rlog.level
|
||||
|
||||
# add our actor-task aware adapter which will dynamically look up
|
||||
|
@ -232,11 +289,19 @@ def get_console_log(
|
|||
if not level:
|
||||
return log
|
||||
|
||||
log.setLevel(level.upper() if not isinstance(level, int) else level)
|
||||
log.setLevel(
|
||||
level.upper()
|
||||
if not isinstance(level, int)
|
||||
else level
|
||||
)
|
||||
|
||||
if not any(
|
||||
handler.stream == sys.stderr # type: ignore
|
||||
for handler in logger.handlers if getattr(handler, 'stream', None)
|
||||
for handler in logger.handlers if getattr(
|
||||
handler,
|
||||
'stream',
|
||||
None,
|
||||
)
|
||||
):
|
||||
handler = logging.StreamHandler()
|
||||
formatter = colorlog.ColoredFormatter(
|
||||
|
@ -254,3 +319,7 @@ def get_console_log(
|
|||
|
||||
def get_loglevel() -> str:
|
||||
return _default_loglevel
|
||||
|
||||
|
||||
# global module logger for tractor itself
|
||||
log = get_logger('tractor')
|
||||
|
|
|
@ -0,0 +1,26 @@
|
|||
# tractor: structured concurrent "actors".
|
||||
# Copyright 2018-eternity Tyler Goodlet.
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU Affero General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU Affero General Public License for more details.
|
||||
|
||||
# You should have received a copy of the GNU Affero General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Built-in messaging patterns, types, APIs and helpers.
|
||||
|
||||
'''
|
||||
from .ptr import (
|
||||
NamespacePath as NamespacePath,
|
||||
)
|
||||
from .types import (
|
||||
Struct as Struct,
|
||||
)
|
|
@ -15,7 +15,7 @@
|
|||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
'''
|
||||
Built-in messaging patterns, types, APIs and helpers.
|
||||
IPC-compat cross-mem-boundary object pointer.
|
||||
|
||||
'''
|
||||
|
||||
|
@ -43,38 +43,92 @@ Built-in messaging patterns, types, APIs and helpers.
|
|||
# - https://github.com/msgpack/msgpack-python#packingunpacking-of-custom-data-type
|
||||
|
||||
from __future__ import annotations
|
||||
from inspect import (
|
||||
isfunction,
|
||||
ismethod,
|
||||
)
|
||||
from pkgutil import resolve_name
|
||||
|
||||
|
||||
class NamespacePath(str):
|
||||
'''
|
||||
A serializable description of a (function) Python object location
|
||||
described by the target's module path and namespace key meant as
|
||||
a message-native "packet" to allows actors to point-and-load objects
|
||||
by absolute reference.
|
||||
A serializeable `str`-subtype implementing a "namespace
|
||||
pointer" to any Python object reference (like a function)
|
||||
using the same format as the built-in `pkgutil.resolve_name()`
|
||||
system.
|
||||
|
||||
A value describes a target's module-path and namespace-key
|
||||
separated by a ':' and thus can be easily used as
|
||||
an IPC-message-native reference-type allowing memory-isolated
|
||||
actors to point-and-load objects via a minimal `str` value.
|
||||
|
||||
'''
|
||||
_ref: object = None
|
||||
_ref: object | type | None = None
|
||||
|
||||
def load_ref(self) -> object:
|
||||
# TODO: support providing the ns instance in
|
||||
# order to support 'self.<meth>` style to make
|
||||
# `Portal.run_from_ns()` work!
|
||||
# _ns: ModuleType|type|None = None
|
||||
|
||||
def load_ref(self) -> object | type:
|
||||
if self._ref is None:
|
||||
self._ref = resolve_name(self)
|
||||
return self._ref
|
||||
|
||||
def to_tuple(
|
||||
self,
|
||||
@staticmethod
|
||||
def _mk_fqnp(ref: type | object) -> tuple[str, str]:
|
||||
'''
|
||||
Generate a minimal ``str`` pair which describes a Python
|
||||
object's namespace path and object/type name.
|
||||
|
||||
) -> tuple[str, str]:
|
||||
ref = self.load_ref()
|
||||
return ref.__module__, getattr(ref, '__name__', '')
|
||||
In more precise terms something like:
|
||||
- 'py.namespace.path:object_name',
|
||||
- e.g. 'tractor.msg:NamespacePath' will be the ``str`` form
|
||||
of THIS type XD
|
||||
|
||||
'''
|
||||
if (
|
||||
isfunction(ref)
|
||||
):
|
||||
name: str = getattr(ref, '__name__')
|
||||
|
||||
elif ismethod(ref):
|
||||
# build out the path manually i guess..?
|
||||
# TODO: better way?
|
||||
name: str = '.'.join([
|
||||
type(ref.__self__).__name__,
|
||||
ref.__func__.__name__,
|
||||
])
|
||||
|
||||
else: # object or other?
|
||||
# isinstance(ref, object)
|
||||
# and not isfunction(ref)
|
||||
name: str = type(ref).__name__
|
||||
|
||||
# fully qualified namespace path, tuple.
|
||||
fqnp: tuple[str, str] = (
|
||||
ref.__module__,
|
||||
name,
|
||||
)
|
||||
return fqnp
|
||||
|
||||
@classmethod
|
||||
def from_ref(
|
||||
cls,
|
||||
ref,
|
||||
ref: type | object,
|
||||
|
||||
) -> NamespacePath:
|
||||
return cls(':'.join(
|
||||
(ref.__module__,
|
||||
getattr(ref, '__name__', ''))
|
||||
))
|
||||
|
||||
fqnp: tuple[str, str] = cls._mk_fqnp(ref)
|
||||
return cls(':'.join(fqnp))
|
||||
|
||||
def to_tuple(
|
||||
self,
|
||||
|
||||
# TODO: could this work re `self:<meth>` case from above?
|
||||
# load_ref: bool = True,
|
||||
|
||||
) -> tuple[str, str]:
|
||||
return self._mk_fqnp(
|
||||
self.load_ref()
|
||||
)
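Since the format matches ``pkgutil.resolve_name()`` exactly, round-tripping a reference is trivial; a quick stdlib-only sketch:

.. code:: python

    from pkgutil import resolve_name

    nsp: str = 'pprint:pformat'  # '<module.path>:<object_name>'
    fn = resolve_name(nsp)

    assert fn.__module__ == 'pprint'
    assert fn.__name__ == 'pformat'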
|
|
@@ -0,0 +1,270 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

'''
Extensions to built-in or (heavily used but 3rd party) friend-lib
types.

'''
from __future__ import annotations
from collections import UserList
from pprint import (
    saferepr,
)
from typing import (
    Any,
    Iterator,
)

from msgspec import (
    msgpack,
    Struct as _Struct,
    structs,
)

# TODO: auto-gen type sig for input func both for
# type-msgs and logging of RPC tasks?
# taken and modified from:
# https://stackoverflow.com/a/57110117
# import inspect
# from typing import List

# def my_function(input_1: str, input_2: int) -> list[int]:
#     pass

# def types_of(func):
#     specs = inspect.getfullargspec(func)
#     return_type = specs.annotations['return']
#     input_types = [t.__name__ for s, t in specs.annotations.items() if s != 'return']
#     return f'{func.__name__}({": ".join(input_types)}) -> {return_type}'

# types_of(my_function)

class DiffDump(UserList):
    '''
    Very simple list delegator that repr() dumps (presumed) tuple
    elements of the form `tuple[str, Any, Any]` in a nice
    multi-line readable form for analyzing `Struct` diffs.

    '''
    def __repr__(self) -> str:
        if not len(self):
            return super().__repr__()

        # format by displaying item pair's ``repr()`` on multiple,
        # indented lines such that they are more easily visually
        # comparable when printed to console.
        repstr: str = '[\n'
        for k, left, right in self:
            repstr += (
                f'({k},\n'
                f'\t{repr(left)},\n'
                f'\t{repr(right)},\n'
                ')\n'
            )
        repstr += ']\n'
        return repstr

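For a sense of the intended rendering, a `DiffDump` of two `(key, left, right)` entries prints one entry per indented block (a sketch, not part of the patch):

dd = DiffDump([
    ('port', 1616, 1617),
    ('host', 'localhost', '127.0.0.1'),
])
print(dd)
# [
# (port,
#         1616,
#         1617,
# )
# (host,
#         'localhost',
#         '127.0.0.1',
# )
# ]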
class Struct(
    _Struct,

    # https://jcristharif.com/msgspec/structs.html#tagged-unions
    # tag='pikerstruct',
    # tag=True,
):
    '''
    A "human friendlier" (aka repl buddy) struct subtype.

    '''
    def _sin_props(self) -> Iterator[
        tuple[
            structs.FieldInfo,
            str,
            Any,
        ]
    ]:
        '''
        Iterate over all non-@property fields of this struct.

        '''
        fi: structs.FieldInfo
        for fi in structs.fields(self):
            key: str = fi.name
            val: Any = getattr(self, key)
            yield fi, key, val

    def to_dict(
        self,
        include_non_members: bool = True,

    ) -> dict:
        '''
        Like it sounds.. direct delegation to:
        https://jcristharif.com/msgspec/api.html#msgspec.structs.asdict

        BUT, when `include_non_members=False`, we pop all non-member
        fields (aka those not defined as struct fields).

        '''
        asdict: dict = structs.asdict(self)
        if include_non_members:
            return asdict

        # only return a dict of the struct members
        # which were provided as input, NOT anything
        # added as type-defined `@property` methods!
        sin_props: dict = {}
        fi: structs.FieldInfo
        for fi, k, v in self._sin_props():
            sin_props[k] = asdict[k]

        return sin_props

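A sketch of the member-only filtering using a hypothetical `Point` subtype; `msgspec.structs.asdict()` only ever serializes declared fields, and the `include_non_members=False` path additionally filters through `_sin_props()`:

class Point(Struct):
    x: int
    y: int

    @property
    def norm2(self) -> int:
        return self.x**2 + self.y**2

p = Point(x=3, y=4)

# only declared struct fields are included, never the
# `@property`-derived values:
assert p.to_dict(include_non_members=False) == {'x': 3, 'y': 4}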
    def pformat(
        self,
        field_indent: int = 2,
        indent: int = 0,

    ) -> str:
        '''
        Recursion-safe `pprint.pformat()` style formatting of
        a `msgspec.Struct` for sane reading by a human using a REPL.

        '''
        # global whitespace indent
        ws: str = ' '*indent

        # field whitespace indent
        field_ws: str = ' '*(field_indent + indent)

        # qtn: str = ws + self.__class__.__qualname__
        qtn: str = self.__class__.__qualname__

        obj_str: str = ''  # accumulator
        fi: structs.FieldInfo
        k: str
        v: Any
        for fi, k, v in self._sin_props():

            # TODO: how can we prefer `Literal['option1', 'option2,
            # ..]` over .__name__ == `Literal` but still get only the
            # latter for simple types like `str | int | None` etc..?
            ft: type = fi.type
            typ_name: str = getattr(ft, '__name__', str(ft))

            # recurse to get sub-struct's `.pformat()` output Bo
            if isinstance(v, Struct):
                val_str: str = v.pformat(
                    indent=field_indent + indent,
                    field_indent=indent + field_indent,
                )

            else:  # the `pprint` recursion-safe format:
                # https://docs.python.org/3.11/library/pprint.html#pprint.saferepr
                val_str: str = saferepr(v)

            # TODO: LOLOL use `textwrap.indent()` instead dawwwwwg!
            obj_str += (field_ws + f'{k}: {typ_name} = {val_str},\n')

        return (
            f'{qtn}(\n'
            f'{obj_str}'
            f'{ws})'
        )

    # TODO: use a pprint.PrettyPrinter instance around ONLY rendering
    # inside a known tty?
    # def __repr__(self) -> str:
    #     ...

    # __str__ = __repr__ = pformat
    __repr__ = pformat

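Given the code above, nested structs render with per-field type annotations and recursive indenting, roughly like this sketch (`Inner`/`Outer` are hypothetical):

class Inner(Struct):
    a: int

class Outer(Struct):
    name: str
    inner: Inner

# `__repr__ = pformat` means the REPL shows:
print(Outer(name='demo', inner=Inner(a=1)))
# Outer(
#   name: str = 'demo',
#   inner: Inner = Inner(
#     a: int = 1,
#   ),
# )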
    def copy(
        self,
        update: dict | None = None,

    ) -> Struct:
        '''
        Validate-typecast all self defined fields, return a copy of
        us with all such fields.

        NOTE: This is kinda like the default behaviour in
        `pydantic.BaseModel` except a copy of the object is
        returned making it compat with `frozen=True`.

        '''
        if update:
            for k, v in update.items():
                setattr(self, k, v)

        # NOTE: roundtrip serialize to validate
        # - encode to msgpack binary format,
        # - decode that back to a struct.
        return msgpack.Decoder(type=type(self)).decode(
            msgpack.Encoder().encode(self)
        )

    def typecast(
        self,

        # TODO: allow only casting a named subset?
        # fields: set[str] | None = None,

    ) -> None:
        '''
        Cast all fields using their declared type annotations
        (kinda like what `pydantic` does by default).

        NOTE: this of course won't work on frozen types, use
        ``.copy()`` above in such cases.

        '''
        # https://jcristharif.com/msgspec/api.html#msgspec.structs.fields
        fi: structs.FieldInfo
        for fi in structs.fields(self):
            setattr(
                self,
                fi.name,
                fi.type(getattr(self, fi.name)),
            )

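The round-trip-to-validate trick means a `.copy(update=...)` carrying type-invalid values fails on the decode side; a sketch with a hypothetical `Config` struct:

import msgspec

class Config(Struct):
    port: int
    host: str = 'localhost'

cfg = Config(port=1616)

# update + re-validate via the msgpack encode/decode round-trip:
cfg2 = cfg.copy(update={'port': 1617})
assert cfg2.port == 1617

# a badly typed update is caught by the decoder:
try:
    cfg.copy(update={'port': 'not-a-port'})
except msgspec.ValidationError:
    pass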
    def __sub__(
        self,
        other: Struct,

    ) -> DiffDump[tuple[str, Any, Any]]:
        '''
        Compare fields/items key-wise and return a ``DiffDump``
        for easy visual REPL comparison B)

        '''
        diffs: DiffDump[tuple[str, Any, Any]] = DiffDump()
        for fi in structs.fields(self):
            attr_name: str = fi.name
            ours: Any = getattr(self, attr_name)
            theirs: Any = getattr(other, attr_name)
            if ours != theirs:
                diffs.append((
                    attr_name,
                    ours,
                    theirs,
                ))

        return diffs

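Combined with `DiffDump` this gives quick field-wise diffing of two instances in the REPL; continuing the hypothetical `Config` example from above:

old = Config(port=1616)
new = Config(port=1617, host='127.0.0.1')

diffs = new - old  # -> DiffDump of (field, ours, theirs) tuples
assert [d[0] for d in diffs] == ['port', 'host']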
@@ -28,7 +28,6 @@ from typing import (
     Callable,
     AsyncIterator,
     Awaitable,
-    Optional,
 )

 import trio

@@ -65,9 +64,9 @@ class LinkedTaskChannel(trio.abc.Channel):
     _trio_exited: bool = False

     # set after ``asyncio.create_task()``
-    _aio_task: Optional[asyncio.Task] = None
-    _aio_err: Optional[BaseException] = None
-    _broadcaster: Optional[BroadcastReceiver] = None
+    _aio_task: asyncio.Task | None = None
+    _aio_err: BaseException | None = None
+    _broadcaster: BroadcastReceiver | None = None

     async def aclose(self) -> None:
         await self._from_aio.aclose()

@@ -188,7 +187,7 @@ def _run_asyncio_task(

     cancel_scope = trio.CancelScope()
     aio_task_complete = trio.Event()
-    aio_err: Optional[BaseException] = None
+    aio_err: BaseException | None = None

     chan = LinkedTaskChannel(
         aio_q,  # asyncio.Queue

@@ -263,7 +262,7 @@ def _run_asyncio_task(
     '''
     nonlocal chan
     aio_err = chan._aio_err
-    task_err: Optional[BaseException] = None
+    task_err: BaseException | None = None

     # only to avoid ``asyncio`` complaining about uncaptured
     # task exceptions

@@ -329,11 +328,11 @@ async def translate_aio_errors(
     '''
     trio_task = trio.lowlevel.current_task()

-    aio_err: Optional[BaseException] = None
+    aio_err: BaseException | None = None

     # TODO: make this a channel method?
     def maybe_raise_aio_err(
-        err: Optional[Exception] = None
+        err: Exception | None = None
     ) -> None:
         aio_err = chan._aio_err
         if (
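These hunks are a mechanical modernization: with `from __future__ import annotations` in effect (or on Python >= 3.10), `typing.Optional[X]` can be spelled as the PEP 604 union `X | None` and the `typing` import dropped entirely. For example:

from __future__ import annotations
import asyncio

# before (requires `from typing import Optional`):
#   _aio_task: Optional[asyncio.Task] = None

# after (builtin union syntax, no extra import):
_aio_task: asyncio.Task | None = None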
@@ -19,22 +19,13 @@ Sugary patterns for trio + tractor designs.

 '''
 from ._mngrs import (
-    gather_contexts,
-    maybe_open_context,
-    maybe_open_nursery,
+    gather_contexts as gather_contexts,
+    maybe_open_context as maybe_open_context,
+    maybe_open_nursery as maybe_open_nursery,
 )
 from ._broadcast import (
-    broadcast_receiver,
-    BroadcastReceiver,
-    Lagged,
+    AsyncReceiver as AsyncReceiver,
+    broadcast_receiver as broadcast_receiver,
+    BroadcastReceiver as BroadcastReceiver,
+    Lagged as Lagged,
 )
-
-
-__all__ = [
-    'gather_contexts',
-    'broadcast_receiver',
-    'BroadcastReceiver',
-    'Lagged',
-    'maybe_open_context',
-    'maybe_open_nursery',
-]
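The `name as name` self-alias is the explicit re-export form from PEP 484's stub semantics: strict type checkers (e.g. pyright, or mypy with `--no-implicit-reexport`) then treat each name as public API, making a hand-maintained `__all__` redundant for those tools. In miniature:

# pkg/__init__.py
#
# the self-alias marks each name as an intentional public
# re-export; without it strict checkers flag `pkg.helper`
# as a private/implicit re-export.
from ._impl import helper as helper

Note that `__all__` still governs runtime `from pkg import *` behaviour, so dropping it trades that for one less list to keep in sync.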
@@ -70,6 +70,7 @@ async def _enter_and_wait(
     unwrapped: dict[int, T],
     all_entered: trio.Event,
     parent_exit: trio.Event,
+    seed: int,

 ) -> None:
     '''

@@ -80,7 +81,10 @@ async def _enter_and_wait(
     async with mngr as value:
         unwrapped[id(mngr)] = value

-        if all(unwrapped.values()):
+        if all(
+            val != seed
+            for val in unwrapped.values()
+        ):
             all_entered.set()

         await parent_exit.wait()

@@ -91,7 +95,13 @@ async def gather_contexts(

     mngrs: Sequence[AsyncContextManager[T]],

-) -> AsyncGenerator[tuple[Optional[T], ...], None]:
+) -> AsyncGenerator[
+    tuple[
+        T | None,
+        ...
+    ],
+    None,
+]:
     '''
     Concurrently enter a sequence of async context managers, each in
     a separate ``trio`` task and deliver the unwrapped values in the

@@ -104,7 +114,11 @@ async def gather_contexts(
     entered and exited, and cancellation just works.

     '''
-    unwrapped: dict[int, Optional[T]] = {}.fromkeys(id(mngr) for mngr in mngrs)
+    seed: int = id(mngrs)
+    unwrapped: dict[int, T | None] = {}.fromkeys(
+        (id(mngr) for mngr in mngrs),
+        seed,
+    )

     all_entered = trio.Event()
     parent_exit = trio.Event()

@@ -116,8 +130,9 @@ async def gather_contexts(

     if not mngrs:
         raise ValueError(
-            'input mngrs is empty?\n'
-            'Did try to use inline generator syntax?'
+            '`.trionics.gather_contexts()` input mngrs is empty?\n'
+            'Did you try to use inline generator syntax?\n'
+            'Use a non-lazy iterator or sequence type instead!'
         )

     async with trio.open_nursery() as n:

@@ -128,6 +143,7 @@ async def gather_contexts(
             unwrapped,
             all_entered,
             parent_exit,
+            seed,
         )

         # deliver control once all managers have started up
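The new `seed` sentinel fixes a latent bug: `all(unwrapped.values())` treats any falsy yielded value (`None`, `0`, `''`) as "not yet entered", which could leave `all_entered` unset forever. Pre-filling the table with a value no manager can ever yield, then comparing against it, distinguishes "not entered" from "entered with a falsy value". A stripped-down illustration (not tractor code):

mngr_ids = [111, 222]  # stand-ins for `id(mngr)` keys

# broken: a manager that legitimately yields `None` looks un-entered
unwrapped = dict.fromkeys(mngr_ids)  # all values start as None
unwrapped[111] = None                # entered, but yielded None..
unwrapped[222] = 'ok'
assert not all(unwrapped.values())   # waiters hang forever!

# fixed: seed with a unique sentinel instead
seed = object()
unwrapped = dict.fromkeys(mngr_ids, seed)
unwrapped[111] = None
unwrapped[222] = 'ok'
assert all(val is not seed for val in unwrapped.values())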
@@ -209,6 +225,7 @@ async def maybe_open_context(

     # yielded output
     yielded: Any = None
+    lock_registered: bool = False

     # Lock resource acquisition around task racing / ``trio``'s
     # scheduler protocol.

@@ -216,6 +233,7 @@ async def maybe_open_context(
     # to allow re-entrant use cases where one `maybe_open_context()`
     # wrapped factory may want to call into another.
     lock = _Cache.locks.setdefault(fid, trio.Lock())
+    lock_registered: bool = True
     await lock.acquire()

     # XXX: one singleton nursery per actor and we want to

@@ -275,4 +293,9 @@ async def maybe_open_context(
     _, no_more_users = entry
     no_more_users.set()

-    _Cache.locks.pop(fid)
+    if lock_registered:
+        maybe_lock = _Cache.locks.pop(fid, None)
+        if maybe_lock is None:
+            log.error(
+                f'Resource lock for {fid} ALREADY POPPED?'
+            )
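The `lock_registered` flag plus `dict.pop(key, default)` makes teardown idempotent under racing exits: only a task that actually registered the lock attempts removal, and an already-missing entry is logged instead of raising `KeyError`. The same defensive pattern in isolation (names hypothetical):

import trio

locks: dict[str, trio.Lock] = {}

def get_lock(fid: str) -> trio.Lock:
    # first caller creates the lock, later callers reuse it;
    # `setdefault()` is race-free under trio's single-threaded,
    # cooperative scheduling.
    return locks.setdefault(fid, trio.Lock())

def drop_lock(fid: str, registered: bool) -> None:
    if registered:
        # pop-with-default so a racing task that already cleaned
        # up doesn't crash this one with a `KeyError`.
        if locks.pop(fid, None) is None:
            print(f'Resource lock for {fid} ALREADY POPPED?')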