Compare commits


1 Commit

Author SHA1 Message Date
Tyler Goodlet c439a10985 Include ./docs/README.rst in src dist 2022-07-10 18:24:24 -04:00
135 changed files with 6145 additions and 30130 deletions

View File

@ -20,35 +20,13 @@ jobs:
- name: Setup python
uses: actions/setup-python@v2
with:
python-version: '3.11'
python-version: '3.10'
- name: Install dependencies
run: pip install -U . --upgrade-strategy eager -r requirements-test.txt
- name: Run MyPy check
run: mypy tractor/ --ignore-missing-imports --show-traceback
# test that we can generate a software distribution and install it
# thus avoid missing file issues after packaging.
sdist-linux:
name: 'sdist'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup python
uses: actions/setup-python@v2
with:
python-version: '3.11'
- name: Build sdist
run: python setup.py sdist --formats=zip
- name: Install sdist from .zips
run: python -m pip install dist/*.zip
run: mypy tractor/ --ignore-missing-imports
testing-linux:
name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}'
@ -59,12 +37,8 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-latest]
python: ['3.11']
spawn_backend: [
'trio',
'mp_spawn',
'mp_forkserver',
]
python: ['3.9', '3.10']
spawn_backend: ['trio', 'mp']
steps:
@ -79,53 +53,71 @@ jobs:
- name: Install dependencies
run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager
- name: List dependencies
run: pip list
- name: Run tests
run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs
testing-linux-msgspec:
# runs jobs on all OS's but with optional `msgspec` dep installed
name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }} - msgspec'
timeout-minutes: 10
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest]
python: ['3.9', '3.10']
spawn_backend: ['trio', 'mp']
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup python
uses: actions/setup-python@v2
with:
python-version: '${{ matrix.python }}'
- name: Install dependencies
run: pip install -U .[msgspec] -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager
- name: Run tests
run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx
run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs
# We skip 3.10 on windows for now due to not having any collabs to
# debug the CI failures. Anyone wanting to hack and solve them is very
# welcome, but our primary user base is not using that OS.
# We skip 3.10 on windows for now due to
# https://github.com/pytest-dev/pytest/issues/8733
# some kinda weird `pyreadline` issue..
# TODO: use job filtering to accomplish instead of repeated
# boilerplate as is above XD:
# - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows
# - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
# - https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idif
# testing-windows:
# name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}'
# timeout-minutes: 12
# runs-on: ${{ matrix.os }}
testing-windows:
name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}'
timeout-minutes: 12
runs-on: ${{ matrix.os }}
# strategy:
# fail-fast: false
# matrix:
# os: [windows-latest]
# python: ['3.10']
# spawn_backend: ['trio', 'mp']
strategy:
fail-fast: false
matrix:
os: [windows-latest]
python: ['3.9']
spawn_backend: ['trio', 'mp']
# steps:
steps:
# - name: Checkout
# uses: actions/checkout@v2
- name: Checkout
uses: actions/checkout@v2
# - name: Setup python
# uses: actions/setup-python@v2
# with:
# python-version: '${{ matrix.python }}'
- name: Setup python
uses: actions/setup-python@v2
with:
python-version: '${{ matrix.python }}'
# - name: Install dependencies
# run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager
- name: Install dependencies
run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager
# # TODO: pretty sure this solves debugger deps-issues on windows, but it needs to
# # be verified by someone with a native setup.
# # - name: Force pyreadline3
# # run: pip uninstall pyreadline; pip install -U pyreadline3
# - name: List dependencies
# run: pip list
# - name: Run tests
# run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx
- name: Run tests
run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs

7
305.misc.rst 100644
View File

@ -0,0 +1,7 @@
Add ``tractor.query_actor()``, an address looker-upper which doesn't deliver
a ``Portal`` instance but instead just a socket address ``tuple``.
Sometimes it's handy to just have a simple way to figure out if
a "service" actor is up, so add this discovery helper for that. We'll
prolly just leave it undocumented for now until we figure out
a longer-term/better discovery system.
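A rough usage sketch (assuming ``query_actor()`` is an async context
manager yielding the registered socket address, or ``None`` when no
matching actor is found, and that it's run from inside an open actor
runtime):

.. code:: python

    import trio
    import tractor


    async def main() -> None:
        async with tractor.open_nursery() as an:
            await an.start_actor('some_service', enable_modules=[])

            # no ``Portal`` is constructed; we only learn the address
            async with tractor.query_actor('some_service') as sockaddr:
                print(f'"some_service" is up at {sockaddr}')
                assert sockaddr is not None

            await an.cancel()


    if __name__ == '__main__':
        trio.run(main)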

142
NEWS.rst
View File

@ -4,148 +4,6 @@ Changelog
.. towncrier release notes start
tractor 0.1.0a5 (2022-08-03)
============================
This is our final release supporting Python 3.9 since we will be moving
internals to the new `match:` syntax from 3.10 going forward and,
further, we have officially dropped usage of the `msgpack` library and
happily adopted `msgspec`.
Features
--------
- `#165 <https://github.com/goodboy/tractor/issues/165>`_: Add SIGINT
protection to our `pdbpp` based debugger subsystem such that for
(single-depth) actor trees in debug mode we ignore interrupts in any
actor currently holding the TTY lock thus avoiding clobbering IPC
connections and/or task and process state when working in the REPL.
As a big note currently so called "nested" actor trees (trees with
actors having more than one parent/ancestor) are not fully supported
since we don't yet have a mechanism to relay the debug mode knowledge
"up" the actor tree (for eg. when handling a crash in a leaf actor).
As such currently there is a set of tests and known scenarios which will
result in process clobbering by the zombie reaping machinery and these
have been documented in https://github.com/goodboy/tractor/issues/320.
The implementation details include:
- utilizing a custom SIGINT handler which we apply whenever an actor's
runtime enters the debug machinery, which we also make sure the
stdlib's `pdb` configuration doesn't override (which it does by
default without special instance config).
- litter the runtime with `maybe_wait_for_debugger()` mostly in spots
where the root actor should block before doing embedded nursery
teardown ops which both cancel potential-children-in-debug as well
as eventually trigger zombie reaping machinery.
- hardening of the TTY locking semantics/API both in terms of IPC
terminations and cancellation and lock release determinism from
sync debugger instance methods.
- factoring of locking infrastructure into a new `._debug.Lock` global
which encapsulates all details of the ``trio`` sync primitives and
task/actor uid management and tracking.
We also add `ctrl-c` cases throughout the test suite though these are
disabled for py3.9 (`pdbpp` UX differences that don't seem worth
compensating for, especially since this will be our last 3.9 supported
release) and there are a slew of marked cases that aren't expected to
work in CI more generally (as mentioned in the "nested" tree note
above) despite seemingly working when run manually on linux.
- `#304 <https://github.com/goodboy/tractor/issues/304>`_: Add a new
``to_asyncio.LinkedTaskChannel.subscribe()`` which gives task-oriented
broadcast functionality semantically equivalent to
``tractor.MsgStream.subscribe()``; this makes it possible for multiple
``trio``-side tasks to consume ``asyncio``-side task msgs in tandem.
Further Improvements to the test suite were added in this patch set
including a new scenario test for a sub-actor managed "service nursery"
(implementing the basics of a "service manager") including use of
*infected asyncio* mode. Further we added a lower level
``test_trioisms.py`` to start to track issues we need to work around in
``trio`` itself which in this case included a bug we were trying to
solve related to https://github.com/python-trio/trio/issues/2258.
Bug Fixes
---------
- `#318 <https://github.com/goodboy/tractor/issues/318>`_: Fix
a previously undetected ``trio``-``asyncio`` task lifetime linking
issue with the ``to_asyncio.open_channel_from()`` api where both sides
were not properly waiting/signalling termination and it was possible
for ``asyncio``-side errors to not propagate due to a race condition.
The implementation fix summary is:
- add state to signal the end of the ``trio`` side task to be
read by the ``asyncio`` side and always cancel any ongoing
task in such cases.
- always wait on the ``asyncio`` task termination from the ``trio``
side on error before maybe raising said error.
- always close the ``trio`` mem chan on exit to ensure the other
side can detect it and follow.
Trivial/Internal Changes
------------------------
- `#248 <https://github.com/goodboy/tractor/issues/248>`_: Adjust the
`tractor._spawn.soft_wait()` strategy to avoid sending an actor cancel
request (via `Portal.cancel_actor()`) if either the child process is
detected as having terminated or the IPC channel is detected to be
closed.
This ensures (even) more deterministic inter-actor cancellation by
avoiding the timeout condition where possible when a child never
successfully spawned, crashed, or became un-contactable over IPC.
- `#295 <https://github.com/goodboy/tractor/issues/295>`_: Add an
experimental ``tractor.msg.NamespacePath`` type for passing Python
objects by "reference" through a ``str``-subtype message and using the
new ``pkgutil.resolve_name()`` for reference loading.
- `#298 <https://github.com/goodboy/tractor/issues/298>`_: Add a new
`tractor.experimental` subpackage for staging new high level APIs and
subsystems that we might eventually make built-ins.
- `#300 <https://github.com/goodboy/tractor/issues/300>`_: Update to and
pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0) both of which
required adjustments for backwards incompatible API tweaks.
- `#303 <https://github.com/goodboy/tractor/issues/303>`_: Fence off
``multiprocessing`` imports until absolutely necessary in an effort to
avoid "resource tracker" spawning side effects that seem to have
varying degrees of unreliability per Python release. Port to new
``msgspec.DecodeError``.
- `#305 <https://github.com/goodboy/tractor/issues/305>`_: Add
``tractor.query_actor()``, an address looker-upper which doesn't deliver
a ``Portal`` instance but instead just a socket address ``tuple``.
Sometimes it's handy to just have a simple way to figure out if
a "service" actor is up, so add this discovery helper for that. We'll
prolly just leave it undocumented for now until we figure out
a longer-term/better discovery system.
- `#316 <https://github.com/goodboy/tractor/issues/316>`_: Run windows
CI jobs on python 3.10 after some hacks for ``pdbpp`` dependency
issues.
Issue was to do with the now deprecated `pyreadline` project which
should be changed over to `pyreadline3`.
- `#317 <https://github.com/goodboy/tractor/issues/317>`_: Drop use of
the ``msgpack`` package and instead move fully to the ``msgspec``
codec library.
We've now used ``msgspec`` extensively in production and there's no
reason to not use it as default. Further this change preps us for the up
and coming typed messaging semantics (#196), dialog-unprotocol system
(#297), and caps-based messaging-protocols (#299) planned before our
first beta.
tractor 0.1.0a4 (2021-12-18)
============================

View File

@ -1,122 +1,39 @@
|logo| ``tractor``: distributed structured concurrency
|logo| ``tractor``: next-gen Python parallelism
|gh_actions|
|docs|
``tractor`` is a `structured concurrency`_ (SC), multi-processing_ runtime built on trio_.
``tractor`` is a `structured concurrent`_, multi-processing_ runtime built on trio_.
Fundamentally, ``tractor`` provides parallelism via
``trio``-"*actors*": independent Python **processes** (i.e.
*non-shared-memory threads*) which can schedule ``trio`` tasks whilst
maintaining *end-to-end SC* inside a *distributed supervision tree*.
Fundamentally ``tractor`` gives you parallelism via ``trio``-"*actors*":
our nurseries_ let you spawn new Python processes which each run a ``trio``
scheduled runtime - a call to ``trio.run()``.
Cross-process (and thus cross-host) SC is accomplished through the
combined use of our,
We believe the system adheres to the `3 axioms`_ of an "`actor model`_"
but likely *does not* look like what *you* probably think an "actor
model" looks like, and that's *intentional*.
- "actor nurseries_" which provide for spawning multiple, and
possibly nested, Python processes each running a ``trio`` scheduled
runtime - a call to ``trio.run()``,
- an "SC-transitive supervision protocol" enforced as an
IPC-message-spec encapsulating all RPC-dialogs.
We believe the system adheres to the `3 axioms`_ of an "`actor model`_"
but likely **does not** look like what **you** probably *think* an "actor
model" looks like, and that's **intentional**.
Where do i start!?
------------------
The first step to grok ``tractor`` is to get an intermediate level of
knowledge of ``trio`` and **structured concurrency** B)
Some great places to start are,
- the seminal `blog post`_
- obviously the `trio docs`_
- wikipedia's nascent SC_ page
- the fancy diagrams @ libdill-docs_
The first step to grok ``tractor`` is to get the basics of ``trio`` down.
A great place to start is the `trio docs`_ and this `blog post`_.
Features
--------
- **It's just** a ``trio`` API!
- *Infinitely nestable* process trees running embedded ``trio`` tasks.
- Swappable, OS-specific, process spawning via multiple backends.
- Modular IPC stack, allowing for custom interchange formats (eg.
as offered from `msgspec`_), varied transport protocols (TCP, RUDP,
QUIC, wireguard), and OS-env specific higher-perf primitives (UDS,
shm-ring-buffers).
- Optionally distributed_: all IPC and RPC APIs work over multi-host
transports the same as local.
- Builtin high-level streaming API that enables your app to easily
leverage the benefits of a "`cheap or nasty`_" `(un)protocol`_.
- A "native UX" around a multi-process safe debugger REPL using
`pdbp`_ (a fork & fix of `pdb++`_)
- "Infected ``asyncio``" mode: support for starting an actor's
runtime as a `guest`_ on the ``asyncio`` loop allowing us to
provide stringent SC-style ``trio.Task``-supervision around any
``asyncio.Task`` spawned via our ``tractor.to_asyncio`` APIs.
- A **very naive** and still very much work-in-progress inter-actor
`discovery`_ system with plans to support multiple `modern protocol`_
approaches.
- Various ``trio`` extension APIs via ``tractor.trionics`` such as,
- task fan-out `broadcasting`_,
- multi-task-single-resource-caching and fan-out-to-multi
``__aenter__()`` APIs for ``@acm`` functions,
- (WIP) a ``TaskMngr``: one-cancels-one style nursery supervisor.
Install
-------
``tractor`` is still in an *alpha-near-beta-stage* for many
of its subsystems, however we are very close to having a stable
lowlevel runtime and API.
As such, it's currently recommended that you clone and install the
repo from source::
pip install git+git://github.com/goodboy/tractor.git
We use the very hip `uv`_ for project mgmt::
git clone https://github.com/goodboy/tractor.git
cd tractor
uv sync --dev
uv run python examples/rpc_bidir_streaming.py
Consider activating a virtual/project-env before starting to hack on
the code base::
# you could use plain ol' venvs
# https://docs.astral.sh/uv/pip/environments/
uv venv tractor_py313 --python 3.13
# but @goodboy prefers the more explicit (and shell agnostic)
# https://docs.astral.sh/uv/configuration/environment/#uv_project_environment
UV_PROJECT_ENVIRONMENT="tractor_py313"
# hint hint, enter @goodboy's fave shell B)
uv run --dev xonsh
Alongside all this we ofc offer "releases" on PyPi::
pip install tractor
Just note that YMMV since the main git branch is often much further
ahead than any latest release.
Example codez
-------------
In ``tractor``'s (very lacking) documentation we prefer to point to
example scripts in the repo over duplicating them in docs, but with
that in mind here are some definitive snippets to try and hook you
into digging deeper.
- **It's just** a ``trio`` API
- *Infinitely nestable* process trees
- Builtin IPC streaming APIs with task fan-out broadcasting
- A (first ever?) "native" multi-core debugger UX for Python using `pdb++`_
- Support for a swappable, OS specific, process spawning layer
- A modular transport stack, allowing for custom serialization (eg.
`msgspec`_), communications protocols, and environment specific IPC
primitives
- Support for spawning process-level-SC, inter-loop one-to-one-task oriented
``asyncio`` actors via "infected ``asyncio``" mode
- `structured chadcurrency`_ from the ground up
Run a func in a process
***********************
-----------------------
Use ``trio``'s style of focussing on *tasks as functions*:
.. code:: python
@ -174,7 +91,7 @@ might want to check out `trio-parallel`_.
Zombie safe: self-destruct a process tree
*****************************************
-----------------------------------------
``tractor`` tries to protect you from zombies, no matter what.
.. code:: python
@ -200,7 +117,7 @@ Zombie safe: self-destruct a process tree
f"running in pid {os.getpid()}"
)
await trio.sleep_forever()
await trio.sleep_forever()
async def main():
@ -230,8 +147,8 @@ it **is a bug**.
"Native" multi-process debugging
********************************
Using the magic of `pdbp`_ and our internal IPC, we've
--------------------------------
Using the magic of `pdb++`_ and our internal IPC, we've
been able to create a native feeling debugging experience for
any (sub-)process in your ``tractor`` tree.
@ -285,7 +202,7 @@ We're hoping to add a respawn-from-repl system soon!
SC compatible bi-directional streaming
**************************************
--------------------------------------
Yes, you saw it here first; we provide 2-way streams
with reliable, transitive setup/teardown semantics.
@ -377,7 +294,7 @@ hear your thoughts on!
Worker poolz are easy peasy
***************************
---------------------------
The initial ask from most new users is *"how do I make a worker
pool thing?"*.
@ -399,10 +316,10 @@ This uses no extra threads, fancy semaphores or futures; all we need
is ``tractor``'s IPC!
"Infected ``asyncio``" mode
***************************
---------------------------
Have a bunch of ``asyncio`` code you want to force to be SC at the process level?
Check out our experimental system for `guest`_-mode controlled
Check out our experimental system for `guest-mode`_ controlled
``asyncio`` actors:
.. code:: python
@ -508,7 +425,7 @@ We need help refining the `asyncio`-side channel API to be more
Higher level "cluster" APIs
***************************
---------------------------
To be extra terse the ``tractor`` devs have started hacking some "higher
level" APIs for managing actor trees/clusters. These interfaces should
generally be considered provisional for now but we encourage you to try
@ -565,6 +482,24 @@ spawn a flat cluster:
.. _full worker pool re-implementation: https://github.com/goodboy/tractor/blob/master/examples/parallelism/concurrent_actors_primes.py
Install
-------
From PyPi::
pip install tractor
To try out the (optionally) faster `msgspec`_ codec instead of the
default ``msgpack`` lib::
pip install tractor[msgspec]
From git::
pip install git+git://github.com/goodboy/tractor.git
Under the hood
--------------
``tractor`` is an attempt to pair trionic_ `structured concurrency`_ with
@ -628,22 +563,11 @@ properties of the system.
What's on the TODO:
-------------------
Help us push toward the future of distributed `Python`.
Help us push toward the future.
- Erlang-style supervisors via composed context managers (see `#22
<https://github.com/goodboy/tractor/issues/22>`_)
- Typed messaging protocols (ex. via ``msgspec.Struct``, see `#36
- Typed messaging protocols (ex. via ``msgspec``, see `#36
<https://github.com/goodboy/tractor/issues/36>`_)
- Typed capability-based (dialog) protocols ( see `#196
<https://github.com/goodboy/tractor/issues/196>`_ with draft work
started in `#311 <https://github.com/goodboy/tractor/pull/311>`_)
- We **recently disabled CI-testing on windows** and need help getting
it running again! (see `#327
<https://github.com/goodboy/tractor/pull/327>`_). **We do have windows
support** (and have for quite a while) but since no active hacker
exists in the user-base to help test on that OS; for now we're not
actively maintaining testing due to the added hassle and general
latency..
- Erlang-style supervisors via composed context managers
Feel like saying hi?
@ -655,39 +579,31 @@ say hi, please feel free to reach us in our `matrix channel`_. If
matrix seems too hip, we're also mostly all in the the `trio gitter
channel`_!
.. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228
.. _distributed: https://en.wikipedia.org/wiki/Distributed_computing
.. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing
.. _trio: https://github.com/python-trio/trio
.. _nurseries: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/#nurseries-a-structured-replacement-for-go-statements
.. _actor model: https://en.wikipedia.org/wiki/Actor_model
.. _trio: https://github.com/python-trio/trio
.. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing
.. _trionic: https://trio.readthedocs.io/en/latest/design.html#high-level-design-principles
.. _async sandwich: https://trio.readthedocs.io/en/latest/tutorial.html#async-sandwich
.. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228
.. _3 axioms: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=162s
.. .. _3 axioms: https://en.wikipedia.org/wiki/Actor_model#Fundamental_concepts
.. _adherance to: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=1821s
.. _trio gitter channel: https://gitter.im/python-trio/general
.. _matrix channel: https://matrix.to/#/!tractor:matrix.org
.. _broadcasting: https://github.com/goodboy/tractor/pull/229
.. _modern procotol: https://en.wikipedia.org/wiki/Rendezvous_protocol
.. _pdbp: https://github.com/mdmintz/pdbp
.. _pdb++: https://github.com/pdbpp/pdbpp
.. _cheap or nasty: https://zguide.zeromq.org/docs/chapter7/#The-Cheap-or-Nasty-Pattern
.. _(un)protocol: https://zguide.zeromq.org/docs/chapter7/#Unprotocols
.. _discovery: https://zguide.zeromq.org/docs/chapter8/#Discovery
.. _modern protocol: https://en.wikipedia.org/wiki/Rendezvous_protocol
.. _guest mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops
.. _messages: https://en.wikipedia.org/wiki/Message_passing
.. _trio docs: https://trio.readthedocs.io/en/latest/
.. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _SC: https://en.wikipedia.org/wiki/Structured_concurrency
.. _libdill-docs: https://sustrik.github.io/libdill/structured-concurrency.html
.. _structured chadcurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony
.. _async generators: https://www.python.org/dev/peps/pep-0525/
.. _trio-parallel: https://github.com/richardsheridan/trio-parallel
.. _uv: https://docs.astral.sh/uv/
.. _msgspec: https://jcristharif.com/msgspec/
.. _guest: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops
.. _guest-mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops
.. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square

View File

@ -396,7 +396,7 @@ tasks spawned via multiple RPC calls to an actor can modify
# a per process cache
_actor_cache: dict[str, bool] = {}
_actor_cache: Dict[str, bool] = {}
def ping_endpoints(endpoints: List[str]):

View File

@ -1,259 +0,0 @@
'''
Complex edge case where during real-time streaming the IPC transport
channels are wiped out (purposely in this example though it could have
been an outage) and we want to ensure that despite being in debug mode
(or not) the user can send SIGINT once they notice the hang and the
actor tree will eventually be cancelled without leaving any zombies.
'''
from contextlib import asynccontextmanager as acm
from functools import partial
from tractor import (
open_nursery,
context,
Context,
ContextCancelled,
MsgStream,
_testing,
)
import trio
import pytest
async def break_ipc_then_error(
stream: MsgStream,
break_ipc_with: str|None = None,
pre_close: bool = False,
):
await _testing.break_ipc(
stream=stream,
method=break_ipc_with,
pre_close=pre_close,
)
async for msg in stream:
await stream.send(msg)
assert 0
async def iter_ipc_stream(
stream: MsgStream,
break_ipc_with: str|None = None,
pre_close: bool = False,
):
async for msg in stream:
await stream.send(msg)
@context
async def recv_and_spawn_net_killers(
ctx: Context,
break_ipc_after: bool|int = False,
pre_close: bool = False,
) -> None:
'''
Receive stream msgs and spawn some IPC killers mid-stream.
'''
broke_ipc: bool = False
await ctx.started()
async with (
ctx.open_stream() as stream,
trio.open_nursery(
strict_exception_groups=False,
) as tn,
):
async for i in stream:
print(f'child echoing {i}')
if not broke_ipc:
await stream.send(i)
else:
await trio.sleep(0.01)
if (
break_ipc_after
and
i >= break_ipc_after
):
broke_ipc = True
tn.start_soon(
iter_ipc_stream,
stream,
)
tn.start_soon(
partial(
break_ipc_then_error,
stream=stream,
pre_close=pre_close,
)
)
@acm
async def stuff_hangin_ctlc(timeout: float = 1) -> None:
with trio.move_on_after(timeout) as cs:
yield timeout
if cs.cancelled_caught:
# pretend to be a user seeing no streaming action
# thinking it's a hang, and then hitting ctl-c..
print(
f"i'm a user on the PARENT side and thingz hangin "
f'after timeout={timeout} ???\n\n'
'MASHING CTlR-C..!?\n'
)
raise KeyboardInterrupt
async def main(
debug_mode: bool = False,
start_method: str = 'trio',
loglevel: str = 'cancel',
# by default we break the parent IPC first (if configured to break
# at all), but this can be changed so the child does first (even if
# both are set to break).
break_parent_ipc_after: int|bool = False,
break_child_ipc_after: int|bool = False,
pre_close: bool = False,
) -> None:
async with (
open_nursery(
start_method=start_method,
# NOTE: even debugger is used we shouldn't get
# a hang since it never engages due to broken IPC
debug_mode=debug_mode,
loglevel=loglevel,
) as an,
):
sub_name: str = 'chitty_hijo'
portal = await an.start_actor(
sub_name,
enable_modules=[__name__],
)
async with (
stuff_hangin_ctlc(timeout=2) as timeout,
_testing.expect_ctxc(
yay=(
break_parent_ipc_after
or break_child_ipc_after
),
# TODO: we CAN'T remove this right?
# since we need the ctxc to bubble up from either
# the stream API after the `None` msg is sent
# (which actually implicitly cancels all remote
# tasks in the hijo) or from simulated
# KBI-mash-from-user
# or should we expect that a KBI triggers the ctxc
# and KBI in an eg?
reraise=True,
),
portal.open_context(
recv_and_spawn_net_killers,
break_ipc_after=break_child_ipc_after,
pre_close=pre_close,
) as (ctx, sent),
):
rx_eoc: bool = False
ipc_break_sent: bool = False
async with ctx.open_stream() as stream:
for i in range(1000):
if (
break_parent_ipc_after
and
i > break_parent_ipc_after
and
not ipc_break_sent
):
print(
'#################################\n'
'Simulating PARENT-side IPC BREAK!\n'
'#################################\n'
)
# TODO: other methods? see break func above.
# await stream._ctx.chan.send(None)
# await stream._ctx.chan.transport.stream.send_eof()
await stream._ctx.chan.transport.stream.aclose()
ipc_break_sent = True
# it actually breaks right here in the
# mp_spawn/forkserver backends and thus the
# zombie reaper never even kicks in?
try:
print(f'parent sending {i}')
await stream.send(i)
except ContextCancelled as ctxc:
print(
'parent received ctxc on `stream.send()`\n'
f'{ctxc}\n'
)
assert 'root' in ctxc.canceller
assert sub_name in ctx.canceller
# TODO: is this needed or no?
raise
except trio.ClosedResourceError:
# NOTE: don't send if we already broke the
# connection to avoid raising a closed-error
# such that we drop through to the ctl-c
# mashing by user.
await trio.sleep(0.01)
# timeout: int = 1
# with trio.move_on_after(timeout) as cs:
async with stuff_hangin_ctlc() as timeout:
print(
f'PARENT `stream.receive()` with timeout={timeout}\n'
)
# NOTE: in the parent side IPC failure case this
# will raise an ``EndOfChannel`` after the child
# is killed and sends a stop msg back to its
# caller/this-parent.
try:
rx = await stream.receive()
print(
"I'm a happy PARENT user and echoed to me is\n"
f'{rx}\n'
)
except trio.EndOfChannel:
rx_eoc: bool = True
print('MsgStream got EoC for PARENT')
raise
print(
'Streaming finished and we got Eoc.\n'
'Canceling `.open_context()` in root with\n'
'CTlR-C..'
)
if rx_eoc:
assert stream.closed
try:
await stream.send(i)
pytest.fail('stream not closed?')
except (
trio.ClosedResourceError,
trio.EndOfChannel,
) as send_err:
if rx_eoc:
assert send_err is stream._eoc
else:
assert send_err is stream._closed
raise KeyboardInterrupt
if __name__ == '__main__':
trio.run(main)

View File

@ -1,136 +0,0 @@
'''
Examples of using the builtin `breakpoint()` from an `asyncio.Task`
running in a subactor spawned with `infect_asyncio=True`.
'''
import asyncio
import trio
import tractor
from tractor import (
to_asyncio,
Portal,
)
async def aio_sleep_forever():
await asyncio.sleep(float('inf'))
async def bp_then_error(
to_trio: trio.MemorySendChannel,
from_trio: asyncio.Queue,
raise_after_bp: bool = True,
) -> None:
# sync with `trio`-side (caller) task
to_trio.send_nowait('start')
# NOTE: what happens here inside the hook needs some refinement..
# => seems like it's still `._debug._set_trace()` but
# we set `Lock.local_task_in_debug = 'sync'`, we probably want
# some further, at least, meta-data about the task/actor in debug
# in terms of making it clear it's `asyncio` mucking about.
breakpoint() # asyncio-side
# short checkpoint / delay
await asyncio.sleep(0.5) # asyncio-side
if raise_after_bp:
raise ValueError('asyncio side error!')
# TODO: test case with this so that it gets cancelled?
else:
# XXX NOTE: this is required in order to get the SIGINT-ignored
# hang case documented in the module script section!
await aio_sleep_forever()
@tractor.context
async def trio_ctx(
ctx: tractor.Context,
bp_before_started: bool = False,
):
# this will block until the ``asyncio`` task sends a "first"
# message, see first line in above func.
async with (
to_asyncio.open_channel_from(
bp_then_error,
# raise_after_bp=not bp_before_started,
) as (first, chan),
trio.open_nursery() as tn,
):
assert first == 'start'
if bp_before_started:
await tractor.pause() # trio-side
await ctx.started(first) # trio-side
tn.start_soon(
to_asyncio.run_task,
aio_sleep_forever,
)
await trio.sleep_forever()
async def main(
bps_all_over: bool = True,
# TODO, WHICH OF THESE HAZ BUGZ?
cancel_from_root: bool = False,
err_from_root: bool = False,
) -> None:
async with tractor.open_nursery(
debug_mode=True,
maybe_enable_greenback=True,
# loglevel='devx',
) as an:
ptl: Portal = await an.start_actor(
'aio_daemon',
enable_modules=[__name__],
infect_asyncio=True,
debug_mode=True,
# loglevel='cancel',
)
async with ptl.open_context(
trio_ctx,
bp_before_started=bps_all_over,
) as (ctx, first):
assert first == 'start'
# pause in parent to ensure no cross-actor
# locking problems exist!
await tractor.pause() # trio-root
if cancel_from_root:
await ctx.cancel()
if err_from_root:
assert 0
else:
await trio.sleep_forever()
# TODO: case where we cancel from trio-side while asyncio task
# has debugger lock?
# await ptl.cancel_actor()
if __name__ == '__main__':
# works fine B)
trio.run(main)
# will hang and ignores SIGINT !!
# NOTE: you'll need to send a SIGQUIT (via ctl-\) to kill it
# manually..
# trio.run(main, True)

View File

@ -1,9 +0,0 @@
'''
Reproduce a bug where enabling debug mode for a sub-actor actually causes
a hang on teardown...
'''
import asyncio
import trio
import tractor

View File

@ -1,5 +1,5 @@
'''
Fast fail test with a `Context`.
Fast fail test with a context.
Ensure the partially initialized sub-actor process
doesn't cause a hang on error/cancel of the parent

View File

@ -4,15 +4,9 @@ import trio
async def breakpoint_forever():
"Indefinitely re-enter debugger in child actor."
try:
while True:
yield 'yo'
await tractor.pause()
except BaseException:
tractor.log.get_console_log().exception(
'Cancelled while trying to enter pause point!'
)
raise
while True:
yield 'yo'
await tractor.breakpoint()
async def name_error():
@ -21,14 +15,11 @@ async def name_error():
async def main():
'''
Test breakpoint in a streaming actor.
'''
"""Test breakpoint in a streaming actor.
"""
async with tractor.open_nursery(
debug_mode=True,
loglevel='cancel',
# loglevel='devx',
loglevel='error',
) as n:
p0 = await n.start_actor('bp_forever', enable_modules=[__name__])
@ -36,18 +27,7 @@ async def main():
# retreive results
async with p0.open_stream_from(breakpoint_forever) as stream:
# triggers the first name error
try:
await p1.run(name_error)
except tractor.RemoteActorError as rae:
assert rae.boxed_type is NameError
async for i in stream:
# a second time try the failing subactor and this time
# let error propagate up to the parent/nursery.
await p1.run(name_error)
await p1.run(name_error)
if __name__ == '__main__':

View File

@ -10,12 +10,7 @@ async def name_error():
async def breakpoint_forever():
"Indefinitely re-enter debugger in child actor."
while True:
await tractor.pause()
# NOTE: if the test never sent 'q'/'quit' commands
# on the pdb repl, without this checkpoint line the
# repl would spin in this actor forever.
# await trio.sleep(0)
await tractor.breakpoint()
async def spawn_until(depth=0):
@ -23,20 +18,12 @@ async def spawn_until(depth=0):
"""
async with tractor.open_nursery() as n:
if depth < 1:
await n.run_in_actor(breakpoint_forever)
p = await n.run_in_actor(
# await n.run_in_actor('breakpoint_forever', breakpoint_forever)
await n.run_in_actor(
name_error,
name='name_error'
)
await trio.sleep(0.5)
# rx and propagate error from child
await p.result()
else:
# recursive call to spawn another process branching layer of
# the tree
depth -= 1
await n.run_in_actor(
spawn_until,
@ -45,7 +32,6 @@ async def spawn_until(depth=0):
)
# TODO: notes on the new boxed-relayed errors through proxy actors
async def main():
"""The main ``tractor`` routine.
@ -67,7 +53,6 @@ async def main():
"""
async with tractor.open_nursery(
debug_mode=True,
# loglevel='cancel',
) as n:
# spawn both actors
@ -82,16 +67,8 @@ async def main():
name='spawner1',
)
# TODO: test this case as well where the parent doesn't see
# the sub-actor errors by default and instead expect a user
# ctrl-c to kill the root.
with trio.move_on_after(3):
await trio.sleep_forever()
# gah still an issue here.
await portal.result()
# should never get here
await portal1.result()

View File

@ -40,7 +40,7 @@ async def main():
"""
async with tractor.open_nursery(
debug_mode=True,
loglevel='devx',
# loglevel='cancel',
) as n:
# spawn both actors

View File

@ -6,7 +6,7 @@ async def breakpoint_forever():
"Indefinitely re-enter debugger in child actor."
while True:
await trio.sleep(0.1)
await tractor.pause()
await tractor.breakpoint()
async def name_error():
@ -38,7 +38,6 @@ async def main():
"""
async with tractor.open_nursery(
debug_mode=True,
# loglevel='runtime',
) as n:
# Spawn both actors, don't bother with collecting results

View File

@ -1,40 +0,0 @@
import trio
import tractor
@tractor.context
async def just_sleep(
ctx: tractor.Context,
**kwargs,
) -> None:
'''
Start and sleep.
'''
await ctx.started()
await trio.sleep_forever()
async def main() -> None:
async with tractor.open_nursery(
debug_mode=True,
) as n:
portal = await n.start_actor(
'ctx_child',
# XXX: we don't enable the current module in order
# to trigger `ModuleNotFound`.
enable_modules=[],
)
async with portal.open_context(
just_sleep, # taken from pytest parameterization
) as (ctx, sent):
raise KeyboardInterrupt
if __name__ == '__main__':
trio.run(main)

View File

@ -23,6 +23,5 @@ async def main():
n.start_soon(debug_actor.run, die)
n.start_soon(crash_boi.run, die)
if __name__ == '__main__':
trio.run(main)

View File

@ -1,56 +0,0 @@
import trio
import tractor
@tractor.context
async def name_error(
ctx: tractor.Context,
):
'''
Raise a `NameError`, catch it and enter `.post_mortem()`, then
expect the `._rpc._invoke()` crash handler to also engage.
'''
try:
getattr(doggypants) # noqa (on purpose)
except NameError:
await tractor.post_mortem()
raise
async def main():
'''
Test 3 `PdbREPL` entries:
- one in the child due to manual `.post_mortem()`,
- another in the child due to runtime RPC crash handling.
- final one here in parent from the RAE.
'''
# XXX NOTE: ideally the REPL arrives at this frame in the parent
# ONE UP FROM the inner ctx block below!
async with tractor.open_nursery(
debug_mode=True,
# loglevel='cancel',
) as an:
p: tractor.Portal = await an.start_actor(
'child',
enable_modules=[__name__],
)
# XXX should raise `RemoteActorError[NameError]`
# AND be the active frame when REPL enters!
try:
async with p.open_context(name_error) as (ctx, first):
assert first
except tractor.RemoteActorError as rae:
assert rae.boxed_type is NameError
# manually handle in root's parent task
await tractor.post_mortem()
raise
else:
raise RuntimeError('IPC ctx should have remote errored!?')
if __name__ == '__main__':
trio.run(main)

View File

@ -1,49 +0,0 @@
import os
import sys
import trio
import tractor
async def main() -> None:
# initially unset, no entry.
orig_pybp_var: int = os.environ.get('PYTHONBREAKPOINT')
assert orig_pybp_var in {None, "0"}
async with tractor.open_nursery(
debug_mode=True,
) as an:
assert an
assert (
(pybp_var := os.environ['PYTHONBREAKPOINT'])
==
'tractor.devx._debug._sync_pause_from_builtin'
)
# TODO: an assert that verifies the hook has indeed been, hooked
# XD
assert (
(pybp_hook := sys.breakpointhook)
is not tractor.devx._debug._set_trace
)
print(
f'$PYTHONBREAKPOINT: {pybp_var!r}\n'
f'`sys.breakpointhook`: {pybp_hook!r}\n'
)
breakpoint() # first bp, tractor hook set.
# XXX AFTER EXIT (of actor-runtime) verify the hook is unset..
#
# YES, this is weird but it's how stdlib docs say to do it..
# https://docs.python.org/3/library/sys.html#sys.breakpointhook
assert os.environ.get('PYTHONBREAKPOINT') is orig_pybp_var
assert sys.breakpointhook
# now ensure a regular builtin pause still works
breakpoint() # last bp, stdlib hook restored
if __name__ == '__main__':
trio.run(main)

View File

@ -10,7 +10,7 @@ async def main():
await trio.sleep(0.1)
await tractor.pause()
await tractor.breakpoint()
await trio.sleep(0.1)

View File

@ -2,16 +2,13 @@ import trio
import tractor
async def main(
registry_addrs: tuple[str, int]|None = None
):
async def main():
async with tractor.open_root_actor(
debug_mode=True,
# loglevel='runtime',
):
while True:
await tractor.pause()
await tractor.breakpoint()
if __name__ == '__main__':

View File

@ -1,83 +0,0 @@
'''
Verify we can dump a `stackscope` tree on a hang.
'''
import os
import signal
import trio
import tractor
@tractor.context
async def start_n_shield_hang(
ctx: tractor.Context,
):
# actor: tractor.Actor = tractor.current_actor()
# sync to parent-side task
await ctx.started(os.getpid())
print('Entering shield sleep..')
with trio.CancelScope(shield=True):
await trio.sleep_forever() # in subactor
# XXX NOTE ^^^ since this shields, we expect
# the zombie reaper (aka T800) to engage on
# SIGINT from the user and eventually hard-kill
# this subprocess!
async def main(
from_test: bool = False,
) -> None:
async with (
tractor.open_nursery(
debug_mode=True,
enable_stack_on_sig=True,
# maybe_enable_greenback=False,
loglevel='devx',
) as an,
):
ptl: tractor.Portal = await an.start_actor(
'hanger',
enable_modules=[__name__],
debug_mode=True,
)
async with ptl.open_context(
start_n_shield_hang,
) as (ctx, cpid):
_, proc, _ = an._children[ptl.chan.uid]
assert cpid == proc.pid
print(
'Yo my child hanging..?\n'
# "i'm a user who wants to see a `stackscope` tree!\n"
)
# XXX simulate the wrapping test's "user actions"
# (i.e. if a human didn't run this manually but wants to
# know what they should do to reproduce test behaviour)
if from_test:
print(
f'Sending SIGUSR1 to {cpid!r}!\n'
)
os.kill(
cpid,
signal.SIGUSR1,
)
# simulate user cancelling program
await trio.sleep(0.5)
os.kill(
os.getpid(),
signal.SIGINT,
)
else:
# actually let user send the ctl-c
await trio.sleep_forever() # in root
if __name__ == '__main__':
trio.run(main)

View File

@ -1,88 +0,0 @@
import trio
import tractor
async def cancellable_pause_loop(
task_status: trio.TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED
):
with trio.CancelScope() as cs:
task_status.started(cs)
for _ in range(3):
try:
# ON first entry, there is no level triggered
# cancellation yet, so this cp does a parent task
# ctx-switch so that this scope raises for the NEXT
# checkpoint we hit.
await trio.lowlevel.checkpoint()
await tractor.pause()
cs.cancel()
# parent should have called `cs.cancel()` by now
await trio.lowlevel.checkpoint()
except trio.Cancelled:
print('INSIDE SHIELDED PAUSE')
await tractor.pause(shield=True)
else:
# should raise it again, bubbling up to parent
print('BUBBLING trio.Cancelled to parent task-nursery')
await trio.lowlevel.checkpoint()
async def pm_on_cancelled():
async with trio.open_nursery() as tn:
tn.cancel_scope.cancel()
try:
await trio.sleep_forever()
except trio.Cancelled:
# should also raise `Cancelled` since
# we didn't pass `shield=True`.
try:
await tractor.post_mortem(hide_tb=False)
except trio.Cancelled as taskc:
# should enter just fine, in fact it should
# be debugging the internals of the previous
# sin-shield call above Bo
await tractor.post_mortem(
hide_tb=False,
shield=True,
)
raise taskc
else:
raise RuntimeError('Dint cancel as expected!?')
async def cancelled_before_pause(
):
'''
Verify that using a shielded pause works despite the surrounding
cancel-called state in the calling task.
'''
async with trio.open_nursery() as tn:
cs: trio.CancelScope = await tn.start(cancellable_pause_loop)
await trio.sleep(0.1)
assert cs.cancelled_caught
await pm_on_cancelled()
async def main():
async with tractor.open_nursery(
debug_mode=True,
) as n:
portal: tractor.Portal = await n.run_in_actor(
cancelled_before_pause,
)
await portal.result()
# ensure the same works in the root actor!
await pm_on_cancelled()
if __name__ == '__main__':
trio.run(main)

View File

@ -1,50 +0,0 @@
import tractor
import trio
async def gen():
yield 'yo'
await tractor.pause()
yield 'yo'
await tractor.pause()
@tractor.context
async def just_bp(
ctx: tractor.Context,
) -> None:
await ctx.started()
await tractor.pause()
# TODO: bps and errors in this call..
async for val in gen():
print(val)
# await trio.sleep(0.5)
# prematurely destroy the connection
await ctx.chan.aclose()
# THIS CAUSES AN UNRECOVERABLE HANG
# without latest ``pdbpp``:
assert 0
async def main():
async with tractor.open_nursery(
debug_mode=True,
) as n:
p = await n.start_actor(
'bp_boi',
enable_modules=[__name__],
)
async with p.open_context(
just_bp,
) as (ctx, first):
await trio.sleep_forever()
if __name__ == '__main__':
trio.run(main)

View File

@ -3,20 +3,17 @@ import tractor
async def breakpoint_forever():
'''
Indefinitely re-enter debugger in child actor.
'''
"""Indefinitely re-enter debugger in child actor.
"""
while True:
await trio.sleep(0.1)
await tractor.pause()
await tractor.breakpoint()
async def main():
async with tractor.open_nursery(
debug_mode=True,
loglevel='cancel',
) as n:
portal = await n.run_in_actor(

View File

@ -3,26 +3,16 @@ import tractor
async def name_error():
getattr(doggypants) # noqa (on purpose)
getattr(doggypants)
async def main():
async with tractor.open_nursery(
debug_mode=True,
# loglevel='transport',
) as an:
) as n:
# TODO: ideally the REPL arrives at this frame in the parent,
# ABOVE the @api_frame of `Portal.run_in_actor()` (which
# should eventually not even be a portal method ... XD)
# await tractor.pause()
p: tractor.Portal = await an.run_in_actor(name_error)
# with this style, should raise on this line
await p.result()
# with this alt style should raise at `open_nusery()`
# return await p.result()
portal = await n.run_in_actor(name_error)
await portal.result()
if __name__ == '__main__':

View File

@ -1,169 +0,0 @@
from functools import partial
import time
import trio
import tractor
# TODO: only import these when not running from test harness?
# can we detect `pexpect` usage maybe?
# from tractor.devx._debug import (
# get_lock,
# get_debug_req,
# )
def sync_pause(
use_builtin: bool = False,
error: bool = False,
hide_tb: bool = True,
pre_sleep: float|None = None,
):
if pre_sleep:
time.sleep(pre_sleep)
if use_builtin:
breakpoint(hide_tb=hide_tb)
else:
# TODO: maybe for testing some kind of cm style interface
# where the `._set_trace()` call doesn't happen until block
# exit?
# assert get_lock().ctx_in_debug is None
# assert get_debug_req().repl is None
tractor.pause_from_sync()
# assert get_debug_req().repl is None
if error:
raise RuntimeError('yoyo sync code error')
@tractor.context
async def start_n_sync_pause(
ctx: tractor.Context,
):
actor: tractor.Actor = tractor.current_actor()
# sync to parent-side task
await ctx.started()
print(f'Entering `sync_pause()` in subactor: {actor.uid}\n')
sync_pause()
print(f'Exited `sync_pause()` in subactor: {actor.uid}\n')
async def main() -> None:
async with (
tractor.open_nursery(
debug_mode=True,
maybe_enable_greenback=True,
enable_stack_on_sig=True,
# loglevel='warning',
# loglevel='devx',
) as an,
trio.open_nursery() as tn,
):
# just from root task
sync_pause()
p: tractor.Portal = await an.start_actor(
'subactor',
enable_modules=[__name__],
# infect_asyncio=True,
debug_mode=True,
)
# TODO: 3 sub-actor usage cases:
# -[x] via a `.open_context()`
# -[ ] via a `.run_in_actor()` call
# -[ ] via a `.run()`
# -[ ] via a `.to_thread.run_sync()` in subactor
async with p.open_context(
start_n_sync_pause,
) as (ctx, first):
assert first is None
# TODO: handle bg-thread-in-root-actor special cases!
#
# there are a couple very subtle situations possible here
# and they are likely to become more important as cpython
# moves to support no-GIL.
#
# Cases:
# 1. root-actor bg-threads that call `.pause_from_sync()`
# whilst an in-tree subactor also is using ` .pause()`.
# |_ since the root-actor bg thread can not
# `Lock._debug_lock.acquire_nowait()` without running
# a `trio.Task`, AND because the
# `PdbREPL.set_continue()` is called from that
# bg-thread, we can not `._debug_lock.release()`
# either!
# |_ this results in no actor-tree `Lock` being used
# on behalf of the bg-thread and thus the subactor's
# task and the thread trying to use stdio
# simultaneously which results in the classic TTY
# clobbering!
#
# 2. multiple sync-bg-threads that call
# `.pause_from_sync()` where one is scheduled via
# `Nursery.start_soon(to_thread.run_sync)` in a bg
# task.
#
# Due to the GIL, the threads never truly try to step
# through the REPL simultaneously, BUT their `logging`
# and traceback outputs are interleaved since the GIL
# (seemingly) on every REPL-input from the user
# switches threads..
#
# Soo, the context switching semantics of the GIL
# result in a very confusing and messy interaction UX
# since eval and (tb) print output is NOT synced to
# each REPL-cycle (like we normally make it via
# a `.set_continue()` callback triggering the
# `Lock.release()`). Ideally we can solve this
# usability issue NOW because this will of course be
# that much more important when eventually there is no
# GIL!
# XXX should cause double REPL entry and thus TTY
# clobbering due to case 1. above!
tn.start_soon(
partial(
trio.to_thread.run_sync,
partial(
sync_pause,
use_builtin=False,
# pre_sleep=0.5,
),
abandon_on_cancel=True,
thread_name='start_soon_root_bg_thread',
)
)
await tractor.pause()
# XXX should cause double REPL entry and thus TTY
# clobbering due to case 2. above!
await trio.to_thread.run_sync(
partial(
sync_pause,
# NOTE this already works fine since in the new
# thread the `breakpoint()` built-in is never
# overloaded, thus NO locking is used, HOWEVER
# the case 2. from above still exists!
use_builtin=True,
),
# TODO: with this `False` we can hang!??!
# abandon_on_cancel=False,
abandon_on_cancel=True,
thread_name='inline_root_bg_thread',
)
await ctx.cancel()
# TODO: case where we cancel from trio-side while asyncio task
# has debugger lock?
await p.cancel_actor()
if __name__ == '__main__':
trio.run(main)

View File

@ -1,11 +1,6 @@
import time
import trio
import tractor
from tractor import (
ActorNursery,
MsgStream,
Portal,
)
# this is the first 2 actors, streamer_1 and streamer_2
@ -17,18 +12,14 @@ async def stream_data(seed):
# this is the third actor; the aggregator
async def aggregate(seed):
'''
Ensure that the two streams we receive match but only stream
"""Ensure that the two streams we receive match but only stream
a single set of values to the parent.
'''
an: ActorNursery
async with tractor.open_nursery() as an:
portals: list[Portal] = []
"""
async with tractor.open_nursery() as nursery:
portals = []
for i in range(1, 3):
# fork/spawn call
portal = await an.start_actor(
# fork point
portal = await nursery.start_actor(
name=f'streamer_{i}',
enable_modules=[__name__],
)
@ -52,11 +43,7 @@ async def aggregate(seed):
async with trio.open_nursery() as n:
for portal in portals:
n.start_soon(
push_to_chan,
portal,
send_chan.clone(),
)
n.start_soon(push_to_chan, portal, send_chan.clone())
# close this local task's reference to send side
await send_chan.aclose()
@ -73,36 +60,26 @@ async def aggregate(seed):
print("FINISHED ITERATING in aggregator")
await an.cancel()
await nursery.cancel()
print("WAITING on `ActorNursery` to finish")
print("AGGREGATOR COMPLETE!")
async def main() -> list[int]:
'''
This is the "root" actor's main task's entrypoint.
By default (and if not otherwise specified) that root process
also acts as a "registry actor" / "registrar" on the localhost
for the purposes of multi-actor "service discovery".
'''
# yes, a nursery which spawns `trio`-"actors" B)
an: ActorNursery
# this is the main actor and *arbiter*
async def main():
# a nursery which spawns "actors"
async with tractor.open_nursery(
loglevel='cancel',
# debug_mode=True,
) as an:
arbiter_addr=('127.0.0.1', 1616)
) as nursery:
seed = int(1e3)
pre_start = time.time()
portal: Portal = await an.start_actor(
portal = await nursery.start_actor(
name='aggregator',
enable_modules=[__name__],
)
stream: MsgStream
async with portal.open_stream_from(
aggregate,
seed=seed,
@ -111,12 +88,11 @@ async def main() -> list[int]:
start = time.time()
# the portal call returns exactly what you'd expect
# as if the remote "aggregate" function was called locally
result_stream: list[int] = []
result_stream = []
async for value in stream:
result_stream.append(value)
cancelled: bool = await portal.cancel_actor()
assert cancelled
await portal.cancel_actor()
print(f"STREAM TIME = {time.time() - start}")
print(f"STREAM + SPAWN TIME = {time.time() - pre_start}")

View File

@ -13,7 +13,6 @@ import tractor
async def aio_echo_server(
to_trio: trio.MemorySendChannel,
from_trio: asyncio.Queue,
) -> None:
# a first message must be sent **from** this ``asyncio``

View File

@ -1,49 +0,0 @@
import trio
import click
import tractor
import pydantic
# from multiprocessing import shared_memory
@tractor.context
async def just_sleep(
ctx: tractor.Context,
**kwargs,
) -> None:
'''
Test a small ping-pong 2-way streaming server.
'''
await ctx.started()
await trio.sleep_forever()
async def main() -> None:
proc = await trio.open_process( (
'python',
'-c',
'import trio; trio.run(trio.sleep_forever)',
))
await proc.wait()
# await trio.sleep_forever()
# async with tractor.open_nursery() as n:
# portal = await n.start_actor(
# 'rpc_server',
# enable_modules=[__name__],
# )
# async with portal.open_context(
# just_sleep, # taken from pytest parameterization
# ) as (ctx, sent):
# await trio.sleep_forever()
if __name__ == '__main__':
import time
# time.sleep(999)
trio.run(main)

View File

@ -8,17 +8,15 @@ This uses no extra threads, fancy semaphores or futures; all we need
is ``tractor``'s channels.
"""
from contextlib import (
asynccontextmanager as acm,
aclosing,
)
from typing import Callable
from contextlib import asynccontextmanager
from typing import List, Callable
import itertools
import math
import time
import tractor
import trio
from async_generator import aclosing
PRIMES = [
@ -46,7 +44,7 @@ async def is_prime(n):
return True
@acm
@asynccontextmanager
async def worker_pool(workers=4):
"""Though it's a trivial special case for ``tractor``, the well
known "worker pool" seems to be the defacto "but, I want this
@ -73,8 +71,8 @@ async def worker_pool(workers=4):
async def _map(
worker_func: Callable[[int], bool],
sequence: list[int]
) -> list[bool]:
sequence: List[int]
) -> List[bool]:
# define an async (local) task to collect results from workers
async def send_result(func, value, portal):

View File

@ -3,18 +3,20 @@ import trio
import tractor
async def sleepy_jane() -> None:
uid: tuple = tractor.current_actor().uid
async def sleepy_jane():
uid = tractor.current_actor().uid
print(f'Yo i am actor {uid}')
await trio.sleep_forever()
async def main():
'''
Spawn a flat actor cluster, with one process per detected core.
Spawn a flat actor cluster, with one process per
detected core.
'''
portal_map: dict[str, tractor.Portal]
results: dict[str, str]
# look at this hip new syntax!
async with (
@ -23,16 +25,11 @@ async def main():
modules=[__name__]
) as portal_map,
trio.open_nursery(
strict_exception_groups=False,
) as tn,
trio.open_nursery() as n,
):
for (name, portal) in portal_map.items():
tn.start_soon(
portal.run,
sleepy_jane,
)
n.start_soon(portal.run, sleepy_jane)
await trio.sleep(0.5)
@ -44,4 +41,4 @@ if __name__ == '__main__':
try:
trio.run(main)
except KeyboardInterrupt:
print('trio cancelled by KBI')
pass

View File

@ -13,7 +13,7 @@ async def simple_rpc(
'''
# signal to parent that we're up much like
# ``trio.TaskStatus.started()``
# ``trio_typing.TaskStatus.started()``
await ctx.started(data + 1)
async with ctx.open_stream() as stream:

View File

@ -9,7 +9,7 @@ async def main(service_name):
async with tractor.open_nursery() as an:
await an.start_actor(service_name)
async with tractor.get_registry('127.0.0.1', 1616) as portal:
async with tractor.get_arbiter('127.0.0.1', 1616) as portal:
print(f"Arbiter is listening on {portal.channel}")
async with tractor.wait_for_actor(service_name) as sockaddr:

View File

@ -0,0 +1,8 @@
Adjust the `tractor._spawn.soft_wait()` strategy to avoid sending an
actor cancel request (via `Portal.cancel_actor()`) if either the child
process is detected as having terminated or the IPC channel is detected
to be closed.
This ensures (even) more deterministic inter-actor cancellation by
avoiding the timeout condition where possible when a child never
successfully spawned, crashed, or became un-contactable over IPC.

View File

@ -0,0 +1,3 @@
Add an experimental ``tractor.msg.NamespacePath`` type for passing Python
objects by "reference" through a ``str``-subtype message and using the
new ``pkgutil.resolve_name()`` for reference loading.
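The core trick can be sketched with the stdlib alone; the tiny
``str``-subtype below is purely illustrative and *not* the actual
``tractor.msg.NamespacePath`` implementation:

.. code:: python

    from pkgutil import resolve_name


    class NsPath(str):
        '''
        Illustrative-only "reference as a string" type: encode an object
        as ``'<module>:<qualname>'`` and re-load it on the receiving side.

        '''
        @classmethod
        def from_ref(cls, obj) -> 'NsPath':
            return cls(f'{obj.__module__}:{obj.__qualname__}')

        def load_ref(self):
            return resolve_name(self)


    ref = NsPath('math:cos')
    assert ref.load_ref()(0) == 1.0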

View File

@ -0,0 +1,2 @@
Add a new `tractor.experimental` subpackage for staging new high level
APIs and subsystems that we might eventually make built-ins.

View File

@ -0,0 +1,3 @@
Update to and pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0)
both of which required adjustments for backwards incompatible API
tweaks.

View File

@ -0,0 +1,4 @@
Fence off ``multiprocessing`` imports until absolutely necessary in an
effort to avoid "resource tracker" spawning side effects that seem to
have varying degrees of unreliability per Python release. Port to new
``msgspec.DecodeError``.
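The pattern is simply to defer the import until an ``mp``-based spawn
backend is actually selected; a hypothetical sketch (not the real
``._spawn`` internals):

.. code:: python

    def get_mp_context(start_method: str = 'trio'):
        '''
        Hypothetical helper: only touch ``multiprocessing`` (and thus its
        "resource tracker" machinery) when an mp backend is requested.

        '''
        if not start_method.startswith('mp_'):
            return None  # the default ``trio`` backend never imports mp

        import multiprocessing as mp  # deferred, side-effectful import
        return mp.get_context(start_method.removeprefix('mp_'))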

View File

@ -0,0 +1,12 @@
Add a new ``to_asyncio.LinkedTaskChannel.subscribe()`` which gives
task-oriented broadcast functionality semantically equivalent to
``tractor.MsgStream.subscribe()``; this makes it possible for multiple
``trio``-side tasks to consume ``asyncio``-side task msgs in tandem.
Further Improvements to the test suite were added in this patch set
including a new scenario test for a sub-actor managed "service nursery"
(implementing the basics of a "service manager") including use of
*infected asyncio* mode. Further we added a lower level
``test_trioisms.py`` to start to track issues we need to work around in
``trio`` itself which in this case included a bug we were trying to
solve related to https://github.com/python-trio/trio/issues/2258.
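A hedged sketch of the fan-out usage described above (the channel setup
via ``tractor.to_asyncio`` is elided; only the ``.subscribe()``
consumption pattern is shown):

```python
import trio

async def fan_out(chan, n_consumers: int = 2) -> None:
    # multiple trio-side tasks each get their own broadcast
    # receiver over the same asyncio-backed channel.
    async def consume(idx: int) -> None:
        async with chan.subscribe() as rx:
            async for msg in rx:
                print(f'consumer {idx} got {msg}')

    async with trio.open_nursery() as tn:
        for i in range(n_consumers):
            tn.start_soon(consume, i)
```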

View File

@ -1,16 +0,0 @@
Strictly support Python 3.10+, start runtime machinery reorg
Since we want to push forward using the new `match:` syntax for our
internal RPC-msg loops, we officially drop 3.9 support for the next
release which should coincide well with the first release of 3.11.
This patch set also officially removes the ``tractor.run()`` API (which
has been deprecated for some time) as well as starts an initial re-org
of the internal runtime core by:
- renaming ``tractor._actor`` -> ``._runtime``
- moving the ``._runtime.Actor._process_messages()`` and
``._async_main()`` to be module level singleton-task-functions since
they are only started once for each connection and actor spawn
respectively; this internal API thus looks more similar to (at the
time of writing) the ``trio``-internals in ``trio._core._run``.
- officially remove ``tractor.run()``, now deprecated for some time.

View File

@ -1,4 +0,0 @@
Only set `._debug.Lock.local_pdb_complete` if it has been created.
This can be triggered by a very rare race condition (and thus we have no
working test yet) but it is known to exist in (a) consumer project(s).

View File

@ -1,25 +0,0 @@
Add support for ``trio >= 0.22`` and support for the new Python 3.11
``[Base]ExceptionGroup`` from `pep 654`_ via the backported
`exceptiongroup`_ package and some final fixes to the debug mode
subsystem.
This port ended up driving some (hopefully) final fixes to our debugger
subsystem including the solution to all lingering stdstreams locking
race-conditions and deadlock scenarios. This includes extending the
debugger test suite as well as cancellation and ``asyncio`` mode cases.
Some of the notable details:
- always reverting to the ``trio`` SIGINT handler when leaving debug
mode.
- bypassing child attempts to acquire the debug lock when detected
to be amidst actor-runtime-cancellation.
- allowing the root actor to cancel local but IPC-stale subactor
requests-tasks for the debug lock when in a "no IPC peers" state.
Further we refined our ``ActorNursery`` semantics to be more similar to
``trio`` in the sense that parent task errors are always packed into the
actor-nursery emitted exception group and adjusted all tests and
examples accordingly.
.. _pep 654: https://peps.python.org/pep-0654/#handling-exception-groups
.. _exceptiongroup: https://github.com/python-trio/exceptiongroup

View File

@ -1,5 +0,0 @@
Establish an explicit "backend spawning" method table; use it from CI
More clearly lays out the current set of (3) backends: ``['trio',
'mp_spawn', 'mp_forkserver']`` and adjusts the ``._spawn.py`` internals
as well as the test suite to accommodate.

View File

@ -1,4 +0,0 @@
Add ``key: Callable[..., Hashable]`` support to ``.trionics.maybe_open_context()``
Gives users finer grained control over cache hit behaviour using
a callable which receives the input ``kwargs: dict``.
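A hedged usage sketch (``open_feed`` is a made-up manager and the exact
``key`` call convention is assumed to receive the call-time kwargs):

```python
from contextlib import asynccontextmanager
from tractor.trionics import maybe_open_context

@asynccontextmanager
async def open_feed(symbol: str, depth: int):
    # stand-in for some expensive, shareable resource
    yield {'symbol': symbol, 'depth': depth}

async def get_feed(symbol: str):
    # cache hits keyed on `symbol` alone, ignoring `depth`
    async with maybe_open_context(
        acm_func=open_feed,
        kwargs={'symbol': symbol, 'depth': 10},
        key=lambda **kwargs: kwargs['symbol'],
    ) as (cache_hit, feed):
        return cache_hit, feed
```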

View File

@ -1,41 +0,0 @@
Add support for debug-lock blocking using a ``._debug.Lock._blocked:
set[tuple]`` and add ids when no-more IPC connections with the
root actor are detected.
This is an enhancement which (mostly) solves a lingering debugger
locking race case we needed to handle:
- child crashes, acquires the TTY lock in the root and attaches to ``pdb``
- child IPC goes down such that all channels to the root are broken
/ non-functional.
- root is stuck thinking the child is still in debug even though it
can't be contacted and the child actor machinery hasn't been
cancelled by its parent.
- root gets stuck in deadlock with the child since it won't send a cancel
request until the child is finished debugging (to avoid clobbering
a child that is actually using the debugger), but the child can't
unlock the debugger bc IPC is down and it can't contact the root.
To avoid this scenario add debug lock blocking list via
`._debug.Lock._blocked: set[tuple]` which holds actor uids for any actor
that is detected by the root as having no transport channel connections
(of which at least one should exist if this sub-actor at some point
acquired the debug lock). The root consequently checks this list for any
actor that tries to (re)acquire the lock and blocks with
a ``ContextCancelled``. Further, when a debug condition is tested in
``._runtime._invoke``, the context's ``._enter_debugger_on_cancel`` is
set to `False` if the actor was put on the block list then all
post-mortem / crash handling will be bypassed for that task.
In theory this approach to block list management may cause problems
where some nested child actor acquires and releases the lock multiple
times and it gets stuck on the block list after the first use? If this
turns out to be an issue we can try changing the strat so blocks are
only added when the root has zero IPC peers left?
Further, this adds a root-locking-task side cancel scope,
``Lock._root_local_task_cs_in_debug``, which can be ``.cancel()``-ed by the root
runtime when a stale lock is detected during the IPC channel testing.
However, right now we're NOT using this since it seems to cause test
failures likely due to causing pre-mature cancellation and maybe needs
a bit more experimenting?
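A rough sketch of the blocking check described above (internal names
approximated, not the actual ``._debug`` code):

```python
import tractor

class Lock:
    # uids of actors the root will refuse the debug/TTY lock to
    _blocked: set[tuple] = set()

def maybe_block(uid: tuple) -> None:
    # called in the root when a sub-actor (re)requests the lock
    if uid in Lock._blocked:
        raise tractor.ContextCancelled(
            f'{uid} is blocked from acquiring the debug lock'
        )
```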

View File

@ -1,19 +0,0 @@
Rework our ``.trionics.BroadcastReceiver`` internals to avoid method
recursion and approach a design and interface closer to ``trio``'s
``MemoryReceiveChannel``.
The details of the internal changes include:
- implementing a ``BroadcastReceiver.receive_nowait()`` and using it
within the async ``.receive()`` thus avoiding recursion from
``.receive()``.
- failing over to an internal ``._receive_from_underlying()`` when the
``_nowait()`` call raises ``trio.WouldBlock``
- adding ``BroadcastState.statistics()`` for debugging and testing both
internals and by users.
- add an internal ``BroadcastReceiver._raise_on_lag: bool`` which can be
set to avoid ``Lagged`` raising for possible use cases where a user
wants to choose between a [cheap or nasty
pattern](https://zguide.zeromq.org/docs/chapter7/#The-Cheap-or-Nasty-Pattern)
for the particular stream (we use this in ``piker``'s dark clearing
engine to avoid fast feeds breaking during HFT periods).
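The failover shape, roughly (a sketch, not the actual
``tractor.trionics`` implementation):

```python
import trio

class SketchReceiver:
    def receive_nowait(self):
        # pull from this receiver's local (already broadcast) queue
        # or raise `trio.WouldBlock` when it's empty.
        raise trio.WouldBlock

    async def _receive_from_underlying(self):
        # wait on the wrapped channel and re-broadcast to peers.
        await trio.sleep(0)
        return 'msg'

    async def receive(self):
        # no recursion: try the fast path then fail over once.
        try:
            return self.receive_nowait()
        except trio.WouldBlock:
            return await self._receive_from_underlying()
```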

View File

@ -1,11 +0,0 @@
Always ``list``-cast the ``mngrs`` input to
``.trionics.gather_contexts()`` and ensure its size, otherwise raise
a ``ValueError``.
Turns out that trying to pass an inline-style generator comprehension
doesn't seem to work inside the ``async with`` expression? Further, in
such a case we can get a hang waiting on the all-entered event
completion when the internal mngrs iteration is a noop. Instead we
always greedily check a size and error on empty input; the lazy
iteration of a generator input is not beneficial anyway since we're
entering all manager instances in concurrent tasks.
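In user terms: materialize the managers into a sized collection before
handing them over, e.g. (``open_thing`` is a hypothetical manager used
only for illustration):

```python
from contextlib import asynccontextmanager
from tractor.trionics import gather_contexts

@asynccontextmanager
async def open_thing(i: int):
    yield i

async def main():
    # a list-comp (sized), NOT a lazy generator expression
    async with gather_contexts(
        [open_thing(i) for i in range(3)],
    ) as results:
        print(results)
```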

View File

@ -1,15 +0,0 @@
Fixes to ensure IPC (channel) breakage doesn't result in hung actor
trees; the zombie reaping and general supervision machinery will always
clean up and terminate.
This includes not only the (mostly minor) fixes to solve these cases but
also a new extensive test suite in `test_advanced_faults.py` with an
accompanying highly configurable example module-script in
`examples/advanced_faults/ipc_failure_during_stream.py`. Tests ensure we
never get hangs or zombies despite operating in debug mode and attempt to
simulate all possible IPC transport failure cases for a local-host actor
tree.
Further we simplify `Context.open_stream.__aexit__()` to just call
`MsgStream.aclose()` directly more or less avoiding a pure duplicate
code path.

View File

@ -1,10 +0,0 @@
Always redraw the `pdbpp` prompt on `SIGINT` during REPL use.
There were recent changes to do with Python 3.10 that required us to pin
to a specific commit in `pdbpp` which have recently been fixed minus
this last issue with `SIGINT` shielding: not clobbering or not
showing the `(Pdb++)` prompt on ctrl-c by the user. This repairs all
that by firstly removing the standard KBI intercepting of the std lib's
`pdb.Pdb._cmdloop()` as well as ensuring that only the actor with REPL
control ever reports `SIGINT` handler log msgs and prompt redraws. With
this we move back to using pypi `pdbpp` release.

View File

@ -1,7 +0,0 @@
Drop `trio.Process.aclose()` usage, copy into our spawning code.
The details are laid out in https://github.com/goodboy/tractor/issues/330.
`trio` changed its process running quite some time ago; this just copies
out the small bit we needed (from the old `.aclose()`) for hard kills
where a soft runtime cancel request fails and our "zombie killer"
implementation kicks in.

View File

@ -1,15 +0,0 @@
Switch to using the fork & fix of `pdb++`, `pdbp`:
https://github.com/mdmintz/pdbp
Allows us to sidestep a variety of issues that aren't being maintained
in the upstream project thanks to the hard work of @mdmintz!
We also include some default settings adjustments as per recent
development on the fork:
- sticky mode is still turned on by default but now activates when
using the `ll` repl command.
- turn off line truncation by default to avoid inter-line gaps when
resizing the terminal during use.
- when using the backtrace cmd either by `w` or `bt`, the config
automatically switches to non-sticky mode.

View File

@ -1,18 +0,0 @@
First generate a source distribution (sdist):
```
python -m pip install --upgrade build
python -m build --sdist --outdir dist/alpha5/
```
Then try a test ``pypi`` upload:
```
python -m twine upload --repository testpypi dist/alpha5/*
```
Then push to `pypi` for realz.
```
python -m twine upload dist/alpha5/*
```

View File

@ -1,158 +0,0 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
# ------ build-system ------
[project]
name = "tractor"
version = "0.1.0a6dev0"
description = 'structured concurrent `trio`-"actors"'
authors = [{ name = "Tyler Goodlet", email = "goodboy_foss@protonmail.com" }]
requires-python = ">= 3.11"
readme = "docs/README.rst"
license = "AGPL-3.0-or-later"
keywords = [
"trio",
"async",
"concurrency",
"structured concurrency",
"actor model",
"distributed",
"multiprocessing",
]
classifiers = [
"Development Status :: 3 - Alpha",
"Operating System :: POSIX :: Linux",
"Framework :: Trio",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.11",
"Topic :: System :: Distributed Computing",
]
dependencies = [
# trio runtime and friends
# (poetry) proper range specs,
# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5
# TODO, for 3.13 we must go to `0.27` which means we have to
# disable strict egs or port to handling them internally!
"trio>0.27",
"tricycle>=0.4.1,<0.5",
"wrapt>=1.16.0,<2",
"colorlog>=6.8.2,<7",
# built-in multi-actor `pdb` REPL
"pdbp>=1.6,<2", # windows only (from `pdbp`)
# typed IPC msging
"msgspec>=0.19.0",
]
# ------ project ------
[dependency-groups]
dev = [
# test suite
# TODO: maybe some of these layout choices?
# https://docs.pytest.org/en/8.0.x/explanation/goodpractices.html#choosing-a-test-layout-import-rules
"pytest>=8.3.5",
"pexpect>=4.9.0,<5",
# `tractor.devx` tooling
"greenback>=1.2.1,<2",
"stackscope>=0.2.2,<0.3",
"pyperclip>=1.9.0",
"prompt-toolkit>=3.0.50",
"xonsh>=0.19.2",
]
# TODO, add these with sane versions; were originally in
# `requirements-docs.txt`..
# docs = [
# "sphinx>="
# "sphinx_book_theme>="
# ]
# ------ dependency-groups ------
# ------ dependency-groups ------
[tool.uv.sources]
# XXX NOTE, only for @goodboy's hacking on `pprint(sort_dicts=False)`
# for the `pp` alias..
# pdbp = { path = "../pdbp", editable = true }
# ------ tool.uv.sources ------
# TODO, distributed (multi-host) extensions
# linux kernel networking
# 'pyroute2
# ------ tool.uv.sources ------
[tool.uv]
# XXX NOTE, prefer the sys python bc apparently the distis from
# `astral` are built in a way that breaks `pdbp`+`tabcompleter`'s
# likely due to linking against `libedit` over `readline`..
# |_https://docs.astral.sh/uv/concepts/python-versions/#managed-python-distributions
# |_https://gregoryszorc.com/docs/python-build-standalone/main/quirks.html#use-of-libedit-on-linux
#
# https://docs.astral.sh/uv/reference/settings/#python-preference
python-preference = 'system'
# ------ tool.uv ------
[tool.hatch.build.targets.sdist]
include = ["tractor"]
[tool.hatch.build.targets.wheel]
include = ["tractor"]
# ------ tool.hatch ------
[tool.towncrier]
package = "tractor"
filename = "NEWS.rst"
directory = "nooz/"
version = "0.1.0a6"
title_format = "tractor {version} ({project_date})"
template = "nooz/_template.rst"
all_bullets = true
[[tool.towncrier.type]]
directory = "feature"
name = "Features"
showcontent = true
[[tool.towncrier.type]]
directory = "bugfix"
name = "Bug Fixes"
showcontent = true
[[tool.towncrier.type]]
directory = "doc"
name = "Improved Documentation"
showcontent = true
[[tool.towncrier.type]]
directory = "trivial"
name = "Trivial/Internal Changes"
showcontent = true
# ------ tool.towncrier ------
[tool.pytest.ini_options]
minversion = '6.0'
testpaths = [
'tests'
]
addopts = [
# TODO: figure out why this isn't working..
'--rootdir=./tests',
'--import-mode=importlib',
# don't show frickin captured logs AGAIN in the report..
'--show-capture=no',
]
log_cli = false
# TODO: maybe some of these layout choices?
# https://docs.pytest.org/en/8.0.x/explanation/goodpractices.html#choosing-a-test-layout-import-rules
# pythonpath = "src"
# ------ tool.pytest ------

View File

@ -1,8 +0,0 @@
# vim: ft=ini
# pytest.ini for tractor
[pytest]
# don't show frickin captured logs AGAIN in the report..
addopts = --show-capture='no'
log_cli = false
; minversion = 6.0

View File

@ -0,0 +1,2 @@
sphinx
sphinx_book_theme

View File

@ -0,0 +1,7 @@
pytest
pytest-trio
pdbpp
mypy<0.920
trio_typing<0.7.0
pexpect
towncrier

View File

@ -1,82 +0,0 @@
# from default `ruff.toml` @
# https://docs.astral.sh/ruff/configuration/
# Exclude a variety of commonly ignored directories.
exclude = [
".bzr",
".direnv",
".eggs",
".git",
".git-rewrite",
".hg",
".ipynb_checkpoints",
".mypy_cache",
".nox",
".pants.d",
".pyenv",
".pytest_cache",
".pytype",
".ruff_cache",
".svn",
".tox",
".venv",
".vscode",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"site-packages",
"venv",
]
# Same as Black.
line-length = 88
indent-width = 4
# Assume Python 3.11
target-version = "py311"
[lint]
# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or
# McCabe complexity (`C901`) by default.
select = ["E4", "E7", "E9", "F"]
ignore = [
'E402', # https://docs.astral.sh/ruff/rules/module-import-not-at-top-of-file/
]
# Allow fix for all enabled rules (when `--fix`) is provided.
fixable = ["ALL"]
unfixable = []
# Allow unused variables when underscore-prefixed.
# dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
[format]
# Use single quotes in `ruff format`.
quote-style = "single"
# Like Black, indent with spaces, rather than tabs.
indent-style = "space"
# Like Black, respect magic trailing commas.
skip-magic-trailing-comma = false
# Like Black, automatically detect the appropriate line ending.
line-ending = "auto"
# Enable auto-formatting of code examples in docstrings. Markdown,
# reStructuredText code/literal blocks and doctests are all supported.
#
# This is currently disabled by default, but it is planned for this
# to be opt-out in the future.
docstring-code-format = false
# Set the line length limit used when formatting code snippets in
# docstrings.
#
# This only has an effect when the `docstring-code-format` setting is
# enabled.
docstring-code-line-length = "dynamic"

93
setup.py 100755
View File

@ -0,0 +1,93 @@
#!/usr/bin/env python
#
# tractor: structured concurrent "actors".
#
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from setuptools import setup
with open('docs/README.rst', encoding='utf-8') as f:
readme = f.read()
setup(
name="tractor",
version='0.1.0a5.dev', # alpha zone
description='structured concurrent "actors"',
long_description=readme,
license='AGPLv3',
author='Tyler Goodlet',
maintainer='Tyler Goodlet',
maintainer_email='jgbt@protonmail.com',
url='https://github.com/goodboy/tractor',
platforms=['linux', 'windows'],
packages=[
'tractor',
'tractor.experimental',
'tractor.trionics',
'tractor.testing',
],
install_requires=[
# trio related
'trio>0.8',
'async_generator',
'trio_typing',
# tooling
'tricycle',
'colorlog',
'wrapt',
'pdbpp',
# serialization
'msgpack>=1.0.3',
],
extras_require={
# serialization
'msgspec': ['msgspec >= "0.4.0"'],
},
tests_require=['pytest'],
python_requires=">=3.9",
keywords=[
'trio',
"async",
"concurrency",
"actor model",
"distributed",
'multiprocessing'
],
classifiers=[
"Development Status :: 3 - Alpha",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Framework :: Trio",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: System :: Distributed Computing",
],
)

View File

View File

@ -11,14 +11,14 @@ import time
import pytest
import tractor
from tractor._testing import (
examples_dir as examples_dir,
tractor_test as tractor_test,
expect_ctxc as expect_ctxc,
)
# TODO: include wtv plugin(s) we build in `._testing.pytest`?
# export for tests
from tractor.testing import tractor_test # noqa
pytest_plugins = ['pytester']
_arb_addr = '127.0.0.1', random.randint(1000, 9999)
# Sending signal.SIGINT on subprocess fails on windows. Use CTRL_* alternatives
if platform.system() == 'Windows':
@ -39,46 +39,36 @@ no_windows = pytest.mark.skipif(
)
def repodir():
"""Return the abspath to the repo directory.
"""
dirname = os.path.dirname
dirpath = os.path.abspath(
dirname(dirname(os.path.realpath(__file__)))
)
return dirpath
def pytest_addoption(parser):
parser.addoption(
"--ll",
action="store",
dest='loglevel',
"--ll", action="store", dest='loglevel',
default='ERROR', help="logging level to set when testing"
)
parser.addoption(
"--spawn-backend",
action="store",
dest='spawn_backend',
"--spawn-backend", action="store", dest='spawn_backend',
default='trio',
help="Processing spawning backend to use for test run",
)
parser.addoption(
"--tpdb", "--debug-mode",
action="store_true",
dest='tractor_debug_mode',
# default=False,
help=(
'Enable a flag that can be used by tests to set the '
'`debug_mode: bool` for engaging the internal '
'multi-proc debugger sys.'
),
)
def pytest_configure(config):
backend = config.option.spawn_backend
tractor._spawn.try_set_start_method(backend)
@pytest.fixture(scope='session')
def debug_mode(request):
debug_mode: bool = request.config.option.tractor_debug_mode
# if debug_mode:
# breakpoint()
return debug_mode
if backend == 'mp':
tractor._spawn.try_set_start_method('spawn')
elif backend == 'trio':
tractor._spawn.try_set_start_method(backend)
@pytest.fixture(scope='session', autouse=True)
@ -91,84 +81,42 @@ def loglevel(request):
@pytest.fixture(scope='session')
def spawn_backend(request) -> str:
def spawn_backend(request):
return request.config.option.spawn_backend
# @pytest.fixture(scope='function', autouse=True)
# def debug_enabled(request) -> str:
# from tractor import _state
# if _state._runtime_vars['_debug_mode']:
# breakpoint()
_ci_env: bool = os.environ.get('CI', False)
@pytest.fixture(scope='session')
def ci_env() -> bool:
'''
Detect CI environment.
'''
return _ci_env
# TODO: also move this to `._testing` for now?
# -[ ] possibly generalize and re-use for multi-tree spawning
# along with the new stuff for multi-addrs in distribute_dis
# branch?
#
# choose randomly at import time
_reg_addr: tuple[str, int] = (
'127.0.0.1',
random.randint(1000, 9999),
)
"""Detect CI envoirment.
"""
return os.environ.get('TRAVIS', False) or os.environ.get('CI', False)
@pytest.fixture(scope='session')
def reg_addr() -> tuple[str, int]:
# globally override the runtime to the per-test-session-dynamic
# addr so that all tests never conflict with any other actor
# tree using the default.
from tractor import _root
_root._default_lo_addrs = [_reg_addr]
return _reg_addr
def arb_addr():
return _arb_addr
def pytest_generate_tests(metafunc):
spawn_backend = metafunc.config.option.spawn_backend
if not spawn_backend:
# XXX some weird windows bug with `pytest`?
spawn_backend = 'trio'
spawn_backend = 'mp'
assert spawn_backend in ('mp', 'trio')
# TODO: maybe just use the literal `._spawn.SpawnMethodKey`?
assert spawn_backend in (
'mp_spawn',
'mp_forkserver',
'trio',
)
# NOTE: used to be used to dynamically parametrize tests for when
# you just passed --spawn-backend=`mp` on the cli, but now we expect
# that cli input to be manually specified, BUT, maybe we'll do
# something like this again in the future?
if 'start_method' in metafunc.fixturenames:
metafunc.parametrize("start_method", [spawn_backend], scope='module')
if spawn_backend == 'mp':
from multiprocessing import get_all_start_methods
methods = get_all_start_methods()
if 'fork' in methods:
# fork not available on windows, so check before
# removing XXX: the fork method is in general
# incompatible with trio's global scheduler state
methods.remove('fork')
elif spawn_backend == 'trio':
methods = ['trio']
# TODO: a way to let test scripts (like from `examples/`)
# guarantee they won't registry addr collide!
# @pytest.fixture
# def open_test_runtime(
# reg_addr: tuple,
# ) -> AsyncContextManager:
# return partial(
# tractor.open_nursery,
# registry_addrs=[reg_addr],
# )
metafunc.parametrize("start_method", methods, scope='module')
def sig_prog(proc, sig):
@ -183,40 +131,28 @@ def sig_prog(proc, sig):
assert ret
# TODO: factor into @cm and move to `._testing`?
@pytest.fixture
def daemon(
loglevel: str,
testdir,
reg_addr: tuple[str, int],
):
'''
Run a daemon root actor as a separate actor-process tree and
"remote registrar" for discovery-protocol related tests.
'''
def daemon(loglevel, testdir, arb_addr):
"""Run a daemon actor as a "remote arbiter".
"""
if loglevel in ('trace', 'debug'):
# XXX: too much logging will lock up the subproc (smh)
loglevel: str = 'info'
# too much logging will lock up the subproc (smh)
loglevel = 'info'
code: str = (
"import tractor; "
"tractor.run_daemon([], registry_addrs={reg_addrs}, loglevel={ll})"
).format(
reg_addrs=str([reg_addr]),
ll="'{}'".format(loglevel) if loglevel else None,
)
cmd: list[str] = [
sys.executable,
'-c', code,
cmdargs = [
sys.executable, '-c',
"import tractor; tractor.run_daemon([], arbiter_addr={}, loglevel={})"
.format(
arb_addr,
"'{}'".format(loglevel) if loglevel else None)
]
kwargs = {}
kwargs = dict()
if platform.system() == 'Windows':
# without this, tests hang on windows forever
kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
proc = testdir.popen(
cmd,
cmdargs,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs,

View File

View File

@ -1,243 +0,0 @@
'''
`tractor.devx.*` tooling sub-pkg test space.
'''
import time
from typing import (
Callable,
)
import pytest
from pexpect.exceptions import (
TIMEOUT,
)
from pexpect.spawnbase import SpawnBase
from tractor._testing import (
mk_cmd,
)
from tractor.devx._debug import (
_pause_msg as _pause_msg,
_crash_msg as _crash_msg,
_repl_fail_msg as _repl_fail_msg,
_ctlc_ignore_header as _ctlc_ignore_header,
)
from ..conftest import (
_ci_env,
)
@pytest.fixture
def spawn(
start_method,
testdir: pytest.Pytester,
reg_addr: tuple[str, int],
) -> Callable[[str], None]:
'''
Use the `pexpect` module shipped via `testdir.spawn()` to
run an `./examples/..` script by name.
'''
if start_method != 'trio':
pytest.skip(
'`pexpect` based tests only supported on `trio` backend'
)
def unset_colors():
'''
Python 3.13 introduced colored tracebacks that break pattern
matching,
https://docs.python.org/3/using/cmdline.html#envvar-PYTHON_COLORS
https://docs.python.org/3/using/cmdline.html#using-on-controlling-color
'''
import os
os.environ['PYTHON_COLORS'] = '0'
def _spawn(
cmd: str,
**mkcmd_kwargs,
):
unset_colors()
return testdir.spawn(
cmd=mk_cmd(
cmd,
**mkcmd_kwargs,
),
expect_timeout=3,
# preexec_fn=unset_colors,
# ^TODO? get `pytest` core to expose underlying
# `pexpect.spawn()` stuff?
)
# such that the dependent test can pass the input script name.
return _spawn
@pytest.fixture(
params=[False, True],
ids='ctl-c={}'.format,
)
def ctlc(
request,
ci_env: bool,
) -> bool:
use_ctlc = request.param
node = request.node
markers = node.own_markers
for mark in markers:
if mark.name == 'has_nested_actors':
pytest.skip(
f'Test {node} has nested actors and fails with Ctrl-C.\n'
f'The test can sometimes run fine locally but until'
' we solve this issue this CI test will be xfail:\n'
'https://github.com/goodboy/tractor/issues/320'
)
if mark.name == 'ctlcs_bish':
pytest.skip(
f'Test {node} prolly uses something from the stdlib (namely `asyncio`..)\n'
f'The test and/or underlying example script can *sometimes* run fine '
f'locally but more than likely until the cpython peeps get their sh#$ together, '
f'this test will definitely not behave like `trio` under SIGINT..\n'
)
if use_ctlc:
# XXX: disable pygments highlighting for auto-tests
# since some envs (like actions CI) will struggle
# with the added color-char encoding..
from tractor.devx._debug import TractorConfig
TractorConfig.use_pygements = False
yield use_ctlc
def expect(
child,
# normally a `pdb` prompt by default
patt: str,
**kwargs,
) -> None:
'''
Expect wrapper that prints last seen console
data before failing.
'''
try:
child.expect(
patt,
**kwargs,
)
except TIMEOUT:
before = str(child.before.decode())
print(before)
raise
PROMPT = r"\(Pdb\+\)"
def in_prompt_msg(
child: SpawnBase,
parts: list[str],
pause_on_false: bool = False,
err_on_false: bool = False,
print_prompt_on_false: bool = True,
) -> bool:
'''
Predicate check if (the prompt's) std-streams output has all
`str`-parts in it.
Can be used in test asserts for bulk matching expected
log/REPL output for a given `pdb` interact point.
'''
__tracebackhide__: bool = False
before: str = str(child.before.decode())
for part in parts:
if part not in before:
if pause_on_false:
import pdbp
pdbp.set_trace()
if print_prompt_on_false:
print(before)
if err_on_false:
raise ValueError(
f'Could not find pattern in `before` output?\n'
f'part: {part!r}\n'
)
return False
return True
# TODO: support terminal color-chars stripping so we can match
# against call stack frame output from the 'll' command and the like!
# -[ ] SO answer for stripping ANSI codes: https://stackoverflow.com/a/14693789
def assert_before(
child: SpawnBase,
patts: list[str],
**kwargs,
) -> None:
__tracebackhide__: bool = False
assert in_prompt_msg(
child=child,
parts=patts,
# since this is an "assert" helper ;)
err_on_false=True,
**kwargs
)
def do_ctlc(
child,
count: int = 3,
delay: float = 0.1,
patt: str|None = None,
# expect repl UX to reprint the prompt after every
# ctrl-c send.
# XXX: no idea but, in CI this never seems to work even on 3.10 so
# needs some further investigation potentially...
expect_prompt: bool = not _ci_env,
) -> str|None:
before: str|None = None
# make sure ctl-c sends don't do anything but repeat output
for _ in range(count):
time.sleep(delay)
child.sendcontrol('c')
# TODO: figure out why this makes CI fail..
# if you run this test manually it works just fine..
if expect_prompt:
time.sleep(delay)
child.expect(PROMPT)
before = str(child.before.decode())
time.sleep(delay)
if patt:
# should see the last line on console
assert patt in before
# return the console content up to the final prompt
return before

File diff suppressed because it is too large

View File

@ -1,381 +0,0 @@
'''
That "foreign loop/thread" debug REPL support better ALSO WORK!
Same as `test_native_pause.py`.
All these tests can be understood (somewhat) by running the
equivalent `examples/debugging/` scripts manually.
'''
from contextlib import (
contextmanager as cm,
)
# from functools import partial
# import itertools
import time
# from typing import (
# Iterator,
# )
import pytest
from pexpect.exceptions import (
TIMEOUT,
EOF,
)
from .conftest import (
# _ci_env,
do_ctlc,
PROMPT,
# expect,
in_prompt_msg,
assert_before,
_pause_msg,
_crash_msg,
_ctlc_ignore_header,
# _repl_fail_msg,
)
@cm
def maybe_expect_timeout(
ctlc: bool = False,
) -> None:
try:
yield
except TIMEOUT:
# breakpoint()
if ctlc:
pytest.xfail(
'Some kinda redic threading SIGINT bug i think?\n'
'See the notes in `examples/debugging/sync_bp.py`..\n'
)
raise
@pytest.mark.ctlcs_bish
def test_pause_from_sync(
spawn,
ctlc: bool,
):
'''
Verify we can use the `pdbp` REPL from sync functions AND from
any thread spawned with `trio.to_thread.run_sync()`.
`examples/debugging/sync_bp.py`
'''
child = spawn('sync_bp')
# first `sync_pause()` after nurseries open
child.expect(PROMPT)
assert_before(
child,
[
# pre-prompt line
_pause_msg,
"<Task '__main__.main'",
"('root'",
]
)
if ctlc:
do_ctlc(child)
# ^NOTE^ subactor not spawned yet; don't need extra delay.
child.sendline('c')
# first `await tractor.pause()` inside `p.open_context()` body
child.expect(PROMPT)
# XXX shouldn't see gb loaded message with PDB loglevel!
# assert not in_prompt_msg(
# child,
# ['`greenback` portal opened!'],
# )
# should be same root task
assert_before(
child,
[
_pause_msg,
"<Task '__main__.main'",
"('root'",
]
)
if ctlc:
do_ctlc(
child,
# NOTE: setting this to 0 (or some other sufficient
# small val) can cause the test to fail since the
# `subactor` suffers a race where the root/parent
# sends an actor-cancel prior to it hitting its pause
# point; by def the value is 0.1
delay=0.4,
)
# XXX, fwiw without a brief sleep here the SIGINT might actually
# trigger "subactor" cancellation by its parent before the
# shield-handler is engaged.
#
# => similar to the `delay` input to `do_ctlc()` below, setting
# this too low can cause the test to fail since the `subactor`
# suffers a race where the root/parent sends an actor-cancel
# prior to the context task hitting its pause point (and thus
# engaging the `sigint_shield()` handler in time); this value
# seems to be good enuf?
time.sleep(0.6)
# one of the bg thread or subactor should have
# `Lock.acquire()`-ed
# (NOT both, which will result in REPL clobbering!)
attach_patts: dict[str, list[str]] = {
'subactor': [
"'start_n_sync_pause'",
"('subactor'",
],
'inline_root_bg_thread': [
"<Thread(inline_root_bg_thread",
"('root'",
],
'start_soon_root_bg_thread': [
"<Thread(start_soon_root_bg_thread",
"('root'",
],
}
conts: int = 0 # for debugging below matching logic on failure
while attach_patts:
child.sendline('c')
conts += 1
child.expect(PROMPT)
before = str(child.before.decode())
for key in attach_patts:
if key in before:
attach_key: str = key
expected_patts: str = attach_patts.pop(key)
assert_before(
child,
[_pause_msg]
+
expected_patts
)
break
else:
pytest.fail(
f'No keys found?\n\n'
f'{attach_patts.keys()}\n\n'
f'{before}\n'
)
# ensure no other task/threads engaged a REPL
# at the same time as the one that was detected above.
for key, other_patts in attach_patts.copy().items():
assert not in_prompt_msg(
child,
other_patts,
)
if ctlc:
do_ctlc(
child,
patt=attach_key,
# NOTE same as comment above
delay=0.4,
)
child.sendline('c')
# XXX TODO, weird threading bug it seems despite the
# `abandon_on_cancel: bool` setting to
# `trio.to_thread.run_sync()`..
with maybe_expect_timeout(
ctlc=ctlc,
):
child.expect(EOF)
def expect_any_of(
attach_patts: dict[str, list[str]],
child, # what type?
ctlc: bool = False,
prompt: str = _ctlc_ignore_header,
ctlc_delay: float = .4,
) -> list[str]:
'''
Receive any of a `list[str]` of patterns provided in
`attach_patts`.
Used to test racing prompts from multiple actors and/or
tasks using a common root process' `pdbp` REPL.
'''
assert attach_patts
child.expect(PROMPT)
before = str(child.before.decode())
for attach_key in attach_patts:
if attach_key in before:
expected_patts: str = attach_patts.pop(attach_key)
assert_before(
child,
expected_patts
)
break # from for
else:
pytest.fail(
f'No keys found?\n\n'
f'{attach_patts.keys()}\n\n'
f'{before}\n'
)
# ensure no other task/threads engaged a REPL
# at the same time as the one that was detected above.
for key, other_patts in attach_patts.copy().items():
assert not in_prompt_msg(
child,
other_patts,
)
if ctlc:
do_ctlc(
child,
patt=prompt,
# NOTE same as comment above
delay=ctlc_delay,
)
return expected_patts
@pytest.mark.ctlcs_bish
def test_sync_pause_from_aio_task(
spawn,
ctlc: bool
# ^TODO, fix for `asyncio`!!
):
'''
Verify we can use the `pdbp` REPL from an `asyncio.Task` spawned using
APIs in `.to_asyncio`.
`examples/debugging/asycio_bp.py`
'''
child = spawn('asyncio_bp')
# RACE on whether trio/asyncio task bps first
attach_patts: dict[str, list[str]] = {
# first pause in guest-mode (aka "infecting")
# `trio.Task`.
'trio-side': [
_pause_msg,
"<Task 'trio_ctx'",
"('aio_daemon'",
],
# `breakpoint()` from `asyncio.Task`.
'asyncio-side': [
_pause_msg,
"<Task pending name='Task-2' coro=<greenback_shim()",
"('aio_daemon'",
],
}
while attach_patts:
expect_any_of(
attach_patts=attach_patts,
child=child,
ctlc=ctlc,
)
child.sendline('c')
# NOW in race order,
# - the asyncio-task will error
# - the root-actor parent task will pause
#
attach_patts: dict[str, list[str]] = {
# error raised in `asyncio.Task`
"raise ValueError('asyncio side error!')": [
_crash_msg,
"<Task 'trio_ctx'",
"@ ('aio_daemon'",
"ValueError: asyncio side error!",
# XXX, we no longer show this frame by default!
# 'return await chan.receive()', # `.to_asyncio` impl internals in tb
],
# parent-side propagation via actor-nursery/portal
# "tractor._exceptions.RemoteActorError: remote task raised a 'ValueError'": [
"remote task raised a 'ValueError'": [
_crash_msg,
"src_uid=('aio_daemon'",
"('aio_daemon'",
],
# a final pause in root-actor
"<Task '__main__.main'": [
_pause_msg,
"<Task '__main__.main'",
"('root'",
],
}
while attach_patts:
expect_any_of(
attach_patts=attach_patts,
child=child,
ctlc=ctlc,
)
child.sendline('c')
assert not attach_patts
# final boxed error propagates to root
assert_before(
child,
[
_crash_msg,
"<Task '__main__.main'",
"('root'",
"remote task raised a 'ValueError'",
"ValueError: asyncio side error!",
]
)
if ctlc:
do_ctlc(
child,
# NOTE: setting this to 0 (or some other sufficient
# small val) can cause the test to fail since the
# `subactor` suffers a race where the root/parent
# sends an actor-cancel prior to it hitting its pause
# point; by def the value is 0.1
delay=0.4,
)
child.sendline('c')
# with maybe_expect_timeout():
child.expect(EOF)
def test_sync_pause_from_non_greenbacked_aio_task():
'''
Where the `breakpoint()` caller task is NOT spawned by
`tractor.to_asyncio` and thus never activates
a `greenback.ensure_portal()` beforehand, presumably bc the task
was started by some lib/dep as is often seen in the field.
Ensure sync pausing works when the pause is in,
- the root actor running in infected-mode?
|_ since we don't need any IPC to acquire the debug lock?
|_ is there some way to handle this like the non-main-thread case?
All other cases need to error out appropriately right?
- for any subactor we can't avoid needing the repl lock..
|_ is there a way to hook into `asyncio.ensure_future(obj)`?
'''
pass

View File

@ -1,172 +0,0 @@
'''
That "native" runtime-hackin toolset better be dang useful!
Verify the function of a variety of "developer-experience" tools we
offer from the `.devx` sub-pkg:
- use of the lovely `stackscope` for dumping actor `trio`-task trees
during operation and hangs.
TODO:
- demonstration of `CallerInfo` call stack frame filtering such that
for logging and REPL purposes a user sees exactly the layers needed
when debugging a problem inside the stack vs. in their app.
'''
import os
import signal
import time
from .conftest import (
expect,
assert_before,
in_prompt_msg,
PROMPT,
_pause_msg,
)
from pexpect.exceptions import (
# TIMEOUT,
EOF,
)
def test_shield_pause(
spawn,
):
'''
Verify the `tractor.pause()/.post_mortem()` API works inside an
already cancelled `trio.CancelScope` and that you can step to the
next checkpoint wherein the cancelled will get raised.
'''
child = spawn(
'shield_hang_in_sub'
)
expect(
child,
'Yo my child hanging..?',
)
assert_before(
child,
[
'Entering shield sleep..',
'Enabling trace-trees on `SIGUSR1` since `stackscope` is installed @',
]
)
script_pid: int = child.pid
print(
f'Sending SIGUSR1 to {script_pid}\n'
f'(kill -s SIGUSR1 {script_pid})\n'
)
os.kill(
script_pid,
signal.SIGUSR1,
)
time.sleep(0.2)
expect(
child,
# end-of-tree delimiter
"end-of-\('root'",
)
assert_before(
child,
[
# 'Srying to dump `stackscope` tree..',
# 'Dumping `stackscope` tree for actor',
"('root'", # uid line
# TODO!? this used to show?
# -[ ] mk reproducable for @oremanj?
#
# parent block point (non-shielded)
# 'await trio.sleep_forever() # in root',
]
)
expect(
child,
# end-of-tree delimiter
"end-of-\('hanger'",
)
assert_before(
child,
[
# relay to the sub should be reported
'Relaying `SIGUSR1`[10] to sub-actor',
"('hanger'", # uid line
# TODO!? SEE ABOVE
# hanger LOC where it's shield-halted
# 'await trio.sleep_forever() # in subactor',
]
)
# simulate the user sending a ctl-c to the hanging program.
# this should result in the terminator kicking in since
# the sub is shield blocking and can't respond to SIGINT.
os.kill(
child.pid,
signal.SIGINT,
)
expect(
child,
'Shutting down actor runtime',
timeout=6,
)
assert_before(
child,
[
'raise KeyboardInterrupt',
# 'Shutting down actor runtime',
'#T-800 deployed to collect zombie B0',
"'--uid', \"('hanger',",
]
)
def test_breakpoint_hook_restored(
spawn,
):
'''
Ensures our actor runtime sets a custom `breakpoint()` hook
on open then restores the stdlib's default on close.
The hook state validation is done via `assert`s inside the
invoked script with only `breakpoint()` (not `tractor.pause()`)
calls used.
'''
child = spawn('restore_builtin_breakpoint')
child.expect(PROMPT)
assert_before(
child,
[
_pause_msg,
"<Task '__main__.main'",
"('root'",
"first bp, tractor hook set",
]
)
child.sendline('c')
child.expect(PROMPT)
assert_before(
child,
[
"last bp, stdlib hook restored",
]
)
# since the stdlib hook was already restored there should be NO
# `tractor` `log.pdb()` content from console!
assert not in_prompt_msg(
child,
[
_pause_msg,
"<Task '__main__.main'",
"('root'",
],
)
child.sendline('c')
child.expect(EOF)

View File

@ -1,290 +0,0 @@
'''
Sketchy network blackoutz, ugly byzantine gens, can you hear the
cancellation?..
'''
from functools import partial
from types import ModuleType
import pytest
from _pytest.pathlib import import_path
import trio
import tractor
from tractor._testing import (
examples_dir,
break_ipc,
)
@pytest.mark.parametrize(
'pre_aclose_msgstream',
[
False,
True,
],
ids=[
'no_msgstream_aclose',
'pre_aclose_msgstream',
],
)
@pytest.mark.parametrize(
'ipc_break',
[
# no breaks
{
'break_parent_ipc_after': False,
'break_child_ipc_after': False,
},
# only parent breaks
{
'break_parent_ipc_after': 500,
'break_child_ipc_after': False,
},
# only child breaks
{
'break_parent_ipc_after': False,
'break_child_ipc_after': 500,
},
# both: break parent first
{
'break_parent_ipc_after': 500,
'break_child_ipc_after': 800,
},
# both: break child first
{
'break_parent_ipc_after': 800,
'break_child_ipc_after': 500,
},
],
ids=[
'no_break',
'break_parent',
'break_child',
'break_both_parent_first',
'break_both_child_first',
],
)
def test_ipc_channel_break_during_stream(
debug_mode: bool,
loglevel: str,
spawn_backend: str,
ipc_break: dict|None,
pre_aclose_msgstream: bool,
):
'''
Ensure we can have an IPC channel break its connection during
streaming and it's still possible for the (simulated) user to kill
the actor tree using SIGINT.
We also verify the type of connection error expected in the parent
depending on which side of the IPC breaks first.
'''
if spawn_backend != 'trio':
if debug_mode:
pytest.skip('`debug_mode` only supported on `trio` spawner')
# non-`trio` spawners should never hit the hang condition that
# requires the user to do ctl-c to cancel the actor tree.
# expect_final_exc = trio.ClosedResourceError
expect_final_exc = tractor.TransportClosed
mod: ModuleType = import_path(
examples_dir() / 'advanced_faults'
/ 'ipc_failure_during_stream.py',
root=examples_dir(),
consider_namespace_packages=False,
)
# by def we expect KBI from user after a simulated "hang
# period" wherein the user eventually hits ctl-c to kill the
# root-actor tree.
expect_final_exc: BaseException = KeyboardInterrupt
if (
# only expect EoC if trans is broken on the child side,
ipc_break['break_child_ipc_after'] is not False
# AND we tell the child to call `MsgStream.aclose()`.
and pre_aclose_msgstream
):
# expect_final_exc = trio.EndOfChannel
# ^XXX NOPE! XXX^ since now `.open_stream()` absorbs this
# gracefully!
expect_final_exc = KeyboardInterrupt
# NOTE when ONLY the child breaks or it breaks BEFORE the
# parent we expect the parent to get a closed resource error
# on the next `MsgStream.receive()` and then fail out and
# cancel the child from there.
#
# ONLY CHILD breaks
if (
ipc_break['break_child_ipc_after']
and
ipc_break['break_parent_ipc_after'] is False
):
# NOTE: we DO NOT expect this any more since
# the child side's channel will be broken silently
# and nothing on the parent side will indicate this!
# expect_final_exc = trio.ClosedResourceError
# NOTE: child will send a 'stop' msg before it breaks
# the transport channel BUT, that will be absorbed by the
# `ctx.open_stream()` block and thus the `.open_context()`
# should hang, after which the test script simulates
# a user sending ctl-c by raising a KBI.
if pre_aclose_msgstream:
expect_final_exc = KeyboardInterrupt
# XXX OLD XXX
# if child calls `MsgStream.aclose()` then expect EoC.
# ^ XXX not any more ^ since eoc is always absorbed
# gracefully and NOT bubbled to the `.open_context()`
# block!
# expect_final_exc = trio.EndOfChannel
# BOTH but, CHILD breaks FIRST
elif (
ipc_break['break_child_ipc_after'] is not False
and (
ipc_break['break_parent_ipc_after']
> ipc_break['break_child_ipc_after']
)
):
if pre_aclose_msgstream:
expect_final_exc = KeyboardInterrupt
# NOTE when the parent IPC side dies (even if the child does as well
# but the child fails BEFORE the parent) we always expect the
# IPC layer to raise a closed-resource, NEVER do we expect
# a stop msg since the parent-side ctx apis will error out
# IMMEDIATELY before the child ever sends any 'stop' msg.
#
# ONLY PARENT breaks
elif (
ipc_break['break_parent_ipc_after']
and
ipc_break['break_child_ipc_after'] is False
):
# expect_final_exc = trio.ClosedResourceError
expect_final_exc = tractor.TransportClosed
# BOTH but, PARENT breaks FIRST
elif (
ipc_break['break_parent_ipc_after'] is not False
and (
ipc_break['break_child_ipc_after']
>
ipc_break['break_parent_ipc_after']
)
):
# expect_final_exc = trio.ClosedResourceError
expect_final_exc = tractor.TransportClosed
with pytest.raises(
expected_exception=(
expect_final_exc,
ExceptionGroup,
),
) as excinfo:
try:
trio.run(
partial(
mod.main,
debug_mode=debug_mode,
start_method=spawn_backend,
loglevel=loglevel,
pre_close=pre_aclose_msgstream,
**ipc_break,
)
)
except KeyboardInterrupt as _kbi:
kbi = _kbi
if expect_final_exc is not KeyboardInterrupt:
pytest.fail(
'Rxed unexpected KBI !?\n'
f'{repr(kbi)}'
)
raise
except tractor.TransportClosed as _tc:
tc = _tc
if expect_final_exc is KeyboardInterrupt:
pytest.fail(
'Unexpected transport failure !?\n'
f'{repr(tc)}'
)
cause: Exception = tc.__cause__
assert (
type(cause) is trio.ClosedResourceError
and
cause.args[0] == 'another task closed this fd'
)
raise
# get raw instance from pytest wrapper
value = excinfo.value
if isinstance(value, ExceptionGroup):
excs = value.exceptions
assert len(excs) == 1
final_exc = excs[0]
assert isinstance(final_exc, expect_final_exc)
@tractor.context
async def break_ipc_after_started(
ctx: tractor.Context,
) -> None:
await ctx.started()
async with ctx.open_stream() as stream:
# TODO: make a test which verifies the error
# for this, i.e. raises a `MsgTypeError`
# await ctx.chan.send(None)
await break_ipc(
stream=stream,
pre_close=True,
)
print('child broke IPC and terminating')
def test_stream_closed_right_after_ipc_break_and_zombie_lord_engages():
'''
Verify that if a subactor's IPC goes down just after bringing up
a stream, the parent can trigger a SIGINT and the child will be
reaped out-of-IPC by the localhost process supervision machinery:
aka "zombie lord".
'''
async def main():
with trio.fail_after(3):
async with tractor.open_nursery() as an:
portal = await an.start_actor(
'ipc_breaker',
enable_modules=[__name__],
)
with trio.move_on_after(1):
async with (
portal.open_context(
break_ipc_after_started
) as (ctx, sent),
):
async with ctx.open_stream():
await trio.sleep(0.5)
print('parent waiting on context')
print(
'parent exited context\n'
'parent raising KBI..\n'
)
raise KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
trio.run(main)

View File

@ -5,8 +5,8 @@ Advanced streaming patterns using bidirectional streams and contexts.
from collections import Counter
import itertools
import platform
from typing import Set, Dict, List
import pytest
import trio
import tractor
@ -15,7 +15,7 @@ def is_win():
return platform.system() == 'Windows'
_registry: dict[str, set[tractor.MsgStream]] = {
_registry: Dict[str, Set[tractor.ReceiveMsgStream]] = {
'even': set(),
'odd': set(),
}
@ -77,7 +77,7 @@ async def subscribe(
async def consumer(
subs: list[str],
subs: List[str],
) -> None:
@ -144,16 +144,8 @@ def test_dynamic_pub_sub():
try:
trio.run(main)
except (
trio.TooSlowError,
ExceptionGroup,
) as err:
if isinstance(err, ExceptionGroup):
for suberr in err.exceptions:
if isinstance(suberr, trio.TooSlowError):
break
else:
pytest.fail('Never got a `TooSlowError` ?')
except trio.TooSlowError:
pass
@tractor.context
@ -307,77 +299,44 @@ async def inf_streamer(
async with (
ctx.open_stream() as stream,
# XXX TODO, INTERESTING CASE!!
# - if we don't collapse the eg then the embedded
# `trio.EndOfChannel` doesn't propagate directly to the above
# .open_stream() parent, resulting in it also raising instead
# of gracefully absorbing as normal.. so how to handle?
trio.open_nursery(
strict_exception_groups=False,
) as tn,
trio.open_nursery() as n,
):
async def close_stream_on_sentinel():
async def bail_on_sentinel():
async for msg in stream:
if msg == 'done':
print(
'streamer RXed "done" sentinel msg!\n'
'CLOSING `MsgStream`!'
)
await stream.aclose()
else:
print(f'streamer received {msg}')
else:
print('streamer exited recv loop')
# start termination detector
tn.start_soon(close_stream_on_sentinel)
n.start_soon(bail_on_sentinel)
cap: int = 10000 # so that we don't spin forever when bug..
for val in range(cap):
for val in itertools.count():
try:
print(f'streamer sending {val}')
await stream.send(val)
if val > cap:
raise RuntimeError(
'Streamer never cancelled by sentinel?'
)
await trio.sleep(0.001)
# close out the stream gracefully
except trio.ClosedResourceError:
print('transport closed on streamer side!')
assert stream.closed
# close out the stream gracefully
break
else:
raise RuntimeError(
'Streamer not cancelled before finished sending?'
)
print('streamer exited .open_streamer() block')
print('terminating streamer')
def test_local_task_fanout_from_stream(
debug_mode: bool,
):
def test_local_task_fanout_from_stream():
'''
Single stream with multiple local consumer tasks using the
``MsgStream.subscribe()`` api.
Ensure all tasks receive all values after stream completes
sending.
Ensure all tasks receive all values after stream completes sending.
'''
consumers: int = 22
consumers = 22
async def main():
counts = Counter()
async with tractor.open_nursery(
debug_mode=debug_mode,
) as tn:
p: tractor.Portal = await tn.start_actor(
async with tractor.open_nursery() as tn:
p = await tn.start_actor(
'inf_streamer',
enable_modules=[__name__],
)
@ -385,6 +344,7 @@ def test_local_task_fanout_from_stream(
p.open_context(inf_streamer) as (ctx, _),
ctx.open_stream() as stream,
):
async def pull_and_count(name: str):
# name = trio.lowlevel.current_task().name
async with stream.subscribe() as recver:
@ -393,7 +353,7 @@ def test_local_task_fanout_from_stream(
tractor.trionics.BroadcastReceiver
)
async for val in recver:
print(f'bx {name} rx: {val}')
# print(f'{name}: {val}')
counts[name] += 1
print(f'{name} bcaster ended')
@ -403,14 +363,10 @@ def test_local_task_fanout_from_stream(
with trio.fail_after(3):
async with trio.open_nursery() as nurse:
for i in range(consumers):
nurse.start_soon(
pull_and_count,
i,
)
nurse.start_soon(pull_and_count, i)
# delay to let bcast consumers pull msgs
await trio.sleep(0.5)
print('terminating nursery of bcast rxer consumers!')
print('\nterminating')
await stream.send('done')
print('closed stream connection')

View File

@ -11,10 +11,8 @@ from itertools import repeat
import pytest
import trio
import tractor
from tractor._testing import (
tractor_test,
)
from .conftest import no_windows
from conftest import tractor_test, no_windows
def is_win():
@ -45,82 +43,45 @@ async def do_nuthin():
],
ids=['no_args', 'unexpected_args'],
)
def test_remote_error(reg_addr, args_err):
'''
Verify an error raised in a subactor that is propagated
def test_remote_error(arb_addr, args_err):
"""Verify an error raised in a subactor that is propagated
to the parent nursery, contains the underlying boxed builtin
error type info and causes cancellation and reraising all the
way up the stack.
'''
"""
args, errtype = args_err
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as nursery:
# on a remote type error caused by bad input args
# this should raise directly which means we **don't** get
# an exception group outside the nursery since the error
# here and the far end task error are one and the same?
portal = await nursery.run_in_actor(
assert_err,
name='errorer',
**args
assert_err, name='errorer', **args
)
# get result(s) from main task
try:
# this means the root actor will also raise a local
# parent task error and thus an eg will propagate out
# of this actor nursery.
await portal.result()
except tractor.RemoteActorError as err:
assert err.boxed_type == errtype
assert err.type == errtype
print("Look Maa that actor failed hard, hehh")
raise
# ensure boxed errors
if args:
with pytest.raises(tractor.RemoteActorError) as excinfo:
trio.run(main)
with pytest.raises(tractor.RemoteActorError) as excinfo:
trio.run(main)
assert excinfo.value.boxed_type == errtype
else:
# the root task will also error on the `Portal.result()`
# call so we expect an error from there AND the child.
# |_ tho seems like on new `trio` this doesn't always
# happen?
with pytest.raises((
BaseExceptionGroup,
tractor.RemoteActorError,
)) as excinfo:
trio.run(main)
# ensure boxed errors are `errtype`
err: BaseException = excinfo.value
if isinstance(err, BaseExceptionGroup):
suberrs: list[BaseException] = err.exceptions
else:
suberrs: list[BaseException] = [err]
for exc in suberrs:
assert exc.boxed_type == errtype
# ensure boxed error is correct
assert excinfo.value.type == errtype
def test_multierror(
reg_addr: tuple[str, int],
):
'''
Verify we raise a ``BaseExceptionGroup`` out of a nursery where
def test_multierror(arb_addr):
"""Verify we raise a ``trio.MultiError`` out of a nursery where
more than one actor errors.
'''
"""
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as nursery:
await nursery.run_in_actor(assert_err, name='errorer1')
@ -130,14 +91,14 @@ def test_multierror(
try:
await portal2.result()
except tractor.RemoteActorError as err:
assert err.boxed_type is AssertionError
assert err.type == AssertionError
print("Look Maa that first actor failed hard, hehh")
raise
# here we should get a ``BaseExceptionGroup`` containing exceptions
# here we should get a `trio.MultiError` containing exceptions
# from both subactors
with pytest.raises(BaseExceptionGroup):
with pytest.raises(trio.MultiError):
trio.run(main)
@ -145,14 +106,14 @@ def test_multierror(
@pytest.mark.parametrize(
'num_subactors', range(25, 26),
)
def test_multierror_fast_nursery(reg_addr, start_method, num_subactors, delay):
"""Verify we raise a ``BaseExceptionGroup`` out of a nursery where
def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay):
"""Verify we raise a ``trio.MultiError`` out of a nursery where
more than one actor errors and also with a delay before failure
to test failure during an ongoing spawning.
"""
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as nursery:
for i in range(num_subactors):
@ -162,11 +123,10 @@ def test_multierror_fast_nursery(reg_addr, start_method, num_subactors, delay):
delay=delay
)
# with pytest.raises(trio.MultiError) as exc_info:
with pytest.raises(BaseExceptionGroup) as exc_info:
with pytest.raises(trio.MultiError) as exc_info:
trio.run(main)
assert exc_info.type == ExceptionGroup
assert exc_info.type == tractor.MultiError
err = exc_info.value
exceptions = err.exceptions
@ -182,7 +142,7 @@ def test_multierror_fast_nursery(reg_addr, start_method, num_subactors, delay):
for exc in exceptions:
assert isinstance(exc, tractor.RemoteActorError)
assert exc.boxed_type is AssertionError
assert exc.type == AssertionError
async def do_nothing():
@ -190,20 +150,15 @@ async def do_nothing():
@pytest.mark.parametrize('mechanism', ['nursery_cancel', KeyboardInterrupt])
def test_cancel_single_subactor(reg_addr, mechanism):
'''
Ensure a ``ActorNursery.start_actor()`` spawned subactor
def test_cancel_single_subactor(arb_addr, mechanism):
"""Ensure a ``ActorNursery.start_actor()`` spawned subactor
cancels when the nursery is cancelled.
'''
"""
async def spawn_actor():
'''
Spawn an actor that blocks indefinitely then cancel via
either `ActorNursery.cancel()` or an exception raise.
'''
"""Spawn an actor that blocks indefinitely.
"""
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as nursery:
portal = await nursery.start_actor(
@ -259,8 +214,8 @@ async def test_cancel_infinite_streamer(start_method):
[
# daemon actors sit idle while single task actors error out
(1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None),
(2, BaseExceptionGroup, AssertionError, (assert_err, {}), None),
(3, BaseExceptionGroup, AssertionError, (assert_err, {}), None),
(2, tractor.MultiError, AssertionError, (assert_err, {}), None),
(3, tractor.MultiError, AssertionError, (assert_err, {}), None),
# 1 daemon actor errors out while single task actors sleep forever
(3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}),
@ -271,7 +226,7 @@ async def test_cancel_infinite_streamer(start_method):
(do_nuthin, {}), (assert_err, {'delay': 1}, True)),
# daemon complete quickly delay while single task
# actors error after brief delay
(3, BaseExceptionGroup, AssertionError,
(3, tractor.MultiError, AssertionError,
(assert_err, {'delay': 1}), (do_nuthin, {}, False)),
],
ids=[
@ -323,7 +278,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
await portal.run(func, **kwargs)
except tractor.RemoteActorError as err:
assert err.boxed_type == err_type
assert err.type == err_type
# we only expect this first error to propagate
# (all other daemons are cancelled before they
# can be scheduled)
@ -338,15 +293,15 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
# should error here with a ``RemoteActorError`` or ``MultiError``
except first_err as err:
if isinstance(err, BaseExceptionGroup):
if isinstance(err, tractor.MultiError):
assert len(err.exceptions) == num_actors
for exc in err.exceptions:
if isinstance(exc, tractor.RemoteActorError):
assert exc.boxed_type == err_type
assert exc.type == err_type
else:
assert isinstance(exc, trio.Cancelled)
elif isinstance(err, tractor.RemoteActorError):
assert err.boxed_type == err_type
assert err.type == err_type
assert n.cancelled is True
assert not n._children
@ -382,7 +337,7 @@ async def spawn_and_error(breadth, depth) -> None:
@tractor_test
async def test_nested_multierrors(loglevel, start_method):
'''
Test that failed actor sets are wrapped in `BaseExceptionGroup`s. This
Test that failed actor sets are wrapped in `trio.MultiError`s. This
test goes only 2 nurseries deep but we should eventually have tests
for arbitrary n-depth actor trees.
@ -410,7 +365,7 @@ async def test_nested_multierrors(loglevel, start_method):
breadth=subactor_breadth,
depth=depth,
)
except BaseExceptionGroup as err:
except trio.MultiError as err:
assert len(err.exceptions) == subactor_breadth
for subexc in err.exceptions:
@ -425,21 +380,21 @@ async def test_nested_multierrors(loglevel, start_method):
elif isinstance(subexc, tractor.RemoteActorError):
# on windows it seems we can't exactly be sure wtf
# will happen..
assert subexc.boxed_type in (
assert subexc.type in (
tractor.RemoteActorError,
trio.Cancelled,
BaseExceptionGroup,
trio.MultiError
)
elif isinstance(subexc, BaseExceptionGroup):
elif isinstance(subexc, trio.MultiError):
for subsub in subexc.exceptions:
if subsub in (tractor.RemoteActorError,):
subsub = subsub.boxed_type
subsub = subsub.type
assert type(subsub) in (
trio.Cancelled,
BaseExceptionGroup,
trio.MultiError,
)
else:
assert isinstance(subexc, tractor.RemoteActorError)
@ -450,16 +405,16 @@ async def test_nested_multierrors(loglevel, start_method):
# we get back the (sent) cancel signal instead
if is_win():
if isinstance(subexc, tractor.RemoteActorError):
assert subexc.boxed_type in (
BaseExceptionGroup,
assert subexc.type in (
trio.MultiError,
tractor.RemoteActorError
)
else:
assert isinstance(subexc, BaseExceptionGroup)
assert isinstance(subexc, trio.MultiError)
else:
assert subexc.boxed_type is ExceptionGroup
assert subexc.type is trio.MultiError
else:
assert subexc.boxed_type in (
assert subexc.type in (
tractor.RemoteActorError,
trio.Cancelled
)
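The nested checks above walk `err.exceptions` level by level; a tiny helper sketch of that same recursion, flattening an arbitrarily nested exception group down to its leaf exceptions:

def flatten_excs(exc: BaseException) -> list[BaseException]:
    # recurse through nested groups collecting the leaf exceptions
    if isinstance(exc, BaseExceptionGroup):
        leaves: list[BaseException] = []
        for sub in exc.exceptions:
            leaves.extend(flatten_excs(sub))
        return leaves
    return [exc]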
@ -480,7 +435,7 @@ def test_cancel_via_SIGINT(
with trio.fail_after(2):
async with tractor.open_nursery() as tn:
await tn.start_actor('sucka')
if 'mp' in spawn_backend:
if spawn_backend == 'mp':
time.sleep(0.1)
os.kill(pid, signal.SIGINT)
await trio.sleep_forever()
@ -504,9 +459,7 @@ def test_cancel_via_SIGINT_other_task(
if is_win(): # smh
timeout += 1
async def spawn_and_sleep_forever(
task_status=trio.TASK_STATUS_IGNORED
):
async def spawn_and_sleep_forever(task_status=trio.TASK_STATUS_IGNORED):
async with tractor.open_nursery() as tn:
for i in range(3):
await tn.run_in_actor(
@ -519,11 +472,9 @@ def test_cancel_via_SIGINT_other_task(
async def main():
# should never timeout since SIGINT should cancel the current program
with trio.fail_after(timeout):
async with trio.open_nursery(
strict_exception_groups=False,
) as n:
async with trio.open_nursery() as n:
await n.start(spawn_and_sleep_forever)
if 'mp' in spawn_backend:
if spawn_backend == 'mp':
time.sleep(0.1)
os.kill(pid, signal.SIGINT)
@ -614,12 +565,6 @@ def test_fast_graceful_cancel_when_spawn_task_in_soft_proc_wait_for_daemon(
nurse.start_soon(delayed_kbi)
await p.run(do_nuthin)
# need to explicitly re-raise the lone kbi..now
except* KeyboardInterrupt as kbi_eg:
assert (len(excs := kbi_eg.exceptions) == 1)
raise excs[0]
finally:
duration = time.time() - start
if duration > timeout:
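The SIGINT-based cancel tests above depend on delivering a real signal to the test process; a self-contained sketch of that trick (POSIX only, the test name here is made up for illustration):

import os
import signal
import time

import pytest


def test_self_sigint_raises_keyboard_interrupt():
    with pytest.raises(KeyboardInterrupt):
        # deliver SIGINT to ourselves; the default handler turns it
        # into a KeyboardInterrupt at the next bytecode boundary
        os.kill(os.getpid(), signal.SIGINT)
        # give the handler a chance to run before leaving the block
        time.sleep(0.5)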

View File

@ -6,15 +6,14 @@ sub-sub-actor daemons.
'''
from typing import Optional
import asyncio
from contextlib import (
asynccontextmanager as acm,
aclosing,
)
from contextlib import asynccontextmanager as acm
import pytest
import trio
from trio_typing import TaskStatus
import tractor
from tractor import RemoteActorError
from async_generator import aclosing
async def aio_streamer(
@ -95,8 +94,8 @@ async def trio_main(
# stash a "service nursery" as "actor local" (aka a Python global)
global _nursery
tn = _nursery
assert tn
n = _nursery
assert n
async def consume_stream():
async with wrapper_mngr() as stream:
@ -104,10 +103,10 @@ async def trio_main(
print(msg)
# run 2 tasks to ensure broadcaster chan use
tn.start_soon(consume_stream)
tn.start_soon(consume_stream)
n.start_soon(consume_stream)
n.start_soon(consume_stream)
tn.start_soon(trio_sleep_and_err)
n.start_soon(trio_sleep_and_err)
await trio.sleep_forever()
@ -117,10 +116,8 @@ async def open_actor_local_nursery(
ctx: tractor.Context,
):
global _nursery
async with trio.open_nursery(
strict_exception_groups=False,
) as tn:
_nursery = tn
async with trio.open_nursery() as n:
_nursery = n
await ctx.started()
await trio.sleep(10)
# await trio.sleep(1)
@ -134,7 +131,7 @@ async def open_actor_local_nursery(
# never yields back.. aka a scenario where the
# ``tractor.context`` task IS NOT in the service n's cancel
# scope.
tn.cancel_scope.cancel()
n.cancel_scope.cancel()
@pytest.mark.parametrize(
@ -144,7 +141,7 @@ async def open_actor_local_nursery(
)
def test_actor_managed_trio_nursery_task_error_cancels_aio(
asyncio_mode: bool,
reg_addr: tuple,
arb_addr
):
'''
Verify that a ``trio`` nursery created managed in a child actor
@ -159,7 +156,7 @@ def test_actor_managed_trio_nursery_task_error_cancels_aio(
async with tractor.open_nursery() as n:
p = await n.start_actor(
'nursery_mngr',
infect_asyncio=asyncio_mode, # TODO, is this enabling debug mode?
infect_asyncio=asyncio_mode,
enable_modules=[__name__],
)
async with (
@ -173,4 +170,4 @@ def test_actor_managed_trio_nursery_task_error_cancels_aio(
# verify boxed error
err = excinfo.value
assert err.boxed_type is NameError
assert isinstance(err.type(), NameError)

View File

@ -1,81 +1,36 @@
import itertools
import pytest
import trio
import tractor
from tractor import open_actor_cluster
from tractor.trionics import gather_contexts
from tractor._testing import tractor_test
from conftest import tractor_test
MESSAGE = 'tractoring at full speed'
def test_empty_mngrs_input_raises() -> None:
async def main():
with trio.fail_after(1):
async with (
open_actor_cluster(
modules=[__name__],
# NOTE: ensure we can passthrough runtime opts
loglevel='info',
# debug_mode=True,
) as portals,
gather_contexts(
# NOTE: it's the use of inline-generator syntax
# here that causes the empty input.
mngrs=(
p.open_context(worker) for p in portals.values()
),
),
):
assert 0
with pytest.raises(ValueError):
trio.run(main)
@tractor.context
async def worker(
ctx: tractor.Context,
) -> None:
async def worker(ctx: tractor.Context) -> None:
await ctx.started()
async with ctx.open_stream(
allow_overruns=True,
) as stream:
# TODO: this with the below assert causes a hang bug?
# with trio.move_on_after(1):
async with ctx.open_stream(backpressure=True) as stream:
async for msg in stream:
# do something with msg
print(msg)
assert msg == MESSAGE
# TODO: does this ever cause a hang
# assert 0
@tractor_test
async def test_streaming_to_actor_cluster() -> None:
async with (
open_actor_cluster(modules=[__name__]) as portals,
gather_contexts(
mngrs=[p.open_context(worker) for p in portals.values()],
) as contexts,
gather_contexts(
mngrs=[ctx[0].open_stream() for ctx in contexts],
) as streams,
):
with trio.move_on_after(1):
for stream in itertools.cycle(streams):
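For context on what `gather_contexts()` provides here, a hedged conceptual sketch only (a sequential `AsyncExitStack` stand-in, not the actual implementation; it merely mirrors the empty-input `ValueError` contract the first test above relies on):

from contextlib import AsyncExitStack, asynccontextmanager


@asynccontextmanager
async def gather_acms(mngrs):
    mngrs = list(mngrs)
    if not mngrs:
        # the empty generator-input case should raise, as tested above
        raise ValueError('`mngrs` must be a non-empty collection')
    async with AsyncExitStack() as stack:
        entered = [await stack.enter_async_context(m) for m in mngrs]
        yield tuple(entered)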

File diff suppressed because it is too large

View File

@ -0,0 +1,598 @@
"""
That native debug better work!
All these tests can be understood (somewhat) by running the equivalent
`examples/debugging/` scripts manually.
TODO:
- none of these tests have been run successfully on windows yet but
there's been manual testing that verified it works.
- wonder if any of it'll work on OS X?
"""
import time
from os import path
import platform
import pytest
import pexpect
from conftest import repodir
# TODO: The next great debugger audit could be done by you!
# - recurrent entry to breakpoint() from single actor *after* and an
# error in another task?
# - root error before child errors
# - root error after child errors
# - root error before child breakpoint
# - root error after child breakpoint
# - recurrent root errors
if platform.system() == 'Windows':
pytest.skip(
'Debugger tests have no windows support (yet)',
allow_module_level=True,
)
def examples_dir():
"""Return the abspath to the examples directory.
"""
return path.join(repodir(), 'examples', 'debugging/')
def mk_cmd(ex_name: str) -> str:
"""Generate a command suitable to pass to ``pexpect.spawn()``.
"""
return ' '.join(
['python',
path.join(examples_dir(), f'{ex_name}.py')]
)
@pytest.fixture
def spawn(
start_method,
testdir,
arb_addr,
) -> 'pexpect.spawn':
if start_method != 'trio':
pytest.skip(
"Debugger tests are only supported on the trio backend"
)
def _spawn(cmd):
return testdir.spawn(
cmd=mk_cmd(cmd),
expect_timeout=3,
)
return _spawn
@pytest.mark.parametrize(
'user_in_out',
[
('c', 'AssertionError'),
('q', 'AssertionError'),
],
ids=lambda item: f'{item[0]} -> {item[1]}',
)
def test_root_actor_error(spawn, user_in_out):
"""Demonstrate crash handler entering pdbpp from basic error in root actor.
"""
user_input, expect_err_str = user_in_out
child = spawn('root_actor_error')
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
# make sure expected logging and error arrives
assert "Attaching to pdb in crashed actor: ('root'" in before
assert 'AssertionError' in before
# send user command
child.sendline(user_input)
# process should exit
child.expect(pexpect.EOF)
assert expect_err_str in str(child.before)
@pytest.mark.parametrize(
'user_in_out',
[
('c', None),
('q', 'bdb.BdbQuit'),
],
ids=lambda item: f'{item[0]} -> {item[1]}',
)
def test_root_actor_bp(spawn, user_in_out):
"""Demonstrate breakpoint from in root actor.
"""
user_input, expect_err_str = user_in_out
child = spawn('root_actor_breakpoint')
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
assert 'Error' not in str(child.before)
# send user command
child.sendline(user_input)
child.expect('\r\n')
# process should exit
child.expect(pexpect.EOF)
if expect_err_str is None:
assert 'Error' not in str(child.before)
else:
assert expect_err_str in str(child.before)
def test_root_actor_bp_forever(spawn):
"Re-enter a breakpoint from the root actor-task."
child = spawn('root_actor_breakpoint_forever')
# do some "next" commands to demonstrate recurrent breakpoint
# entries
for _ in range(10):
child.sendline('next')
child.expect(r"\(Pdb\+\+\)")
# do one continue which should trigger a new task to lock the tty
child.sendline('continue')
child.expect(r"\(Pdb\+\+\)")
# XXX: this previously caused a bug!
child.sendline('n')
child.expect(r"\(Pdb\+\+\)")
child.sendline('n')
child.expect(r"\(Pdb\+\+\)")
def test_subactor_error(spawn):
"Single subactor raising an error"
child = spawn('subactor_error')
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('name_error'" in before
# send user command
# (in this case it's the same for 'continue' vs. 'quit')
child.sendline('continue')
# the debugger should enter a second time in the nursery
# creating actor
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
# root actor gets debugger engaged
assert "Attaching to pdb in crashed actor: ('root'" in before
# error is a remote error propagated from the subactor
assert "RemoteActorError: ('name_error'" in before
child.sendline('c')
child.expect('\r\n')
# process should exit
child.expect(pexpect.EOF)
def test_subactor_breakpoint(spawn):
"Single subactor with an infinite breakpoint loop"
child = spawn('subactor_breakpoint')
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
# do some "next" commands to demonstrate recurrent breakpoint
# entries
for _ in range(10):
child.sendline('next')
child.expect(r"\(Pdb\+\+\)")
# now run some "continues" to show re-entries
for _ in range(5):
child.sendline('continue')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
# finally quit the loop
child.sendline('q')
# child process should exit but parent will capture pdb.BdbQuit
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "RemoteActorError: ('breakpoint_forever'" in before
assert 'bdb.BdbQuit' in before
# quit the parent
child.sendline('c')
# process should exit
child.expect(pexpect.EOF)
before = str(child.before.decode())
assert "RemoteActorError: ('breakpoint_forever'" in before
assert 'bdb.BdbQuit' in before
def test_multi_subactors(spawn):
"""
Multiple subactors, both erroring and breakpointing as well as
a nested subactor erroring.
"""
child = spawn(r'multi_subactors')
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
# do some "next" commands to demonstrate recurrent breakpoint
# entries
for _ in range(10):
child.sendline('next')
child.expect(r"\(Pdb\+\+\)")
# continue to next error
child.sendline('c')
# first name_error failure
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('name_error'" in before
assert "NameError" in before
# continue again
child.sendline('c')
# 2nd name_error failure
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('name_error_1'" in before
assert "NameError" in before
# breakpoint loop should re-engage
child.sendline('c')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
# wait for spawn error to show up
spawn_err = "Attaching to pdb in crashed actor: ('spawn_error'"
while spawn_err not in before:
child.sendline('c')
time.sleep(0.1)
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
# 2nd depth nursery should trigger
# child.sendline('c')
# child.expect(r"\(Pdb\+\+\)")
# before = str(child.before.decode())
assert spawn_err in before
assert "RemoteActorError: ('name_error_1'" in before
# now run some "continues" to show re-entries
for _ in range(5):
child.sendline('c')
child.expect(r"\(Pdb\+\+\)")
# quit the loop and expect parent to attach
child.sendline('q')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
# debugger attaches to root
assert "Attaching to pdb in crashed actor: ('root'" in before
# expect a multierror with exceptions for each sub-actor
assert "RemoteActorError: ('breakpoint_forever'" in before
assert "RemoteActorError: ('name_error'" in before
assert "RemoteActorError: ('spawn_error'" in before
assert "RemoteActorError: ('name_error_1'" in before
assert 'bdb.BdbQuit' in before
# process should exit
child.sendline('c')
child.expect(pexpect.EOF)
# repeat of previous multierror for final output
before = str(child.before.decode())
assert "RemoteActorError: ('breakpoint_forever'" in before
assert "RemoteActorError: ('name_error'" in before
assert "RemoteActorError: ('spawn_error'" in before
assert "RemoteActorError: ('name_error_1'" in before
assert 'bdb.BdbQuit' in before
def test_multi_daemon_subactors(spawn, loglevel):
"""Multiple daemon subactors, both erroring and breakpointing within a
stream.
"""
child = spawn('multi_daemon_subactors')
child.expect(r"\(Pdb\+\+\)")
# there is a race for which subactor will acquire
# the root's tty lock first
before = str(child.before.decode())
bp_forever_msg = "Attaching pdb to actor: ('bp_forever'"
name_error_msg = "NameError"
if bp_forever_msg in before:
next_msg = name_error_msg
elif name_error_msg in before:
next_msg = bp_forever_msg
else:
raise ValueError("Neither log msg was found !?")
# NOTE: previously since we did not have clobber prevention
# in the root actor this final resume could result in the debugger
# tearing down since both child actors would be cancelled and it was
# unlikely that `bp_forever` would re-acquire the tty lock again.
# Now, we should have a final resumption in the root plus a possible
# second entry by `bp_forever`.
child.sendline('c')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert next_msg in before
# XXX: hooray the root clobbering the child here was fixed!
# IMO, this demonstrates the true power of SC system design.
# now the root actor won't clobber the bp_forever child
# during its first access to the debug lock, but will instead
# wait for the lock to release, by the edge triggered
# ``_debug._no_remote_has_tty`` event before sending cancel messages
# (via portals) to its underlings B)
# at some point here there should have been some warning msg from
# the root announcing it avoided a clobber of the child's lock, but
# it seems unreliable in testing here to grab it:
# assert "in use by child ('bp_forever'," in before
# wait for final error in root
while True:
child.sendline('c')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
try:
# root error should be packed as remote error
assert "_exceptions.RemoteActorError: ('name_error'" in before
break
except AssertionError:
assert bp_forever_msg in before
try:
child.sendline('c')
child.expect(pexpect.EOF)
except pexpect.exceptions.TIMEOUT:
# Failed to exit using continue..?
child.sendline('q')
child.expect(pexpect.EOF)
def test_multi_subactors_root_errors(spawn):
'''
Multiple subactors, both erroring and breakpointing as well as
a nested subactor erroring.
'''
child = spawn('multi_subactor_root_errors')
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
# at most one subactor should attach before the root is cancelled
before = str(child.before.decode())
assert "NameError: name 'doggypants' is not defined" in before
# continue again to catch 2nd name error from
# actor 'name_error_1' (which is 2nd depth).
child.sendline('c')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('name_error_1'" in before
assert "NameError" in before
child.sendline('c')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('spawn_error'" in before
# boxed error from previous step
assert "RemoteActorError: ('name_error_1'" in before
assert "NameError" in before
child.sendline('c')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('root'" in before
# boxed error from first level failure
assert "RemoteActorError: ('name_error'" in before
assert "NameError" in before
# warnings assert we probably don't need
# assert "Cancelling nursery in ('spawn_error'," in before
# continue again
child.sendline('c')
child.expect(pexpect.EOF)
before = str(child.before.decode())
# error from root actor and root task that created top level nursery
assert "AssertionError" in before
def test_multi_nested_subactors_error_through_nurseries(spawn):
"""Verify deeply nested actors that error trigger debugger entries
at each actor nursery (level) all the way up the tree.
"""
# NOTE: previously, inside this script was a bug where if the
# parent errors before a 2-levels-lower actor has released the lock,
# the parent tries to cancel it but it's stuck in the debugger?
# A test (below) has now been added to explicitly verify this is
# fixed.
child = spawn('multi_nested_subactors_error_up_through_nurseries')
timed_out_early: bool = False
for i in range(12):
try:
child.expect(r"\(Pdb\+\+\)")
child.sendline('c')
time.sleep(0.1)
except pexpect.exceptions.EOF:
# race conditions on how fast the continue is sent?
print(f"Failed early on {i}?")
timed_out_early = True
break
else:
child.expect(pexpect.EOF)
if not timed_out_early:
before = str(child.before.decode())
assert "NameError" in before
def test_root_nursery_cancels_before_child_releases_tty_lock(
spawn,
start_method
):
"""Test that when the root sends a cancel message before a nested
child has unblocked (which can happen when it has the tty lock and
is engaged in pdb) it is indeed cancelled after exiting the debugger.
"""
timed_out_early = False
child = spawn('root_cancelled_but_child_is_in_tty_lock')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "NameError: name 'doggypants' is not defined" in before
assert "tractor._exceptions.RemoteActorError: ('name_error'" not in before
time.sleep(0.5)
child.sendline('c')
for i in range(4):
time.sleep(0.5)
try:
child.expect(r"\(Pdb\+\+\)")
except (
pexpect.exceptions.EOF,
pexpect.exceptions.TIMEOUT,
):
# races all over..
print(f"Failed early on {i}?")
before = str(child.before.decode())
timed_out_early = True
# race conditions on how fast the continue is sent?
break
before = str(child.before.decode())
assert "NameError: name 'doggypants' is not defined" in before
child.sendline('c')
while True:
try:
child.expect(pexpect.EOF)
break
except pexpect.exceptions.TIMEOUT:
child.sendline('c')
print('child was able to grab tty lock again?')
if not timed_out_early:
before = str(child.before.decode())
assert "tractor._exceptions.RemoteActorError: ('spawner0'" in before
assert "tractor._exceptions.RemoteActorError: ('name_error'" in before
assert "NameError: name 'doggypants' is not defined" in before
def test_root_cancels_child_context_during_startup(
spawn,
):
'''Verify a fast fail in the root doesn't lock up child reaping,
all while using the new context api.
'''
child = spawn('fast_error_in_root_after_spawn')
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "AssertionError" in before
child.sendline('c')
child.expect(pexpect.EOF)
def test_different_debug_mode_per_actor(
spawn,
):
child = spawn('per_actor_debug')
child.expect(r"\(Pdb\+\+\)")
# only one actor should enter the debugger
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('debugged_boi'" in before
assert "RuntimeError" in before
child.sendline('c')
child.expect(pexpect.EOF)
before = str(child.before.decode())
# NOTE: this debugged actor error currently WON'T show up since the
# root will actually cancel and terminate the nursery before the error
# msg reported back from the debug mode actor is processed.
# assert "tractor._exceptions.RemoteActorError: ('debugged_boi'" in before
assert "tractor._exceptions.RemoteActorError: ('crash_boi'" in before
# the crash boi should not have made a debugger request but
# instead crashed completely
assert "tractor._exceptions.RemoteActorError: ('crash_boi'" in before
assert "RuntimeError" in before

View File

@ -9,24 +9,25 @@ import itertools
import pytest
import tractor
from tractor._testing import tractor_test
import trio
from conftest import tractor_test
@tractor_test
async def test_reg_then_unreg(reg_addr):
async def test_reg_then_unreg(arb_addr):
actor = tractor.current_actor()
assert actor.is_arbiter
assert len(actor._registry) == 1 # only self is registered
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as n:
portal = await n.start_actor('actor', enable_modules=[__name__])
uid = portal.channel.uid
async with tractor.get_registry(*reg_addr) as aportal:
async with tractor.get_arbiter(*arb_addr) as aportal:
# this local actor should be the arbiter
assert actor is aportal.actor
@ -52,27 +53,15 @@ async def hi():
return the_line.format(tractor.current_actor().name)
async def say_hello(
other_actor: str,
reg_addr: tuple[str, int],
):
async def say_hello(other_actor):
await trio.sleep(1) # wait for other actor to spawn
async with tractor.find_actor(
other_actor,
registry_addrs=[reg_addr],
) as portal:
async with tractor.find_actor(other_actor) as portal:
assert portal is not None
return await portal.run(__name__, 'hi')
async def say_hello_use_wait(
other_actor: str,
reg_addr: tuple[str, int],
):
async with tractor.wait_for_actor(
other_actor,
registry_addr=reg_addr,
) as portal:
async def say_hello_use_wait(other_actor):
async with tractor.wait_for_actor(other_actor) as portal:
assert portal is not None
result = await portal.run(__name__, 'hi')
return result
@ -80,29 +69,21 @@ async def say_hello_use_wait(
@tractor_test
@pytest.mark.parametrize('func', [say_hello, say_hello_use_wait])
async def test_trynamic_trio(
func,
start_method,
reg_addr,
):
'''
Root actor acting as the "director" and running one-shot-task-actors
for the directed subs.
'''
async def test_trynamic_trio(func, start_method, arb_addr):
"""Main tractor entry point, the "master" process (for now
acts as the "director").
"""
async with tractor.open_nursery() as n:
print("Alright... Action!")
donny = await n.run_in_actor(
func,
other_actor='gretchen',
reg_addr=reg_addr,
name='donny',
)
gretchen = await n.run_in_actor(
func,
other_actor='donny',
reg_addr=reg_addr,
name='gretchen',
)
print(await gretchen.result())
@ -150,7 +131,7 @@ async def unpack_reg(actor_or_portal):
async def spawn_and_check_registry(
reg_addr: tuple,
arb_addr: tuple,
use_signal: bool,
remote_arbiter: bool = False,
with_streaming: bool = False,
@ -158,9 +139,9 @@ async def spawn_and_check_registry(
) -> None:
async with tractor.open_root_actor(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
):
async with tractor.get_registry(*reg_addr) as portal:
async with tractor.get_arbiter(*arb_addr) as portal:
# runtime needs to be up to call this
actor = tractor.current_actor()
@ -181,9 +162,7 @@ async def spawn_and_check_registry(
try:
async with tractor.open_nursery() as n:
async with trio.open_nursery(
strict_exception_groups=False,
) as trion:
async with trio.open_nursery() as trion:
portals = {}
for i in range(3):
@ -234,19 +213,17 @@ async def spawn_and_check_registry(
def test_subactors_unregister_on_cancel(
start_method,
use_signal,
reg_addr,
arb_addr,
with_streaming,
):
'''
Verify that cancelling a nursery results in all subactors
"""Verify that cancelling a nursery results in all subactors
deregistering themselves with the arbiter.
'''
"""
with pytest.raises(KeyboardInterrupt):
trio.run(
partial(
spawn_and_check_registry,
reg_addr,
arb_addr,
use_signal,
remote_arbiter=False,
with_streaming=with_streaming,
@ -260,7 +237,7 @@ def test_subactors_unregister_on_cancel_remote_daemon(
daemon,
start_method,
use_signal,
reg_addr,
arb_addr,
with_streaming,
):
"""Verify that cancelling a nursery results in all subactors
@ -271,7 +248,7 @@ def test_subactors_unregister_on_cancel_remote_daemon(
trio.run(
partial(
spawn_and_check_registry,
reg_addr,
arb_addr,
use_signal,
remote_arbiter=True,
with_streaming=with_streaming,
@ -285,7 +262,7 @@ async def streamer(agen):
async def close_chans_before_nursery(
reg_addr: tuple,
arb_addr: tuple,
use_signal: bool,
remote_arbiter: bool = False,
) -> None:
@ -298,9 +275,9 @@ async def close_chans_before_nursery(
entries_at_end = 1
async with tractor.open_root_actor(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
):
async with tractor.get_registry(*reg_addr) as aportal:
async with tractor.get_arbiter(*arb_addr) as aportal:
try:
get_reg = partial(unpack_reg, aportal)
@ -318,9 +295,7 @@ async def close_chans_before_nursery(
async with portal2.open_stream_from(
stream_forever
) as agen2:
async with trio.open_nursery(
strict_exception_groups=False,
) as n:
async with trio.open_nursery() as n:
n.start_soon(streamer, agen1)
n.start_soon(cancel, use_signal, .5)
try:
@ -354,7 +329,7 @@ async def close_chans_before_nursery(
def test_close_channel_explicit(
start_method,
use_signal,
reg_addr,
arb_addr,
):
"""Verify that closing a stream explicitly and killing the actor's
"root nursery" **before** the containing nursery tears down also
@ -364,7 +339,7 @@ def test_close_channel_explicit(
trio.run(
partial(
close_chans_before_nursery,
reg_addr,
arb_addr,
use_signal,
remote_arbiter=False,
),
@ -376,7 +351,7 @@ def test_close_channel_explicit_remote_arbiter(
daemon,
start_method,
use_signal,
reg_addr,
arb_addr,
):
"""Verify that closing a stream explicitly and killing the actor's
"root nursery" **before** the containing nursery tears down also
@ -386,7 +361,7 @@ def test_close_channel_explicit_remote_arbiter(
trio.run(
partial(
close_chans_before_nursery,
reg_addr,
arb_addr,
use_signal,
remote_arbiter=True,
),

View File

@ -11,17 +11,18 @@ import platform
import shutil
import pytest
from tractor._testing import (
examples_dir,
)
from conftest import repodir
def examples_dir():
"""Return the abspath to the examples directory.
"""
return os.path.join(repodir(), 'examples')
@pytest.fixture
def run_example_in_subproc(
loglevel: str,
testdir: pytest.Pytester,
reg_addr: tuple[str, int],
):
def run_example_in_subproc(loglevel, testdir, arb_addr):
@contextmanager
def run(script_code):
@ -31,8 +32,8 @@ def run_example_in_subproc(
# on windows we need to create a special __main__.py which will
# be executed with ``python -m <modulename>`` on windows..
shutil.copyfile(
examples_dir() / '__main__.py',
str(testdir / '__main__.py'),
os.path.join(examples_dir(), '__main__.py'),
os.path.join(str(testdir), '__main__.py')
)
# drop the ``if __name__ == '__main__'`` guard onwards from
@ -80,37 +81,24 @@ def run_example_in_subproc(
'example_script',
# walk yields: (dirpath, dirnames, filenames)
[
(p[0], f)
for p in os.walk(examples_dir())
for f in p[2]
[(p[0], f) for p in os.walk(examples_dir()) for f in p[2]
if '__' not in f
and f[0] != '_'
and 'debugging' not in p[0]],
if (
'__' not in f
and f[0] != '_'
and 'debugging' not in p[0]
and 'integration' not in p[0]
and 'advanced_faults' not in p[0]
and 'multihost' not in p[0]
)
],
ids=lambda t: t[1],
)
def test_example(
run_example_in_subproc,
example_script,
):
'''
Load and run scripts from this repo's ``examples/`` dir as a user
def test_example(run_example_in_subproc, example_script):
"""Load and run scripts from this repo's ``examples/`` dir as a user
would copy and paste them into their editor.
On windows a little more "finessing" is done to make
``multiprocessing`` play nice: we copy the ``__main__.py`` into the
test directory and invoke the script as a module with ``python -m
test_example``.
'''
ex_file: str = os.path.join(*example_script)
"""
ex_file = os.path.join(*example_script)
if 'rpc_bidir_streaming' in ex_file and sys.version_info < (3, 9):
pytest.skip("2-way streaming example requires py3.9 async with syntax")
@ -125,20 +113,9 @@ def test_example(
# print(f'STDOUT: {out}')
# if we get some gnarly output let's aggregate and raise
if err:
errmsg = err.decode()
errlines = errmsg.splitlines()
last_error = errlines[-1]
if (
'Error' in last_error
# XXX: currently we print this to console, but maybe
# shouldn't eventually once we figure out what's
# a better way to be explicit about aio side
# cancels?
and
'asyncio.exceptions.CancelledError' not in last_error
):
raise Exception(errmsg)
errmsg = err.decode()
errlines = errmsg.splitlines()
if err and 'Error' in errlines[-1]:
raise Exception(errmsg)
assert proc.returncode == 0
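The stderr heuristic above boils down to: run the script, then only fail when the last stderr line actually looks like an error. A self-contained sketch of that pattern using a throwaway inline script instead of a real example file:

import subprocess
import sys


def test_run_script_and_scan_stderr():
    code = 'print("hello from the example")'
    proc = subprocess.run(
        [sys.executable, '-c', code],
        capture_output=True,
        timeout=10,
    )
    err = proc.stderr.decode()
    # only treat it as a failure if the last stderr line is an error
    if err and 'Error' in err.splitlines()[-1]:
        raise Exception(err)
    assert proc.returncode == 0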

View File

@ -1,946 +0,0 @@
'''
Low-level functional audits for our
"capability based messaging"-spec feats.
B~)
'''
from contextlib import (
contextmanager as cm,
# nullcontext,
)
import importlib
from typing import (
Any,
Type,
Union,
)
from msgspec import (
# structs,
# msgpack,
Raw,
# Struct,
ValidationError,
)
import pytest
import trio
import tractor
from tractor import (
Actor,
# _state,
MsgTypeError,
Context,
)
from tractor.msg import (
_codec,
_ctxvar_MsgCodec,
_exts,
NamespacePath,
MsgCodec,
MsgDec,
mk_codec,
mk_dec,
apply_codec,
current_codec,
)
from tractor.msg.types import (
log,
Started,
# _payload_msgs,
# PayloadMsg,
# mk_msg_spec,
)
from tractor.msg._ops import (
limit_plds,
)
def enc_nsp(obj: Any) -> Any:
actor: Actor = tractor.current_actor(
err_on_no_runtime=False,
)
uid: tuple[str, str]|None = None if not actor else actor.uid
print(f'{uid} ENC HOOK')
match obj:
# case NamespacePath()|str():
case NamespacePath():
encoded: str = str(obj)
print(
f'----- ENCODING `NamespacePath` as `str` ------\n'
f'|_obj:{type(obj)!r} = {obj!r}\n'
f'|_encoded: str = {encoded!r}\n'
)
# if type(obj) != NamespacePath:
# breakpoint()
return encoded
case _:
logmsg: str = (
f'{uid}\n'
'FAILED ENCODE\n'
f'obj-> `{obj}: {type(obj)}`\n'
)
raise NotImplementedError(logmsg)
def dec_nsp(
obj_type: Type,
obj: Any,
) -> Any:
# breakpoint()
actor: Actor = tractor.current_actor(
err_on_no_runtime=False,
)
uid: tuple[str, str]|None = None if not actor else actor.uid
print(
f'{uid}\n'
'CUSTOM DECODE\n'
f'type-arg-> {obj_type}\n'
f'obj-arg-> `{obj}`: {type(obj)}\n'
)
nsp = None
# XXX, never happens right?
if obj_type is Raw:
breakpoint()
if (
obj_type is NamespacePath
and isinstance(obj, str)
and ':' in obj
):
nsp = NamespacePath(obj)
# TODO: we could built a generic handler using
# JUST matching the obj_type part?
# nsp = obj_type(obj)
if nsp:
print(f'Returning NSP instance: {nsp}')
return nsp
logmsg: str = (
f'{uid}\n'
'FAILED DECODE\n'
f'type-> {obj_type}\n'
f'obj-arg-> `{obj}`: {type(obj)}\n\n'
f'current codec:\n'
f'{current_codec()}\n'
)
# TODO: figure out the ignore subsys for this!
# -[ ] option whether to defense-relay backc the msg
# inside an `Invalid`/`Ignore`
# -[ ] how to make this handling pluggable such that a
# `Channel`/`MsgTransport` can intercept and process
# back msgs either via exception handling or some other
# signal?
log.warning(logmsg)
# NOTE: this delivers the invalid
# value up to `msgspec`'s decoding
# machinery for error raising.
return obj
# raise NotImplementedError(logmsg)
def ex_func(*args):
'''
A mod level func we can ref and load via our `NamespacePath`
python-object pointer `str` subtype.
'''
print(f'ex_func({args})')
@pytest.mark.parametrize(
'add_codec_hooks',
[
True,
False,
],
ids=['use_codec_hooks', 'no_codec_hooks'],
)
def test_custom_extension_types(
debug_mode: bool,
add_codec_hooks: bool
):
'''
Verify that a `MsgCodec` (used for encoding all outbound IPC msgs
and decoding all inbound `PayloadMsg`s) and a paired `MsgDec`
(used for decoding the `PayloadMsg.pld: Raw` received within a given
task's ipc `Context` scope) can both send and receive "extension types"
as supported via custom converter hooks passed to `msgspec`.
'''
nsp_pld_dec: MsgDec = mk_dec(
spec=None, # ONLY support the ext type
dec_hook=dec_nsp if add_codec_hooks else None,
ext_types=[NamespacePath],
)
nsp_codec: MsgCodec = mk_codec(
# ipc_pld_spec=Raw, # default!
# NOTE XXX: the encode hook MUST be used no matter what since
# our `NamespacePath` is not any of a `Any` native type nor
# a `msgspec.Struct` subtype - so `msgspec` has no way to know
# how to encode it unless we provide the custom hook.
#
# AGAIN that is, regardless of whether we spec an
# `Any`-decoded-pld the enc has no knowledge (by default)
# how to enc `NamespacePath` (nsp), so we add a custom
# hook to do that ALWAYS.
enc_hook=enc_nsp if add_codec_hooks else None,
# XXX NOTE: pretty sure this is mutex with the `type=` to
# `Decoder`? so it won't work in tandem with the
# `ipc_pld_spec` passed above?
ext_types=[NamespacePath],
# TODO? is it useful to have the `.pld` decoded *prior* to
# the `PldRx`?? like perf or mem related?
# ext_dec=nsp_pld_dec,
)
if add_codec_hooks:
assert nsp_codec.dec.dec_hook is None
# TODO? if we pass `ext_dec` above?
# assert nsp_codec.dec.dec_hook is dec_nsp
assert nsp_codec.enc.enc_hook is enc_nsp
nsp = NamespacePath.from_ref(ex_func)
try:
nsp_bytes: bytes = nsp_codec.encode(nsp)
nsp_rt_sin_msg = nsp_pld_dec.decode(nsp_bytes)
nsp_rt_sin_msg.load_ref() is ex_func
except TypeError:
if not add_codec_hooks:
pass
try:
msg_bytes: bytes = nsp_codec.encode(
Started(
cid='cid',
pld=nsp,
)
)
# since the ext-type obj should also be set as the msg.pld
assert nsp_bytes in msg_bytes
started_rt: Started = nsp_codec.decode(msg_bytes)
pld: Raw = started_rt.pld
assert isinstance(pld, Raw)
nsp_rt: NamespacePath = nsp_pld_dec.decode(pld)
assert isinstance(nsp_rt, NamespacePath)
# in obj comparison terms they should be the same
assert nsp_rt == nsp
# ensure we've decoded to ext type!
assert nsp_rt.load_ref() is ex_func
except TypeError:
if not add_codec_hooks:
pass
@tractor.context
async def sleep_forever_in_sub(
ctx: Context,
) -> None:
await trio.sleep_forever()
def mk_custom_codec(
add_hooks: bool,
) -> tuple[
MsgCodec, # encode to send
MsgDec, # pld receive-n-decode
]:
'''
Create custom `msgpack` enc/dec-hooks and set a `Decoder`
which only loads `pld_spec` (like `NamespacePath`) types.
'''
# XXX NOTE XXX: despite defining `NamespacePath` as a type
# field on our `PayloadMsg.pld`, we still need a enc/dec_hook() pair
# to cast to/from that type on the wire. See the docs:
# https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types
# if pld_spec is Any:
# pld_spec = Raw
nsp_codec: MsgCodec = mk_codec(
# ipc_pld_spec=Raw, # default!
# NOTE XXX: the encode hook MUST be used no matter what since
# our `NamespacePath` is not any of a `Any` native type nor
# a `msgspec.Struct` subtype - so `msgspec` has no way to know
# how to encode it unless we provide the custom hook.
#
# AGAIN that is, regardless of whether we spec an
# `Any`-decoded-pld the enc has no knowledge (by default)
# how to enc `NamespacePath` (nsp), so we add a custom
# hook to do that ALWAYS.
enc_hook=enc_nsp if add_hooks else None,
# XXX NOTE: pretty sure this is mutex with the `type=` to
# `Decoder`? so it won't work in tandem with the
# `ipc_pld_spec` passed above?
ext_types=[NamespacePath],
)
# dec_hook=dec_nsp if add_hooks else None,
return nsp_codec
@pytest.mark.parametrize(
'limit_plds_args',
[
(
{'dec_hook': None, 'ext_types': None},
None,
),
(
{'dec_hook': dec_nsp, 'ext_types': None},
TypeError,
),
(
{'dec_hook': dec_nsp, 'ext_types': [NamespacePath]},
None,
),
(
{'dec_hook': dec_nsp, 'ext_types': [NamespacePath|None]},
None,
),
],
ids=[
'no_hook_no_ext_types',
'only_hook',
'hook_and_ext_types',
'hook_and_ext_types_w_null',
]
)
def test_pld_limiting_usage(
limit_plds_args: tuple[dict, Exception|None],
):
'''
Verify `dec_hook()` and `ext_types` need to either both be
provided or we raise an explanatory type-error.
'''
kwargs, maybe_err = limit_plds_args
async def main():
async with tractor.open_nursery() as an: # just to open runtime
# XXX SHOULD NEVER WORK outside an ipc ctx scope!
try:
with limit_plds(**kwargs):
pass
except RuntimeError:
pass
p: tractor.Portal = await an.start_actor(
'sub',
enable_modules=[__name__],
)
async with (
p.open_context(
sleep_forever_in_sub
) as (ctx, first),
):
try:
with limit_plds(**kwargs):
pass
except maybe_err as exc:
assert type(exc) is maybe_err
pass
def chk_codec_applied(
expect_codec: MsgCodec|None,
enter_value: MsgCodec|None = None,
) -> MsgCodec:
'''
A bunch of sanity checks ensuring that the IPC channel's
context-vars are set to the expected codec and that our
ctx-var wrapper APIs match the same.
'''
# TODO: play with tricycle again, bc this is supposed to work
# the way we want?
#
# TreeVar
# task: trio.Task = trio.lowlevel.current_task()
# curr_codec = _ctxvar_MsgCodec.get_in(task)
# ContextVar
# task_ctx: Context = task.context
# assert _ctxvar_MsgCodec in task_ctx
# curr_codec: MsgCodec = task.context[_ctxvar_MsgCodec]
if expect_codec is None:
assert enter_value is None
return
# NOTE: currently we use this!
# RunVar
curr_codec: MsgCodec = current_codec()
last_read_codec = _ctxvar_MsgCodec.get()
# assert curr_codec is last_read_codec
assert (
(same_codec := expect_codec) is
# returned from `mk_codec()`
# yielded value from `apply_codec()`
# read from current task's `contextvars.Context`
curr_codec is
last_read_codec
# the default `msgspec` settings
is not _codec._def_msgspec_codec
is not _codec._def_tractor_codec
)
if enter_value:
assert enter_value is same_codec
@tractor.context
async def send_back_values(
ctx: Context,
rent_pld_spec_type_strs: list[str],
add_hooks: bool,
) -> None:
'''
Set up a custom codec to load instances of `NamespacePath`
and ensure we can round trip a func ref with our parent.
'''
uid: tuple = tractor.current_actor().uid
# init state in sub-actor should be default
chk_codec_applied(
expect_codec=_codec._def_tractor_codec,
)
# load pld spec from input str
rent_pld_spec = _exts.dec_type_union(
rent_pld_spec_type_strs,
mods=[
importlib.import_module(__name__),
],
)
rent_pld_spec_types: set[Type] = _codec.unpack_spec_types(
rent_pld_spec,
)
# ONLY add ext-hooks if the rent specified a non-std type!
add_hooks: bool = (
NamespacePath in rent_pld_spec_types
and
add_hooks
)
# same as on parent side config.
nsp_codec: MsgCodec|None = None
if add_hooks:
nsp_codec = mk_codec(
enc_hook=enc_nsp,
ext_types=[NamespacePath],
)
with (
maybe_apply_codec(nsp_codec) as codec,
limit_plds(
rent_pld_spec,
dec_hook=dec_nsp if add_hooks else None,
ext_types=[NamespacePath] if add_hooks else None,
) as pld_dec,
):
# ?XXX? SHOULD WE NOT be swapping the global codec since it
# breaks `Context.started()` roundtripping checks??
chk_codec_applied(
expect_codec=nsp_codec,
enter_value=codec,
)
# ?TODO, mismatch case(s)?
#
# ensure pld spec matches on both sides
ctx_pld_dec: MsgDec = ctx._pld_rx._pld_dec
assert pld_dec is ctx_pld_dec
child_pld_spec: Type = pld_dec.spec
child_pld_spec_types: set[Type] = _codec.unpack_spec_types(
child_pld_spec,
)
assert (
child_pld_spec_types.issuperset(
rent_pld_spec_types
)
)
# ?TODO, try loop for each of the types in pld-superset?
#
# for send_value in [
# nsp,
# str(nsp),
# None,
# ]:
nsp = NamespacePath.from_ref(ex_func)
try:
print(
f'{uid}: attempting to `.started({nsp})`\n'
f'\n'
f'rent_pld_spec: {rent_pld_spec}\n'
f'child_pld_spec: {child_pld_spec}\n'
f'codec: {codec}\n'
)
# await tractor.pause()
await ctx.started(nsp)
except tractor.MsgTypeError as _mte:
mte = _mte
# false -ve case
if add_hooks:
raise RuntimeError(
f'EXPECTED to `.started()` value given spec ??\n\n'
f'child_pld_spec -> {child_pld_spec}\n'
f'value = {nsp}: {type(nsp)}\n'
)
# true -ve case
raise mte
# TODO: maybe we should add our own wrapper error so as to
# be interchange-lib agnostic?
# -[ ] the error type is wtv is raised from the hook so we
# could also require a type-class of errors for
# indicating whether the hook-failure can be handled by
# a nasty-dialog-unprot sub-sys?
except TypeError as typerr:
# false -ve
if add_hooks:
raise RuntimeError('Should have been able to send `nsp`??')
# true -ve
print('Failed to send `nsp` due to no ext hooks set!')
raise typerr
# now try sending a set of valid and invalid plds to ensure
# the pld spec is respected.
sent: list[Any] = []
async with ctx.open_stream() as ipc:
print(
f'{uid}: streaming all pld types to rent..'
)
# for send_value, expect_send in iter_send_val_items:
for send_value in [
nsp,
str(nsp),
None,
]:
send_type: Type = type(send_value)
print(
f'{uid}: SENDING NEXT pld\n'
f'send_type: {send_type}\n'
f'send_value: {send_value}\n'
)
try:
await ipc.send(send_value)
sent.append(send_value)
except ValidationError as valerr:
print(f'{uid} FAILED TO SEND {send_value}!')
# false -ve
if add_hooks:
raise RuntimeError(
f'EXPECTED to roundtrip value given spec:\n'
f'rent_pld_spec -> {rent_pld_spec}\n'
f'child_pld_spec -> {child_pld_spec}\n'
f'value = {send_value}: {send_type}\n'
)
# true -ve
raise valerr
# continue
else:
print(
f'{uid}: finished sending all values\n'
'Should be exiting stream block!\n'
)
print(f'{uid}: exited streaming block!')
@cm
def maybe_apply_codec(codec: MsgCodec|None) -> MsgCodec|None:
if codec is None:
yield None
return
with apply_codec(codec) as codec:
yield codec
@pytest.mark.parametrize(
'pld_spec',
[
Any,
NamespacePath,
NamespacePath|None, # the "maybe" spec Bo
],
ids=[
'any_type',
'only_nsp_ext',
'maybe_nsp_ext',
]
)
@pytest.mark.parametrize(
'add_hooks',
[
True,
False,
],
ids=[
'use_codec_hooks',
'no_codec_hooks',
],
)
def test_ext_types_over_ipc(
debug_mode: bool,
pld_spec: Union[Type],
add_hooks: bool,
):
'''
Ensure we can support extension types converted using
`enc/dec_hook()`s passed to the `.msg.limit_plds()` API
and that sane errors happen when we try do the same without
the codec hooks.
'''
pld_types: set[Type] = _codec.unpack_spec_types(pld_spec)
async def main():
# sanity check the default pld-spec beforehand
chk_codec_applied(
expect_codec=_codec._def_tractor_codec,
)
# extension type we want to send as msg payload
nsp = NamespacePath.from_ref(ex_func)
# ^NOTE, 2 cases:
# - codec hooks not added -> decode nsp as `str`
# - codec with hooks -> decode nsp as `NamespacePath`
nsp_codec: MsgCodec|None = None
if (
NamespacePath in pld_types
and
add_hooks
):
nsp_codec = mk_codec(
enc_hook=enc_nsp,
ext_types=[NamespacePath],
)
async with tractor.open_nursery(
debug_mode=debug_mode,
) as an:
p: tractor.Portal = await an.start_actor(
'sub',
enable_modules=[__name__],
)
with (
maybe_apply_codec(nsp_codec) as codec,
):
chk_codec_applied(
expect_codec=nsp_codec,
enter_value=codec,
)
rent_pld_spec_type_strs: list[str] = _exts.enc_type_union(pld_spec)
# XXX should raise an mte (`MsgTypeError`)
# when `add_hooks == False` bc the input
# `expect_ipc_send` kwarg has a nsp which can't be
# serialized!
#
# TODO:can we ensure this happens from the
# `Return`-side (aka the sub) as well?
try:
ctx: tractor.Context
ipc: tractor.MsgStream
async with (
# XXX should raise an mte (`MsgTypeError`)
# when `add_hooks == False`..
p.open_context(
send_back_values,
# expect_debug=debug_mode,
rent_pld_spec_type_strs=rent_pld_spec_type_strs,
add_hooks=add_hooks,
# expect_ipc_send=expect_ipc_send,
) as (ctx, first),
ctx.open_stream() as ipc,
):
with (
limit_plds(
pld_spec,
dec_hook=dec_nsp if add_hooks else None,
ext_types=[NamespacePath] if add_hooks else None,
) as pld_dec,
):
ctx_pld_dec: MsgDec = ctx._pld_rx._pld_dec
assert pld_dec is ctx_pld_dec
# if (
# not add_hooks
# and
# NamespacePath in
# ):
# pytest.fail('ctx should fail to open without custom enc_hook!?')
await ipc.send(nsp)
nsp_rt = await ipc.receive()
assert nsp_rt == nsp
assert nsp_rt.load_ref() is ex_func
# this test passes bc we can go no further!
except MsgTypeError as mte:
# if not add_hooks:
# # teardown nursery
# await p.cancel_actor()
# return
raise mte
await p.cancel_actor()
if (
NamespacePath in pld_types
and
add_hooks
):
trio.run(main)
else:
with pytest.raises(
expected_exception=tractor.RemoteActorError,
) as excinfo:
trio.run(main)
exc = excinfo.value
# bc `.started(nsp: NamespacePath)` will raise
assert exc.boxed_type is TypeError
# def chk_pld_type(
# payload_spec: Type[Struct]|Any,
# pld: Any,
# expect_roundtrip: bool|None = None,
# ) -> bool:
# pld_val_type: Type = type(pld)
# # TODO: verify that the overridden subtypes
# # DO NOT have modified type-annots from original!
# # 'Start', .pld: FuncSpec
# # 'StartAck', .pld: IpcCtxSpec
# # 'Stop', .pld: UNSEt
# # 'Error', .pld: ErrorData
# codec: MsgCodec = mk_codec(
# # NOTE: this ONLY accepts `PayloadMsg.pld` fields of a specified
# # type union.
# ipc_pld_spec=payload_spec,
# )
# # make a one-off dec to compare with our `MsgCodec` instance
# # which does the below `mk_msg_spec()` call internally
# ipc_msg_spec: Union[Type[Struct]]
# msg_types: list[PayloadMsg[payload_spec]]
# (
# ipc_msg_spec,
# msg_types,
# ) = mk_msg_spec(
# payload_type_union=payload_spec,
# )
# _enc = msgpack.Encoder()
# _dec = msgpack.Decoder(
# type=ipc_msg_spec or Any, # like `PayloadMsg[Any]`
# )
# assert (
# payload_spec
# ==
# codec.pld_spec
# )
# # assert codec.dec == dec
# #
# # ^-XXX-^ not sure why these aren't "equal" but when cast
# # to `str` they seem to match ?? .. kk
# assert (
# str(ipc_msg_spec)
# ==
# str(codec.msg_spec)
# ==
# str(_dec.type)
# ==
# str(codec.dec.type)
# )
# # verify the boxed-type for all variable payload-type msgs.
# if not msg_types:
# breakpoint()
# roundtrip: bool|None = None
# pld_spec_msg_names: list[str] = [
# td.__name__ for td in _payload_msgs
# ]
# for typedef in msg_types:
# skip_runtime_msg: bool = typedef.__name__ not in pld_spec_msg_names
# if skip_runtime_msg:
# continue
# pld_field = structs.fields(typedef)[1]
# assert pld_field.type is payload_spec # TODO-^ does this need to work to get all subtypes to adhere?
# kwargs: dict[str, Any] = {
# 'cid': '666',
# 'pld': pld,
# }
# enc_msg: PayloadMsg = typedef(**kwargs)
# _wire_bytes: bytes = _enc.encode(enc_msg)
# wire_bytes: bytes = codec.enc.encode(enc_msg)
# assert _wire_bytes == wire_bytes
# ve: ValidationError|None = None
# try:
# dec_msg = codec.dec.decode(wire_bytes)
# _dec_msg = _dec.decode(wire_bytes)
# # decoded msg and thus payload should be exactly same!
# assert (roundtrip := (
# _dec_msg
# ==
# dec_msg
# ==
# enc_msg
# ))
# if (
# expect_roundtrip is not None
# and expect_roundtrip != roundtrip
# ):
# breakpoint()
# assert (
# pld
# ==
# dec_msg.pld
# ==
# enc_msg.pld
# )
# # assert (roundtrip := (_dec_msg == enc_msg))
# except ValidationError as _ve:
# ve = _ve
# roundtrip: bool = False
# if pld_val_type is payload_spec:
# raise ValueError(
# 'Got `ValidationError` despite type-var match!?\n'
# f'pld_val_type: {pld_val_type}\n'
# f'payload_type: {payload_spec}\n'
# ) from ve
# else:
# # ow we good cuz the pld spec mismatched.
# print(
# 'Got expected `ValidationError` since,\n'
# f'{pld_val_type} is not {payload_spec}\n'
# )
# else:
# if (
# payload_spec is not Any
# and
# pld_val_type is not payload_spec
# ):
# raise ValueError(
# 'DID NOT `ValidationError` despite expected type match!?\n'
# f'pld_val_type: {pld_val_type}\n'
# f'payload_type: {payload_spec}\n'
# )
# # full code decode should always be attempted!
# if roundtrip is None:
# breakpoint()
# return roundtrip
# ?TODO? maybe remove since covered in the newer `test_pldrx_limiting`
# via end-2-end testing of all this?
# -[ ] IOW do we really NEED this lowlevel unit testing?
#
# def test_limit_msgspec(
# debug_mode: bool,
# ):
# '''
# Internals unit testing to verify that type-limiting an IPC ctx's
# msg spec with `Pldrx.limit_plds()` results in various
# encapsulated `msgspec` object settings and state.
# '''
# async def main():
# async with tractor.open_root_actor(
# debug_mode=debug_mode,
# ):
# # ensure we can round-trip a boxing `PayloadMsg`
# assert chk_pld_type(
# payload_spec=Any,
# pld=None,
# expect_roundtrip=True,
# )
# # verify that a mis-typed payload value won't decode
# assert not chk_pld_type(
# payload_spec=int,
# pld='doggy',
# )
# # parametrize the boxed `.pld` type as a custom-struct
# # and ensure that parametrization propagates
# # to all payload-msg-spec-able subtypes!
# class CustomPayload(Struct):
# name: str
# value: Any
# assert not chk_pld_type(
# payload_spec=CustomPayload,
# pld='doggy',
# )
# assert chk_pld_type(
# payload_spec=CustomPayload,
# pld=CustomPayload(name='doggy', value='urmom')
# )
# # yah, we can `.pause_from_sync()` now!
# # breakpoint()
# trio.run(main)
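Since everything in this file builds on `msgspec`'s converter hooks, here is the bare mechanism in isolation (adapted from the msgspec extending docs linked above; `complex` stands in for a real extension type like `NamespacePath`):

import msgspec


def enc_hook(obj):
    # called for any type msgspec can't natively encode
    if isinstance(obj, complex):
        return (obj.real, obj.imag)
    raise NotImplementedError(f'cannot encode {type(obj)}')


def dec_hook(typ, obj):
    # called when the decode spec asks for a custom type
    if typ is complex:
        return complex(obj[0], obj[1])
    raise NotImplementedError(f'cannot decode {typ}')


enc = msgspec.msgpack.Encoder(enc_hook=enc_hook)
dec = msgspec.msgpack.Decoder(type=complex, dec_hook=dec_hook)

wire: bytes = enc.encode(3 + 4j)
assert dec.decode(wire) == 3 + 4j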

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -7,24 +7,31 @@ import pytest
import trio
import tractor
from tractor._testing import tractor_test
from conftest import tractor_test
@pytest.mark.trio
async def test_no_runtime():
async def test_no_arbitter():
"""An arbitter must be established before any nurseries
can be created.
(In other words ``tractor.open_root_actor()`` must be engaged at
some point?)
"""
with pytest.raises(RuntimeError) :
async with tractor.find_actor('doggy'):
with pytest.raises(RuntimeError):
with tractor.open_nursery():
pass
def test_no_main():
"""An async function **must** be passed to ``tractor.run()``.
"""
with pytest.raises(TypeError):
tractor.run(None)
@tractor_test
async def test_self_is_registered(reg_addr):
async def test_self_is_registered(arb_addr):
"Verify waiting on the arbiter to register itself using the standard api."
actor = tractor.current_actor()
assert actor.is_arbiter
@ -34,20 +41,20 @@ async def test_self_is_registered(reg_addr):
@tractor_test
async def test_self_is_registered_localportal(reg_addr):
async def test_self_is_registered_localportal(arb_addr):
"Verify waiting on the arbiter to register itself using a local portal."
actor = tractor.current_actor()
assert actor.is_arbiter
async with tractor.get_registry(*reg_addr) as portal:
async with tractor.get_arbiter(*arb_addr) as portal:
assert isinstance(portal, tractor._portal.LocalPortal)
with trio.fail_after(0.2):
sockaddr = await portal.run_from_ns(
'self', 'wait_for_actor', name='root')
assert sockaddr[0] == reg_addr
assert sockaddr[0] == arb_addr
def test_local_actor_async_func(reg_addr):
def test_local_actor_async_func(arb_addr):
"""Verify a simple async function in-process.
"""
nums = []
@ -55,7 +62,7 @@ def test_local_actor_async_func(reg_addr):
async def print_loop():
async with tractor.open_root_actor(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
):
# arbiter is started in-proc if dne
assert tractor.current_actor().is_arbiter

View File

@ -7,10 +7,8 @@ import time
import pytest
import trio
import tractor
from tractor._testing import (
from conftest import (
tractor_test,
)
from .conftest import (
sig_prog,
_INT_SIGNAL,
_INT_RETURN_CODE,
@ -30,9 +28,9 @@ def test_abort_on_sigint(daemon):
@tractor_test
async def test_cancel_remote_arbiter(daemon, reg_addr):
async def test_cancel_remote_arbiter(daemon, arb_addr):
assert not tractor.current_actor().is_arbiter
async with tractor.get_registry(*reg_addr) as portal:
async with tractor.get_arbiter(*arb_addr) as portal:
await portal.cancel_actor()
time.sleep(0.1)
@ -41,16 +39,16 @@ async def test_cancel_remote_arbiter(daemon, reg_addr):
# no arbiter socket should exist
with pytest.raises(OSError):
async with tractor.get_registry(*reg_addr) as portal:
async with tractor.get_arbiter(*arb_addr) as portal:
pass
def test_register_duplicate_name(daemon, reg_addr):
def test_register_duplicate_name(daemon, arb_addr):
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
) as n:
assert not tractor.current_actor().is_arbiter

View File

@ -1,364 +0,0 @@
'''
Audit sub-sys APIs from `.msg._ops`
mostly for ensuring correct `contextvars`
related settings around IPC contexts.
'''
from contextlib import (
asynccontextmanager as acm,
)
from msgspec import (
Struct,
)
import pytest
import trio
import tractor
from tractor import (
Context,
MsgTypeError,
current_ipc_ctx,
Portal,
)
from tractor.msg import (
_ops as msgops,
Return,
)
from tractor.msg import (
_codec,
)
from tractor.msg.types import (
log,
)
class PldMsg(
Struct,
# TODO: with multiple structs in-spec we need to tag them!
# -[ ] offer a built-in `PldMsg` type to inherit from which takes
# case of these details?
#
# https://jcristharif.com/msgspec/structs.html#tagged-unions
# tag=True,
# tag_field='msg_type',
):
field: str
maybe_msg_spec = PldMsg|None
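For reference, both a `PldMsg` instance and `None` satisfy this spec while anything else fails validation; a minimal `msgspec` sketch of that contract (illustrative only):

import msgspec
import pytest

wire: bytes = msgspec.msgpack.encode(PldMsg(field='yo'))
assert msgspec.msgpack.decode(wire, type=maybe_msg_spec) == PldMsg(field='yo')

assert msgspec.msgpack.decode(
    msgspec.msgpack.encode(None),
    type=maybe_msg_spec,
) is None

with pytest.raises(msgspec.ValidationError):
    # an int is not part of the spec so decoding must fail
    msgspec.msgpack.decode(
        msgspec.msgpack.encode(10),
        type=maybe_msg_spec,
    )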
@acm
async def maybe_expect_raises(
raises: BaseException|None = None,
ensure_in_message: list[str]|None = None,
post_mortem: bool = False,
timeout: int = 3,
) -> None:
'''
Async wrapper for ensuring errors propagate from the inner scope.
'''
if tractor._state.debug_mode():
timeout += 999
with trio.fail_after(timeout):
try:
yield
except BaseException as _inner_err:
inner_err = _inner_err
# wasn't-expected to error..
if raises is None:
raise
else:
assert type(inner_err) is raises
# maybe check for error txt content
if ensure_in_message:
part: str
err_repr: str = repr(inner_err)
for part in ensure_in_message:
for i, arg in enumerate(inner_err.args):
if part in err_repr:
break
# if part never matches an arg, then we're
# missing a match.
else:
raise ValueError(
'Failed to find error message content?\n\n'
f'expected: {ensure_in_message!r}\n'
f'part: {part!r}\n\n'
f'{inner_err.args}'
)
if post_mortem:
await tractor.post_mortem()
else:
if raises:
raise RuntimeError(
f'Expected a {raises.__name__!r} to be raised?'
)
@tractor.context(
pld_spec=maybe_msg_spec,
)
async def child(
ctx: Context,
started_value: int|PldMsg|None,
return_value: str|None,
validate_pld_spec: bool,
raise_on_started_mte: bool = True,
) -> None:
'''
Call ``Context.started()`` more than once (an error).
'''
expect_started_mte: bool = started_value == 10
# sanity check that child RPC context is the current one
curr_ctx: Context = current_ipc_ctx()
assert ctx is curr_ctx
rx: msgops.PldRx = ctx._pld_rx
curr_pldec: _codec.MsgDec = rx.pld_dec
ctx_meta: dict = getattr(
child,
'_tractor_context_meta',
None,
)
if ctx_meta:
assert (
ctx_meta['pld_spec']
is curr_pldec.spec
is curr_pldec.pld_spec
)
# 2 cases: handle send-side and recv-only validation
# - when `raise_on_started_mte == True`, send validate
# - else, parent-recv-side only validation
mte: MsgTypeError|None = None
try:
await ctx.started(
value=started_value,
validate_pld_spec=validate_pld_spec,
)
except MsgTypeError as _mte:
mte = _mte
log.exception('started()` raised an MTE!\n')
if not expect_started_mte:
raise RuntimeError(
'Child-ctx-task SHOULD NOT HAVE raised an MTE for\n\n'
f'{started_value!r}\n'
)
boxed_div: str = '------ - ------'
assert boxed_div not in mte._message
assert boxed_div not in mte.tb_str
assert boxed_div not in repr(mte)
assert boxed_div not in str(mte)
mte_repr: str = repr(mte)
for line in mte.message.splitlines():
assert line in mte_repr
# since this is a *local error* there should be no
# boxed traceback content!
assert not mte.tb_str
# propagate to parent?
if raise_on_started_mte:
raise
# no-send-side-error fallthrough
if (
validate_pld_spec
and
expect_started_mte
):
raise RuntimeError(
'Child-ctx-task SHOULD HAVE raised an MTE for\n\n'
f'{started_value!r}\n'
)
assert (
not expect_started_mte
or
not validate_pld_spec
)
# if wait_for_parent_to_cancel:
# ...
#
# ^-TODO-^ logic for diff validation policies on each side:
#
# -[ ] ensure that if we don't validate on the send
# side, that we are eventually error-cancelled by our
# parent due to the bad `Started` payload!
# -[ ] the boxed error should be srced from the parent's
# runtime NOT ours!
# -[ ] we should still error on bad `return_value`s
# despite the parent not yet error-cancelling us?
# |_ how do we want the parent side to look in that
# case?
# -[ ] maybe the equiv of "during handling of the
# above error another occurred" for the case where
# the parent sends a MTE to this child and while
# waiting for the child to terminate it gets back
# the MTE for this case?
#
# XXX should always fail on recv side since we can't
# really do much else beside terminate and relay the
# msg-type-error from this RPC task ;)
return return_value
@pytest.mark.parametrize(
'return_value',
[
'yo',
None,
],
ids=[
'return[invalid-"yo"]',
'return[valid-None]',
],
)
@pytest.mark.parametrize(
'started_value',
[
10,
PldMsg(field='yo'),
],
ids=[
'Started[invalid-10]',
'Started[valid-PldMsg]',
],
)
@pytest.mark.parametrize(
'pld_check_started_value',
[
True,
False,
],
ids=[
'check-started-pld',
'no-started-pld-validate',
],
)
def test_basic_payload_spec(
debug_mode: bool,
loglevel: str,
return_value: str|None,
started_value: int|PldMsg,
pld_check_started_value: bool,
):
'''
Validate the most basic `PldRx` msg-type-spec semantics around
an IPC `Context` endpoint start, started-sync, and final return
value depending on set payload types and the currently applied
pld-spec.
'''
invalid_return: bool = return_value == 'yo'
invalid_started: bool = started_value == 10
async def main():
async with tractor.open_nursery(
debug_mode=debug_mode,
loglevel=loglevel,
) as an:
p: Portal = await an.start_actor(
'child',
enable_modules=[__name__],
)
# since not opened yet.
assert current_ipc_ctx() is None
if invalid_started:
msg_type_str: str = 'Started'
bad_value: int = 10
elif invalid_return:
msg_type_str: str = 'Return'
bad_value: str = 'yo'
else:
# XXX but should never be used below then..
msg_type_str: str = ''
bad_value: str = ''
maybe_mte: MsgTypeError|None = None
should_raise: Exception|None = (
MsgTypeError if (
invalid_return
or
invalid_started
) else None
)
async with (
maybe_expect_raises(
raises=should_raise,
ensure_in_message=[
f"invalid `{msg_type_str}` msg payload",
f'{bad_value}',
f'has type {type(bad_value)!r}',
'not match type-spec',
f'`{msg_type_str}.pld: PldMsg|NoneType`',
],
# only for debug
# post_mortem=True,
),
p.open_context(
child,
return_value=return_value,
started_value=started_value,
validate_pld_spec=pld_check_started_value,
) as (ctx, first),
):
# now opened with 'child' sub
assert current_ipc_ctx() is ctx
assert type(first) is PldMsg
assert first.field == 'yo'
try:
res: None|PldMsg = await ctx.result(hide_tb=False)
assert res is None
except MsgTypeError as mte:
maybe_mte = mte
if not invalid_return:
raise
# expected this invalid `Return.pld` so audit
# the error state + meta-data
assert mte.expected_msg_type is Return
assert mte.cid == ctx.cid
mte_repr: str = repr(mte)
for line in mte.message.splitlines():
assert line in mte_repr
assert mte.tb_str
# await tractor.pause(shield=True)
# verify expected remote mte deats
assert ctx._local_error is None
assert (
mte is
ctx._remote_error is
ctx.maybe_error is
ctx.outcome
)
if should_raise is None:
assert maybe_mte is None
await p.cancel_actor()
trio.run(main)
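As a reference for the suite above, here is a minimal sketch (not part of the diff) of how an endpoint constrains its payloads via `pld_spec`; the `Ping` struct and `pinger` endpoint are made-up names for illustration:

```python
from msgspec import Struct

import tractor


class Ping(Struct):
    field: str


@tractor.context(
    # constrain `Started`/`Return` payloads for this endpoint to
    # `Ping` or `None`; anything else raises a `MsgTypeError`,
    # which is exactly what the invalid `10`/`'yo'` params above hit
    pld_spec=Ping | None,
)
async def pinger(
    ctx: tractor.Context,
) -> None:
    await ctx.started(Ping(field='pong'))
    return None
```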

View File

@ -4,8 +4,8 @@ from itertools import cycle
import pytest
import trio
import tractor
from tractor.testing import tractor_test
from tractor.experimental import msgpub
from tractor._testing import tractor_test
def test_type_checks():
@ -159,7 +159,7 @@ async def test_required_args(callwith_expecterror):
)
def test_multi_actor_subs_arbiter_pub(
loglevel,
reg_addr,
arb_addr,
pub_actor,
):
"""Try out the neato @pub decorator system.
@ -169,7 +169,7 @@ def test_multi_actor_subs_arbiter_pub(
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
enable_modules=[__name__],
) as n:
@ -254,12 +254,12 @@ def test_multi_actor_subs_arbiter_pub(
def test_single_subactor_pub_multitask_subs(
loglevel,
reg_addr,
arb_addr,
):
async def main():
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
enable_modules=[__name__],
) as n:

View File

@ -34,6 +34,7 @@ def test_resource_only_entered_once(key_on):
global _resource
_resource = 0
kwargs = {}
key = None
if key_on == 'key_value':
key = 'some_common_key'
@ -138,7 +139,7 @@ def test_open_local_sub_to_stream():
N local tasks using ``trionics.maybe_open_context()``.
'''
timeout: float = 3.6 if platform.system() != "Windows" else 10
timeout = 3 if platform.system() != "Windows" else 10
async def main():

View File

@ -1,248 +0,0 @@
'''
Special attention cases for using "infect `asyncio`" mode from a root
actor; i.e. not using a std `trio.run()` bootstrap.
'''
import asyncio
from functools import partial
import pytest
import trio
import tractor
from tractor import (
to_asyncio,
)
from tests.test_infected_asyncio import (
aio_echo_server,
)
@pytest.mark.parametrize(
'raise_error_mid_stream',
[
False,
Exception,
KeyboardInterrupt,
],
ids='raise_error={}'.format,
)
def test_infected_root_actor(
raise_error_mid_stream: bool|Exception,
# conftest wide
loglevel: str,
debug_mode: bool,
):
'''
Verify you can run the `tractor` runtime with `Actor.is_infected_aio() == True`
in the root actor.
'''
async def _trio_main():
with trio.fail_after(2 if not debug_mode else 999):
first: str
chan: to_asyncio.LinkedTaskChannel
async with (
tractor.open_root_actor(
debug_mode=debug_mode,
loglevel=loglevel,
),
to_asyncio.open_channel_from(
aio_echo_server,
) as (first, chan),
):
assert first == 'start'
for i in range(1000):
await chan.send(i)
out = await chan.receive()
assert out == i
print(f'asyncio echoing {i}')
if (
raise_error_mid_stream
and
i == 500
):
raise raise_error_mid_stream
if out is None:
try:
out = await chan.receive()
except trio.EndOfChannel:
break
else:
raise RuntimeError(
'aio channel never stopped?'
)
if raise_error_mid_stream:
with pytest.raises(raise_error_mid_stream):
tractor.to_asyncio.run_as_asyncio_guest(
trio_main=_trio_main,
)
else:
tractor.to_asyncio.run_as_asyncio_guest(
trio_main=_trio_main,
)
async def sync_and_err(
# just signature placeholders for compat with
# ``to_asyncio.open_channel_from()``
to_trio: trio.MemorySendChannel,
from_trio: asyncio.Queue,
ev: asyncio.Event,
):
if to_trio:
to_trio.send_nowait('start')
await ev.wait()
raise RuntimeError('asyncio-side')
@pytest.mark.parametrize(
'aio_err_trigger',
[
'before_start_point',
'after_trio_task_starts',
'after_start_point',
],
ids='aio_err_triggered={}'.format
)
def test_trio_prestarted_task_bubbles(
aio_err_trigger: str,
# conftest wide
loglevel: str,
debug_mode: bool,
):
async def pre_started_err(
raise_err: bool = False,
pre_sleep: float|None = None,
aio_trigger: asyncio.Event|None = None,
task_status=trio.TASK_STATUS_IGNORED,
):
'''
Maybe pre-started error then sleep.
'''
if pre_sleep is not None:
print(f'Sleeping from trio for {pre_sleep!r}s !')
await trio.sleep(pre_sleep)
# signal aio-task to raise JUST AFTER this task
# starts but has not yet `.started()`
if aio_trigger:
print('Signalling aio-task to raise from `trio`!!')
aio_trigger.set()
if raise_err:
print('Raising from trio!')
raise TypeError('trio-side')
task_status.started()
await trio.sleep_forever()
async def _trio_main():
# with trio.fail_after(2):
with trio.fail_after(999):
first: str
chan: to_asyncio.LinkedTaskChannel
aio_ev = asyncio.Event()
async with (
tractor.open_root_actor(
debug_mode=False,
loglevel=loglevel,
),
):
# TODO, tests for this with 3.13 egs?
# from tractor.devx import open_crash_handler
# with open_crash_handler():
async with (
# where we'll start a sub-task that errors BEFORE
# calling `.started()` such that the error should
# bubble before the guest run terminates!
trio.open_nursery() as tn,
# THEN start an infect task which should error just
# after the trio-side's task does.
to_asyncio.open_channel_from(
partial(
sync_and_err,
ev=aio_ev,
)
) as (first, chan),
):
for i in range(5):
pre_sleep: float|None = None
last_iter: bool = (i == 4)
# TODO, missing cases?
# -[ ] error as well on
# 'after_start_point' case as well for
# another case?
raise_err: bool = False
if last_iter:
raise_err: bool = True
# trigger aio task to error on next loop
# tick/checkpoint
if aio_err_trigger == 'before_start_point':
aio_ev.set()
pre_sleep: float = 0
await tn.start(
pre_started_err,
raise_err,
pre_sleep,
(aio_ev if (
aio_err_trigger == 'after_trio_task_starts'
and
last_iter
) else None
),
)
if (
aio_err_trigger == 'after_start_point'
and
last_iter
):
aio_ev.set()
with pytest.raises(
expected_exception=ExceptionGroup,
) as excinfo:
tractor.to_asyncio.run_as_asyncio_guest(
trio_main=_trio_main,
)
eg = excinfo.value
rte_eg, rest_eg = eg.split(RuntimeError)
# ensure the trio-task's error bubbled despite the aio-side
# having (maybe) errored first.
if aio_err_trigger in (
'after_trio_task_starts',
'after_start_point',
):
assert len(errs := rest_eg.exceptions) == 1
typerr = errs[0]
assert (
type(typerr) is TypeError
and
'trio-side' in typerr.args
)
# when aio errors BEFORE (last) trio task is scheduled, we should
# never see anything but the aio-side.
else:
assert len(rtes := rte_eg.exceptions) == 1
assert 'asyncio-side' in rtes[0].args[0]

View File

@ -1,8 +1,6 @@
'''
RPC (or maybe better labelled as "RTS: remote task scheduling"?)
related API and error checks.
'''
"""
RPC related
"""
import itertools
import pytest
@ -15,19 +13,9 @@ async def sleep_back_actor(
func_name,
func_defined,
exposed_mods,
*,
reg_addr: tuple,
):
if actor_name:
async with tractor.find_actor(
actor_name,
# NOTE: must be set manually since
# the subactor doesn't have the reg_addr
# fixture code run in it!
# TODO: maybe we should just set this once in the
# _state mod and derive to all children?
registry_addrs=[reg_addr],
) as portal:
async with tractor.find_actor(actor_name) as portal:
try:
await portal.run(__name__, func_name)
except tractor.RemoteActorError as err:
@ -36,7 +24,7 @@ async def sleep_back_actor(
if not exposed_mods:
expect = tractor.ModuleNotExposed
assert err.boxed_type is expect
assert err.type is expect
raise
else:
await trio.sleep(float('inf'))
@ -54,25 +42,14 @@ async def short_sleep():
(['tmp_mod'], 'import doggy', ModuleNotFoundError),
(['tmp_mod'], '4doggy', SyntaxError),
],
ids=[
'no_mods',
'this_mod',
'this_mod_bad_func',
'fail_to_import',
'fail_on_syntax',
],
ids=['no_mods', 'this_mod', 'this_mod_bad_func', 'fail_to_import',
'fail_on_syntax'],
)
def test_rpc_errors(
reg_addr,
to_call,
testdir,
):
'''
Test errors when making various RPC requests to an actor
def test_rpc_errors(arb_addr, to_call, testdir):
"""Test errors when making various RPC requests to an actor
that either doesn't have the requested module exposed or doesn't define
the named function.
'''
"""
exposed_mods, funcname, inside_err = to_call
subactor_exposed_mods = []
func_defined = globals().get(funcname, False)
@ -100,13 +77,8 @@ def test_rpc_errors(
# spawn a subactor which calls us back
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
enable_modules=exposed_mods.copy(),
# NOTE: will halt test in REPL if uncommented, so only
# do that if actually debugging subactor but keep it
# disabled for the test.
# debug_mode=True,
) as n:
actor = tractor.current_actor()
@ -123,7 +95,6 @@ def test_rpc_errors(
exposed_mods=exposed_mods,
func_defined=True if func_defined else False,
enable_modules=subactor_exposed_mods,
reg_addr=reg_addr,
)
def run():
@ -134,20 +105,18 @@ def test_rpc_errors(
run()
else:
# underlying errors aren't propagated upwards (yet)
with pytest.raises(
expected_exception=(remote_err, ExceptionGroup),
) as err:
with pytest.raises(remote_err) as err:
run()
# get raw instance from pytest wrapper
value = err.value
# might get multiple `trio.Cancelled`s as well inside an inception
if isinstance(value, ExceptionGroup):
if isinstance(value, trio.MultiError):
value = next(itertools.dropwhile(
lambda exc: not isinstance(exc, tractor.RemoteActorError),
value.exceptions
))
if getattr(value, 'type', None):
assert value.boxed_type is inside_err
assert value.type is inside_err

View File

@ -1,74 +0,0 @@
"""
Verifying internal runtime state and undocumented extras.
"""
import os
import pytest
import trio
import tractor
from tractor._testing import tractor_test
_file_path: str = ''
def unlink_file():
print('Removing tmp file!')
os.remove(_file_path)
async def crash_and_clean_tmpdir(
tmp_file_path: str,
error: bool = True,
):
global _file_path
_file_path = tmp_file_path
actor = tractor.current_actor()
actor.lifetime_stack.callback(unlink_file)
assert os.path.isfile(tmp_file_path)
await trio.sleep(0.1)
if error:
assert 0
else:
actor.cancel_soon()
@pytest.mark.parametrize(
'error_in_child',
[True, False],
)
@tractor_test
async def test_lifetime_stack_wipes_tmpfile(
tmp_path,
error_in_child: bool,
):
child_tmp_file = tmp_path / "child.txt"
child_tmp_file.touch()
assert child_tmp_file.exists()
path = str(child_tmp_file)
try:
with trio.move_on_after(0.5):
async with tractor.open_nursery() as n:
await ( # inlined portal
await n.run_in_actor(
crash_and_clean_tmpdir,
tmp_file_path=path,
error=error_in_child,
)
).result()
except (
tractor.RemoteActorError,
# tractor.BaseExceptionGroup,
BaseExceptionGroup,
):
pass
# tmp file should have been wiped by
# teardown stack.
assert not child_tmp_file.exists()

View File

@ -1,38 +1,38 @@
"""
Spawning basics
"""
from typing import (
Any,
)
from typing import Dict, Tuple, Optional
import pytest
import trio
import tractor
from tractor._testing import tractor_test
from conftest import tractor_test
data_to_pass_down = {'doggy': 10, 'kitty': 4}
async def spawn(
is_arbiter: bool,
data: dict,
reg_addr: tuple[str, int],
data: Dict,
arb_addr: Tuple[str, int],
):
namespaces = [__name__]
await trio.sleep(0.1)
async with tractor.open_root_actor(
arbiter_addr=reg_addr,
arbiter_addr=arb_addr,
):
actor = tractor.current_actor()
assert actor.is_arbiter == is_arbiter
data = data_to_pass_down
if actor.is_arbiter:
async with tractor.open_nursery() as nursery:
async with tractor.open_nursery(
) as nursery:
# forks here
portal = await nursery.run_in_actor(
@ -40,7 +40,7 @@ async def spawn(
is_arbiter=False,
name='sub-actor',
data=data,
reg_addr=reg_addr,
arb_addr=arb_addr,
enable_modules=namespaces,
)
@ -54,14 +54,12 @@ async def spawn(
return 10
def test_local_arbiter_subactor_global_state(
reg_addr,
):
def test_local_arbiter_subactor_global_state(arb_addr):
result = trio.run(
spawn,
True,
data_to_pass_down,
reg_addr,
arb_addr,
)
assert result == 10
@ -95,9 +93,7 @@ async def test_movie_theatre_convo(start_method):
await portal.cancel_actor()
async def cellar_door(
return_value: str|None,
):
async def cellar_door(return_value: Optional[str]):
return return_value
@ -107,18 +103,16 @@ async def cellar_door(
)
@tractor_test
async def test_most_beautiful_word(
start_method: str,
return_value: Any,
debug_mode: bool,
start_method,
return_value
):
'''
The main ``tractor`` routine.
'''
with trio.fail_after(1):
async with tractor.open_nursery(
debug_mode=debug_mode,
) as n:
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
cellar_door,
return_value=return_value,
@ -145,9 +139,9 @@ async def check_loglevel(level):
def test_loglevel_propagated_to_subactor(
start_method,
capfd,
reg_addr,
arb_addr,
):
if start_method == 'mp_forkserver':
if start_method == 'forkserver':
pytest.skip(
"a bug with `capfd` seems to make forkserver capture not work?")
@ -156,13 +150,13 @@ def test_loglevel_propagated_to_subactor(
async def main():
async with tractor.open_nursery(
name='arbiter',
loglevel=level,
start_method=start_method,
arbiter_addr=reg_addr,
arbiter_addr=arb_addr,
) as tn:
await tn.run_in_actor(
check_loglevel,
loglevel=level,
level=level,
)

View File

@ -7,10 +7,9 @@ import platform
import trio
import tractor
from tractor.testing import tractor_test
import pytest
from tractor._testing import tractor_test
def test_must_define_ctx():
@ -38,13 +37,10 @@ async def async_gen_stream(sequence):
assert cs.cancelled_caught
# TODO: deprecated; either remove entirely
# or re-impl in terms of a `MsgStream` one-sided
# wrapper, but at least remove `Portal.open_stream_from()`
@tractor.stream
async def context_stream(
ctx: tractor.Context,
sequence: list[int],
sequence
):
for i in sequence:
await ctx.send_yield(i)
@ -58,7 +54,7 @@ async def context_stream(
async def stream_from_single_subactor(
reg_addr,
arb_addr,
start_method,
stream_func,
):
@ -67,7 +63,7 @@ async def stream_from_single_subactor(
# only one per host address, spawns an actor if None
async with tractor.open_nursery(
registry_addrs=[reg_addr],
arbiter_addr=arb_addr,
start_method=start_method,
) as nursery:
@ -118,13 +114,13 @@ async def stream_from_single_subactor(
@pytest.mark.parametrize(
'stream_func', [async_gen_stream, context_stream]
)
def test_stream_from_single_subactor(reg_addr, start_method, stream_func):
def test_stream_from_single_subactor(arb_addr, start_method, stream_func):
"""Verify streaming from a spawned async generator.
"""
trio.run(
partial(
stream_from_single_subactor,
reg_addr,
arb_addr,
start_method,
stream_func=stream_func,
),
@ -228,14 +224,14 @@ async def a_quadruple_example():
return result_stream
async def cancel_after(wait, reg_addr):
async with tractor.open_root_actor(registry_addrs=[reg_addr]):
async def cancel_after(wait, arb_addr):
async with tractor.open_root_actor(arbiter_addr=arb_addr):
with trio.move_on_after(wait):
return await a_quadruple_example()
@pytest.fixture(scope='module')
def time_quad_ex(reg_addr, ci_env, spawn_backend):
def time_quad_ex(arb_addr, ci_env, spawn_backend):
if spawn_backend == 'mp':
"""no idea but the mp *nix runs are flaking out here often...
"""
@ -243,7 +239,7 @@ def time_quad_ex(reg_addr, ci_env, spawn_backend):
timeout = 7 if platform.system() in ('Windows', 'Darwin') else 4
start = time.time()
results = trio.run(cancel_after, timeout, reg_addr)
results = trio.run(cancel_after, timeout, arb_addr)
diff = time.time() - start
assert results
return results, diff
@ -254,7 +250,7 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend):
results, diff = time_quad_ex
assert results
this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 3
this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 2.666
assert diff < this_fast
@ -263,14 +259,14 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend):
list(map(lambda i: i/10, range(3, 9)))
)
def test_not_fast_enough_quad(
reg_addr, time_quad_ex, cancel_delay, ci_env, spawn_backend
arb_addr, time_quad_ex, cancel_delay, ci_env, spawn_backend
):
"""Verify we can cancel midway through the quad example and all actors
cancel gracefully.
"""
results, diff = time_quad_ex
delay = max(diff - cancel_delay, 0)
results = trio.run(cancel_after, delay, reg_addr)
results = trio.run(cancel_after, delay, arb_addr)
system = platform.system()
if system in ('Windows', 'Darwin') and results is not None:
# In CI environments it seems later runs are quicker than the first
@ -283,7 +279,7 @@ def test_not_fast_enough_quad(
@tractor_test
async def test_respawn_consumer_task(
reg_addr,
arb_addr,
spawn_backend,
loglevel,
):

View File

@ -2,23 +2,17 @@
Broadcast channels for fan-out to local tasks.
"""
from contextlib import (
asynccontextmanager as acm,
)
from contextlib import asynccontextmanager
from functools import partial
from itertools import cycle
import time
from typing import Optional
from typing import Optional, List, Tuple
import pytest
import trio
from trio.lowlevel import current_task
import tractor
from tractor.trionics import (
broadcast_receiver,
Lagged,
collapse_eg,
)
from tractor.trionics import broadcast_receiver, Lagged
@tractor.context
@ -43,7 +37,7 @@ async def echo_sequences(
async def ensure_sequence(
stream: tractor.MsgStream,
stream: tractor.ReceiveMsgStream,
sequence: list,
delay: Optional[float] = None,
@ -65,21 +59,21 @@ async def ensure_sequence(
break
@acm
@asynccontextmanager
async def open_sequence_streamer(
sequence: list[int],
reg_addr: tuple[str, int],
sequence: List[int],
arb_addr: Tuple[str, int],
start_method: str,
) -> tractor.MsgStream:
async with tractor.open_nursery(
arbiter_addr=reg_addr,
arbiter_addr=arb_addr,
start_method=start_method,
) as an:
) as tn:
portal = await an.start_actor(
portal = await tn.start_actor(
'sequence_echoer',
enable_modules=[__name__],
)
@ -89,14 +83,14 @@ async def open_sequence_streamer(
) as (ctx, first):
assert first is None
async with ctx.open_stream(allow_overruns=True) as stream:
async with ctx.open_stream(backpressure=True) as stream:
yield stream
await portal.cancel_actor()
def test_stream_fan_out_to_local_subscriptions(
reg_addr,
arb_addr,
start_method,
):
@ -106,7 +100,7 @@ def test_stream_fan_out_to_local_subscriptions(
async with open_sequence_streamer(
sequence,
reg_addr,
arb_addr,
start_method,
) as stream:
@ -141,7 +135,7 @@ def test_stream_fan_out_to_local_subscriptions(
]
)
def test_consumer_and_parent_maybe_lag(
reg_addr,
arb_addr,
start_method,
task_delays,
):
@ -153,17 +147,14 @@ def test_consumer_and_parent_maybe_lag(
async with open_sequence_streamer(
sequence,
reg_addr,
arb_addr,
start_method,
) as stream:
try:
async with (
collapse_eg(),
trio.open_nursery() as tn,
):
async with trio.open_nursery() as n:
tn.start_soon(
n.start_soon(
ensure_sequence,
stream,
sequence.copy(),
@ -217,11 +208,10 @@ def test_consumer_and_parent_maybe_lag(
def test_faster_task_to_recv_is_cancelled_by_slower(
reg_addr,
arb_addr,
start_method,
):
'''
Ensure that if a faster task consuming from a stream is cancelled
'''Ensure that if a faster task consuming from a stream is cancelled
the slower task can continue to receive all expected values.
'''
@ -231,13 +221,13 @@ def test_faster_task_to_recv_is_cancelled_by_slower(
async with open_sequence_streamer(
sequence,
reg_addr,
arb_addr,
start_method,
) as stream:
async with trio.open_nursery() as tn:
tn.start_soon(
async with trio.open_nursery() as n:
n.start_soon(
ensure_sequence,
stream,
sequence.copy(),
@ -259,7 +249,7 @@ def test_faster_task_to_recv_is_cancelled_by_slower(
continue
print('cancelling faster subtask')
tn.cancel_scope.cancel()
n.cancel_scope.cancel()
try:
value = await stream.receive()
@ -277,7 +267,7 @@ def test_faster_task_to_recv_is_cancelled_by_slower(
# the faster subtask was cancelled
break
# await tractor.pause()
# await tractor.breakpoint()
# await stream.receive()
print(f'final value: {value}')
@ -308,7 +298,7 @@ def test_subscribe_errors_after_close():
def test_ensure_slow_consumers_lag_out(
reg_addr,
arb_addr,
start_method,
):
'''This is a pure local task test; no tractor
@ -377,13 +367,13 @@ def test_ensure_slow_consumers_lag_out(
f'on {lags}:{value}')
return
async with trio.open_nursery() as tn:
async with trio.open_nursery() as nursery:
for i in range(1, num_laggers):
task_name = f'sub_{i}'
laggers[task_name] = 0
tn.start_soon(
nursery.start_soon(
partial(
sub_and_print,
delay=i*0.001,
@ -419,8 +409,8 @@ def test_ensure_slow_consumers_lag_out(
seq = brx._state.subs[brx.key]
assert seq == len(brx._state.queue) - 1
# all no_overruns entries in the underlying
# channel should have been copied into the bcaster
# all backpressured entries in the underlying
# channel should have been copied into the caster
# queue trailing-window
async for i in rx:
print(f'bped: {i}')
@ -470,52 +460,3 @@ def test_first_recver_is_cancelled():
assert value == 1
trio.run(main)
def test_no_raise_on_lag():
'''
Run a simple 2-task broadcast where one task is slow but configured
so that it does not raise `Lagged` on overruns using
`raise_on_lag=False` and verify that the task does not raise.
'''
size = 100
tx, rx = trio.open_memory_channel(size)
brx = broadcast_receiver(rx, size)
async def slow():
async with brx.subscribe(
raise_on_lag=False,
) as br:
async for msg in br:
print(f'slow task got: {msg}')
await trio.sleep(0.1)
async def fast():
async with brx.subscribe() as br:
async for msg in br:
print(f'fast task got: {msg}')
async def main():
async with (
tractor.open_root_actor(
# NOTE: so we see the warning msg emitted by the bcaster
# internals when the no raise flag is set.
loglevel='warning',
),
collapse_eg(),
trio.open_nursery() as n,
):
n.start_soon(slow)
n.start_soon(fast)
for i in range(1000):
await tx.send(i)
# simulate user nailing ctrl-c after realizing
# there's a lag in the slow task.
await trio.sleep(1)
raise KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
trio.run(main)

View File

@ -3,13 +3,9 @@ Reminders for oddities in `trio` that we need to stay aware of and/or
want to see changed.
'''
from contextlib import (
asynccontextmanager as acm,
)
import pytest
import trio
from trio import TaskStatus
from trio_typing import TaskStatus
@pytest.mark.parametrize(
@ -64,9 +60,7 @@ def test_stashed_child_nursery(use_start_soon):
async def main():
async with (
trio.open_nursery(
strict_exception_groups=False,
) as pn,
trio.open_nursery() as pn,
):
cn = await pn.start(mk_child_nursery)
assert cn
@ -86,118 +80,3 @@ def test_stashed_child_nursery(use_start_soon):
with pytest.raises(NameError):
trio.run(main)
@pytest.mark.parametrize(
('unmask_from_canc', 'canc_from_finally'),
[
(True, False),
(True, True),
pytest.param(False, True,
marks=pytest.mark.xfail(reason="never raises!")
),
],
# TODO, ask ronny how to impl this .. XD
# ids='unmask_from_canc={0}, canc_from_finally={1}',#.format,
)
def test_acm_embedded_nursery_propagates_enter_err(
canc_from_finally: bool,
unmask_from_canc: bool,
debug_mode: bool,
):
'''
Demo how a masking `trio.Cancelled` could be handled by unmasking from the
`.__context__` field when a user (by accident) re-raises from a `finally:`.
'''
import tractor
@acm
async def maybe_raise_from_masking_exc(
tn: trio.Nursery,
unmask_from: BaseException|None = trio.Cancelled
# TODO, maybe offer a collection?
# unmask_from: set[BaseException] = {
# trio.Cancelled,
# },
):
if not unmask_from:
yield
return
try:
yield
except* unmask_from as be_eg:
# TODO, if we offer `unmask_from: set`
# for masker_exc_type in unmask_from:
matches, rest = be_eg.split(unmask_from)
if not matches:
raise
for exc_match in be_eg.exceptions:
if (
(exc_ctx := exc_match.__context__)
and
type(exc_ctx) not in {
# trio.Cancelled, # always by default?
unmask_from,
}
):
exc_ctx.add_note(
f'\n'
f'WARNING: the above error was masked by a {unmask_from!r} !?!\n'
f'Are you always cancelling? Say from a `finally:` ?\n\n'
f'{tn!r}'
)
raise exc_ctx from exc_match
@acm
async def wraps_tn_that_always_cancels():
async with (
trio.open_nursery() as tn,
maybe_raise_from_masking_exc(
tn=tn,
unmask_from=(
trio.Cancelled
if unmask_from_canc
else None
),
)
):
try:
yield tn
finally:
if canc_from_finally:
tn.cancel_scope.cancel()
await trio.lowlevel.checkpoint()
async def _main():
with tractor.devx.maybe_open_crash_handler(
pdb=debug_mode,
) as bxerr:
assert not bxerr.value
async with (
wraps_tn_that_always_cancels() as tn,
):
assert not tn.cancel_scope.cancel_called
assert 0
assert (
(err := bxerr.value)
and
type(err) is AssertionError
)
with pytest.raises(ExceptionGroup) as excinfo:
trio.run(_main)
eg: ExceptionGroup = excinfo.value
assert_eg, rest_eg = eg.split(AssertionError)
assert len(assert_eg.exceptions) == 1

7 towncrier.toml 100644
View File

@ -0,0 +1,7 @@
[tool.towncrier]
package = "tractor"
filename = "NEWS.rst"
directory = "nooz/"
title_format = "tractor {version} ({project_date})"
version = "0.1.0a4"
template = "nooz/_template.rst"

View File

@ -15,56 +15,64 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
tractor: structured concurrent ``trio``-"actors".
tractor: structured concurrent "actors".
"""
from trio import MultiError
from ._clustering import (
open_actor_cluster as open_actor_cluster,
)
from ._context import (
Context as Context, # the type
context as context, # a func-decorator
)
from ._clustering import open_actor_cluster
from ._ipc import Channel
from ._streaming import (
MsgStream as MsgStream,
stream as stream,
Context,
ReceiveMsgStream,
MsgStream,
stream,
context,
)
from ._discovery import (
get_registry as get_registry,
find_actor as find_actor,
wait_for_actor as wait_for_actor,
query_actor as query_actor,
)
from ._supervise import (
open_nursery as open_nursery,
ActorNursery as ActorNursery,
)
from ._state import (
current_actor as current_actor,
is_root_process as is_root_process,
current_ipc_ctx as current_ipc_ctx,
debug_mode as debug_mode
get_arbiter,
find_actor,
wait_for_actor,
query_actor,
)
from ._supervise import open_nursery
from ._state import current_actor, is_root_process
from ._exceptions import (
ContextCancelled as ContextCancelled,
ModuleNotExposed as ModuleNotExposed,
MsgTypeError as MsgTypeError,
RemoteActorError as RemoteActorError,
TransportClosed as TransportClosed,
RemoteActorError,
ModuleNotExposed,
ContextCancelled,
)
from .devx import (
breakpoint as breakpoint,
pause as pause,
pause_from_sync as pause_from_sync,
post_mortem as post_mortem,
)
from . import msg as msg
from ._root import (
run_daemon as run_daemon,
open_root_actor as open_root_actor,
)
from ._ipc import Channel as Channel
from ._portal import Portal as Portal
from ._runtime import Actor as Actor
# from . import hilevel as hilevel
from ._debug import breakpoint, post_mortem
from . import msg
from ._root import run, run_daemon, open_root_actor
from ._portal import Portal
__all__ = [
'Channel',
'Context',
'ContextCancelled',
'ModuleNotExposed',
'MsgStream',
'MultiError',
'Portal',
'ReceiveMsgStream',
'RemoteActorError',
'breakpoint',
'context',
'current_actor',
'find_actor',
'get_arbiter',
'is_root_process',
'msg',
'open_actor_cluster',
'open_nursery',
'open_root_actor',
'post_mortem',
'query_actor',
'run',
'run_daemon',
'stream',
'to_asyncio',
'wait_for_actor',
]
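As a quick orientation to the export list above, a minimal sketch of how the re-exported pieces are typically combined; the `echo` function is hypothetical:

```python
import trio
import tractor


async def echo(msg: str) -> str:
    # hypothetical remote-callable used only for illustration
    return msg


async def main():
    async with tractor.open_nursery() as an:
        # spawn a subactor, run `echo` as its single task and
        # collect the return value through the returned portal
        portal: tractor.Portal = await an.run_in_actor(
            echo,
            msg='hello',
        )
        assert await portal.result() == 'hello'


if __name__ == '__main__':
    trio.run(main)
```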

1567 tractor/_actor.py 100644

File diff suppressed because it is too large.

View File

@ -18,11 +18,13 @@
This is the "bootloader" for actors started using the native trio backend.
"""
import sys
import trio
import argparse
from ast import literal_eval
from ._runtime import Actor
from ._actor import Actor
from ._entry import _trio_main
@ -35,8 +37,9 @@ def parse_ipaddr(arg):
return (str(host), int(port))
from ._entry import _trio_main
if __name__ == "__main__":
__tracebackhide__: bool = True
parser = argparse.ArgumentParser()
parser.add_argument("--uid", type=parse_uid)

View File

@ -19,13 +19,10 @@ Actor cluster helpers.
'''
from __future__ import annotations
from contextlib import (
asynccontextmanager as acm,
)
from contextlib import asynccontextmanager as acm
from multiprocessing import cpu_count
from typing import (
AsyncGenerator,
)
from typing import AsyncGenerator, Optional
import trio
import tractor
@ -35,12 +32,9 @@ import tractor
async def open_actor_cluster(
modules: list[str],
count: int = cpu_count(),
names: list[str] | None = None,
names: Optional[list[str]] = None,
start_method: Optional[str] = None,
hard_kill: bool = False,
# passed through verbatim to ``open_root_actor()``
**runtime_kwargs,
) -> AsyncGenerator[
dict[str, tractor.Portal],
None,
@ -55,9 +49,7 @@ async def open_actor_cluster(
raise ValueError(
f'Number of names is {len(names)} but count is {count}')
async with tractor.open_nursery(
**runtime_kwargs,
) as an:
async with tractor.open_nursery(start_method=start_method) as an:
async with trio.open_nursery() as n:
uid = tractor.current_actor().uid
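Given the signature shown above, a hedged sketch of driving `open_actor_cluster()`; the worker names and count are arbitrary:

```python
import trio
import tractor


async def main():
    # yields a mapping of actor-name -> Portal, one per spawned actor
    async with tractor.open_actor_cluster(
        modules=[__name__],
        count=2,
        names=['worker0', 'worker1'],
    ) as portals:
        for name, portal in portals.items():
            print(f'cluster actor up: {name} -> {portal}')


trio.run(main)
```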

File diff suppressed because it is too large.

652 tractor/_debug.py 100644
View File

@ -0,0 +1,652 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Multi-core debugging for da peeps!
"""
import bdb
import sys
from functools import partial
from contextlib import asynccontextmanager as acm
from typing import (
Tuple,
Optional,
Callable,
AsyncIterator,
AsyncGenerator,
)
import tractor
import trio
from trio_typing import TaskStatus
from .log import get_logger
from . import _state
from ._discovery import get_root
from ._state import is_root_process, debug_mode
from ._exceptions import is_multi_cancelled
try:
# wtf: only exported when installed in dev mode?
import pdbpp
except ImportError:
# pdbpp is installed in regular mode...it monkey patches stuff
import pdb
assert pdb.xpm, "pdbpp is not installed?" # type: ignore
pdbpp = pdb
log = get_logger(__name__)
__all__ = ['breakpoint', 'post_mortem']
# TODO: wrap all these in a static global class: ``DebugLock`` maybe?
# placeholder for function to set a ``trio.Event`` on debugger exit
_pdb_release_hook: Optional[Callable] = None
# actor-wide variable pointing to current task name using debugger
_local_task_in_debug: Optional[str] = None
# actor tree-wide actor uid that supposedly has the tty lock
_global_actor_in_debug: Optional[Tuple[str, str]] = None
# lock in root actor preventing multi-access to local tty
_debug_lock: trio.StrictFIFOLock = trio.StrictFIFOLock()
_local_pdb_complete: Optional[trio.Event] = None
_no_remote_has_tty: Optional[trio.Event] = None
# XXX: set by the current task waiting on the root tty lock
# and must be cancelled if this actor is cancelled via message
# otherwise deadlocks with the parent actor may ensue
_debugger_request_cs: Optional[trio.CancelScope] = None
class TractorConfig(pdbpp.DefaultConfig):
"""Custom ``pdbpp`` goodness.
"""
# sticky_by_default = True
class PdbwTeardown(pdbpp.Pdb):
"""Add teardown hooks to the regular ``pdbpp.Pdb``.
"""
# override the pdbpp config with our coolio one
DefaultConfig = TractorConfig
# TODO: figure out how to disallow recursive .set_trace() entry
# since that'll cause deadlock for us.
def set_continue(self):
try:
super().set_continue()
finally:
global _local_task_in_debug
_local_task_in_debug = None
_pdb_release_hook()
def set_quit(self):
try:
super().set_quit()
finally:
global _local_task_in_debug
_local_task_in_debug = None
_pdb_release_hook()
# TODO: will be needed whenever we get to true remote debugging.
# XXX see https://github.com/goodboy/tractor/issues/130
# # TODO: is there some way to determine this programatically?
# _pdb_exit_patterns = tuple(
# str.encode(patt + "\n") for patt in (
# 'c', 'cont', 'continue', 'q', 'quit')
# )
# def subactoruid2proc(
# actor: 'Actor', # noqa
# uid: Tuple[str, str]
# ) -> trio.Process:
# n = actor._actoruid2nursery[uid]
# _, proc, _ = n._children[uid]
# return proc
# async def hijack_stdin():
# log.info(f"Hijacking stdin from {actor.uid}")
# trap std in and relay to subproc
# async_stdin = trio.wrap_file(sys.stdin)
# async with aclosing(async_stdin):
# async for msg in async_stdin:
# log.runtime(f"Stdin input:\n{msg}")
# # encode to bytes
# bmsg = str.encode(msg)
# # relay bytes to subproc over pipe
# # await proc.stdin.send_all(bmsg)
# if bmsg in _pdb_exit_patterns:
# log.info("Closing stdin hijack")
# break
@acm
async def _acquire_debug_lock(
uid: Tuple[str, str]
) -> AsyncIterator[trio.StrictFIFOLock]:
'''Acquire a root-actor local FIFO lock which tracks mutex access of
the process tree's global debugger breakpoint.
This lock avoids tty clobbering (by preventing multiple processes
reading from stdstreams) and ensures multi-actor, sequential access
to the ``pdb`` repl.
'''
global _debug_lock, _global_actor_in_debug, _no_remote_has_tty
task_name = trio.lowlevel.current_task().name
log.debug(
f"Attempting to acquire TTY lock, remote task: {task_name}:{uid}"
)
we_acquired = False
if _no_remote_has_tty is None:
# mark the tty lock as being in use so that the runtime
# can try to avoid clobbering any connection from a child
# that's currently relying on it.
_no_remote_has_tty = trio.Event()
try:
log.debug(
f"entering lock checkpoint, remote task: {task_name}:{uid}"
)
we_acquired = True
await _debug_lock.acquire()
_global_actor_in_debug = uid
log.debug(f"TTY lock acquired, remote task: {task_name}:{uid}")
# NOTE: critical section: this yield is unshielded!
# IF we received a cancel during the shielded lock entry of some
# next-in-queue requesting task, then the resumption here will
# result in that ``trio.Cancelled`` being raised to our caller
# (likely from ``_hijack_stdin_for_child()`` below)! In
# this case the ``finally:`` below should trigger and the
# surrounding caller side context should cancel normally
# relaying back to the caller.
yield _debug_lock
finally:
# if _global_actor_in_debug == uid:
if we_acquired and _debug_lock.locked():
_debug_lock.release()
# IFF there are no more requesting tasks queued up, fire the
# "tty-unlocked" event thereby alerting any monitors of the lock that
# we are now back in the "tty unlocked" state. This is basically
# an edge triggered signal around an empty queue of sub-actor
# tasks that may have tried to acquire the lock.
stats = _debug_lock.statistics()
if (
not stats.owner
):
log.debug(f"No more tasks waiting on tty lock! says {uid}")
_no_remote_has_tty.set()
_no_remote_has_tty = None
_global_actor_in_debug = None
log.debug(f"TTY lock released, remote task: {task_name}:{uid}")
def handler(signum, frame, *args):
"""Specialized debugger compatible SIGINT handler.
In children we always ignore it to avoid deadlocks since cancellation
should always be managed by the parent supervising actor. The root
is always cancelled on ctrl-c.
"""
if is_root_process():
tractor.current_actor().cancel_soon()
else:
print(
"tractor ignores SIGINT while in debug mode\n"
"If you have a special need for it please open an issue.\n"
)
@tractor.context
async def _hijack_stdin_for_child(
ctx: tractor.Context,
subactor_uid: Tuple[str, str]
) -> str:
'''
Hijack the tty in the root process of an actor tree such that
the pdbpp debugger console can be allocated to a sub-actor for repl
bossing.
'''
task_name = trio.lowlevel.current_task().name
# TODO: when we get to true remote debugging
# this will deliver stdin data?
log.debug(
"Attempting to acquire TTY lock\n"
f"remote task: {task_name}:{subactor_uid}"
)
log.debug(f"Actor {subactor_uid} is WAITING on stdin hijack lock")
with trio.CancelScope(shield=True):
try:
lock = None
async with _acquire_debug_lock(subactor_uid) as lock:
# indicate to child that we've locked stdio
await ctx.started('Locked')
log.debug(f"Actor {subactor_uid} acquired stdin hijack lock")
# wait for unlock pdb by child
async with ctx.open_stream() as stream:
assert await stream.receive() == 'pdb_unlock'
# try:
# assert await stream.receive() == 'pdb_unlock'
except (
# BaseException,
trio.MultiError,
trio.BrokenResourceError,
trio.Cancelled, # by local cancellation
trio.ClosedResourceError, # by self._rx_chan
) as err:
# XXX: there may be a race with the portal teardown
# with the calling actor which we can safely ignore.
# The alternative would be sending an ack message
# and allowing the client to wait for us to teardown
# first?
if lock and lock.locked():
lock.release()
if isinstance(err, trio.Cancelled):
raise
finally:
log.debug(
"TTY lock released, remote task:"
f"{task_name}:{subactor_uid}")
return "pdb_unlock_complete"
async def wait_for_parent_stdin_hijack(
actor_uid: Tuple[str, str],
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED
):
'''
Connect to the root actor via a ctx and invoke a task which locks
a root-local TTY lock.
This function is used by any sub-actor to acquire mutex access to
pdb and the root's TTY for interactive debugging (see below inside
``_breakpoint()``). It can be used to ensure that an intermediate
nursery-owning actor does not clobber its children if they are in
debug (see below inside ``maybe_wait_for_debugger()``).
'''
global _debugger_request_cs
with trio.CancelScope(shield=True) as cs:
_debugger_request_cs = cs
try:
async with get_root() as portal:
# this syncs to child's ``Context.started()`` call.
async with portal.open_context(
tractor._debug._hijack_stdin_for_child,
subactor_uid=actor_uid,
) as (ctx, val):
log.pdb('locked context')
assert val == 'Locked'
async with ctx.open_stream() as stream:
# unblock local caller
task_status.started(cs)
try:
assert _local_pdb_complete
await _local_pdb_complete.wait()
finally:
# TODO: shielding currently can cause hangs...
with trio.CancelScope(shield=True):
await stream.send('pdb_unlock')
# sync with callee termination
assert await ctx.result() == "pdb_unlock_complete"
except tractor.ContextCancelled:
log.warning('Root actor cancelled debug lock')
finally:
log.debug(f"Exiting debugger for actor {actor_uid}")
global _local_task_in_debug
_local_task_in_debug = None
log.debug(f"Child {actor_uid} released parent stdio lock")
async def _breakpoint(
debug_func,
# TODO:
# shield: bool = False
) -> None:
'''``tractor`` breakpoint entry for engaging pdb machinery
in the root or a subactor.
'''
# TODO: is it possible to debug a trio.Cancelled except block?
# right now it seems like we can kinda do with by shielding
# around ``tractor.breakpoint()`` but not if we move the shielded
# scope here???
# with trio.CancelScope(shield=shield):
actor = tractor.current_actor()
task_name = trio.lowlevel.current_task().name
global _local_pdb_complete, _pdb_release_hook
global _local_task_in_debug, _global_actor_in_debug
await trio.lowlevel.checkpoint()
if not _local_pdb_complete or _local_pdb_complete.is_set():
_local_pdb_complete = trio.Event()
# TODO: need a more robust check for the "root" actor
if actor._parent_chan and not is_root_process():
if _local_task_in_debug:
if _local_task_in_debug == task_name:
# this task already has the lock and is
# likely recurrently entering a breakpoint
return
# if **this** actor is already in debug mode block here
# waiting for the control to be released - this allows
# support for recursive entries to `tractor.breakpoint()`
log.warning(f"{actor.uid} already has a debug lock, waiting...")
await _local_pdb_complete.wait()
await trio.sleep(0.1)
# mark local actor as "in debug mode" to avoid recurrent
# entries/requests to the root process
_local_task_in_debug = task_name
# assign unlock callback for debugger teardown hooks
_pdb_release_hook = _local_pdb_complete.set
# this **must** be awaited by the caller and is done using the
# root nursery so that the debugger can continue to run without
# being restricted by the scope of a new task nursery.
# NOTE: if we want to debug a trio.Cancelled triggered exception
# we have to figure out how to avoid having the service nursery
# cancel on this task start? I *think* this works below?
# actor._service_n.cancel_scope.shield = shield
with trio.CancelScope(shield=True):
await actor._service_n.start(
wait_for_parent_stdin_hijack,
actor.uid,
)
elif is_root_process():
# we also wait in the root-parent for any child that
# may have the tty locked prior
global _debug_lock
# TODO: wait, what about multiple root tasks acquiring it though?
# root process (us) already has it; ignore
if _global_actor_in_debug == actor.uid:
return
# XXX: since we need to enter pdb synchronously below,
# we have to release the lock manually from pdb completion
# callbacks. Can't think of a nicer way then this atm.
if _debug_lock.locked():
log.warning(
'Root actor attempting to shield-acquire active tty lock'
f' owned by {_global_actor_in_debug}')
# must shield here to avoid hitting a ``Cancelled`` and
# a child getting stuck bc we clobbered the tty
with trio.CancelScope(shield=True):
await _debug_lock.acquire()
else:
# may be cancelled
await _debug_lock.acquire()
_global_actor_in_debug = actor.uid
_local_task_in_debug = task_name
# the lock must be released on pdb completion
def teardown():
global _local_pdb_complete, _debug_lock
global _global_actor_in_debug, _local_task_in_debug
_debug_lock.release()
_global_actor_in_debug = None
_local_task_in_debug = None
_local_pdb_complete.set()
_pdb_release_hook = teardown
# block here one (at the appropriate frame *up*) where
# ``breakpoint()`` was awaited and begin handling stdio.
log.debug("Entering the synchronous world of pdb")
debug_func(actor)
def _mk_pdb() -> PdbwTeardown:
# XXX: setting these flags on the pdb instance are absolutely
# critical to having ctrl-c work in the ``trio`` standard way! The
# stdlib's pdb supports entering the current sync frame on a SIGINT,
# with ``trio`` we pretty much never want this and if we did we can
# handle it in the ``tractor`` task runtime.
pdb = PdbwTeardown()
pdb.allow_kbdint = True
pdb.nosigint = True
return pdb
def _set_trace(actor=None):
pdb = _mk_pdb()
if actor is not None:
log.pdb(f"\nAttaching pdb to actor: {actor.uid}\n")
pdb.set_trace(
# start 2 levels up in user code
frame=sys._getframe().f_back.f_back,
)
else:
# we entered the global ``breakpoint()`` built-in from sync code
global _local_task_in_debug, _pdb_release_hook
_local_task_in_debug = 'sync'
def nuttin():
pass
_pdb_release_hook = nuttin
pdb.set_trace(
# start 2 levels up in user code
frame=sys._getframe().f_back,
)
breakpoint = partial(
_breakpoint,
_set_trace,
)
def _post_mortem(actor):
log.pdb(f"\nAttaching to pdb in crashed actor: {actor.uid}\n")
pdb = _mk_pdb()
# custom Pdb post-mortem entry
pdbpp.xpm(Pdb=lambda: pdb)
post_mortem = partial(
_breakpoint,
_post_mortem,
)
async def _maybe_enter_pm(err):
if (
debug_mode()
# NOTE: don't enter debug mode recursively after quitting pdb
# Iow, don't re-enter the repl if the `quit` command was issued
# by the user.
and not isinstance(err, bdb.BdbQuit)
# XXX: if the error is the likely result of runtime-wide
# cancellation, we don't want to enter the debugger since
# there's races between when the parent actor has killed all
# comms and when the child tries to contact said parent to
# acquire the tty lock.
# Really we just want to mostly avoid catching KBIs here so there
# might be a simpler check we can do?
and not is_multi_cancelled(err)
):
log.debug("Actor crashed, entering debug mode")
await post_mortem()
return True
else:
return False
@acm
async def acquire_debug_lock(
subactor_uid: Tuple[str, str],
) -> AsyncGenerator[None, tuple]:
'''
Grab root's debug lock on entry, release on exit.
This helper is for actors who don't actually need
to acquire the debugger but want to wait until the
lock is free in the tree root.
'''
if not debug_mode():
yield None
return
async with trio.open_nursery() as n:
cs = await n.start(
wait_for_parent_stdin_hijack,
subactor_uid,
)
yield None
cs.cancel()
async def maybe_wait_for_debugger(
poll_steps: int = 2,
poll_delay: float = 0.1,
child_in_debug: bool = False,
) -> None:
if not debug_mode() and not child_in_debug:
return
if (
is_root_process()
):
global _no_remote_has_tty, _global_actor_in_debug, _wait_all_tasks_lock
# If we error in the root but the debugger is
# engaged we don't want to prematurely kill (and
# thus clobber access to) the local tty since it
# will make the pdb repl unusable.
# Instead try to wait for pdb to be released before
# tearing down.
sub_in_debug = None
for _ in range(poll_steps):
if _global_actor_in_debug:
sub_in_debug = tuple(_global_actor_in_debug)
log.debug(
'Root polling for debug')
with trio.CancelScope(shield=True):
await trio.sleep(poll_delay)
# TODO: could this make things more deterministic? wait
# to see if a sub-actor task will be scheduled and grab
# the tty lock on the next tick?
# XXX: doesn't seem to work
# await trio.testing.wait_all_tasks_blocked(cushion=0)
debug_complete = _no_remote_has_tty
if (
(debug_complete and
not debug_complete.is_set())
):
log.debug(
'Root has errored but pdb is in use by '
f'child {sub_in_debug}\n'
'Waiting on tty lock to release..')
await debug_complete.wait()
await trio.sleep(poll_delay)
continue
else:
log.debug(
'Root acquired TTY LOCK'
)

View File

@ -15,71 +15,46 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Discovery (protocols) API for automatic addressing and location
management of (service) actors.
Actor discovery API.
"""
from __future__ import annotations
from typing import (
AsyncGenerator,
AsyncContextManager,
TYPE_CHECKING,
)
from typing import Tuple, Optional, Union, AsyncGenerator
from contextlib import asynccontextmanager as acm
from tractor.log import get_logger
from .trionics import gather_contexts
from ._ipc import _connect_chan, Channel
from ._portal import (
Portal,
open_portal,
LocalPortal,
)
from ._state import (
current_actor,
_runtime_vars,
)
if TYPE_CHECKING:
from ._runtime import Actor
log = get_logger(__name__)
from ._state import current_actor, _runtime_vars
@acm
async def get_registry(
async def get_arbiter(
host: str,
port: int,
) -> AsyncGenerator[
Portal | LocalPortal | None,
None,
]:
) -> AsyncGenerator[Union[Portal, LocalPortal], None]:
'''Return a portal instance connected to a local or remote
arbiter.
'''
Return a portal instance connected to a local or remote
registry-service actor; if a connection already exists re-use it
(presumably to call a `.register_actor()` registry runtime RPC
ep).
actor = current_actor()
'''
actor: Actor = current_actor()
if actor.is_registrar:
if not actor:
raise RuntimeError("No actor instance has been defined yet?")
if actor.is_arbiter:
# we're already the arbiter
# (likely a re-entrant call from the arbiter actor)
yield LocalPortal(
actor,
Channel((host, port))
)
yield LocalPortal(actor, Channel((host, port)))
else:
# TODO: try to look pre-existing connection from
# `Actor._peers` and use it instead?
async with (
_connect_chan(host, port) as chan,
open_portal(chan) as regstr_ptl,
):
yield regstr_ptl
async with _connect_chan(host, port) as chan:
async with open_portal(chan) as arb_portal:
yield arb_portal
@acm
@ -87,125 +62,51 @@ async def get_root(
**kwargs,
) -> AsyncGenerator[Portal, None]:
# TODO: rename mailbox to `_root_maddr` when we finally
# add and impl libp2p multi-addrs?
host, port = _runtime_vars['_root_mailbox']
assert host is not None
async with (
_connect_chan(host, port) as chan,
open_portal(chan, **kwargs) as portal,
):
yield portal
def get_peer_by_name(
name: str,
# uuid: str|None = None,
) -> list[Channel]|None: # at least 1
'''
Scan for an existing connection (set) to a named actor
and return any channels from `Actor._peers`.
This is an optimization method over querying the registrar for
the same info.
'''
actor: Actor = current_actor()
to_scan: dict[tuple, list[Channel]] = actor._peers.copy()
pchan: Channel|None = actor._parent_chan
if pchan:
to_scan[pchan.uid].append(pchan)
for aid, chans in to_scan.items():
_, peer_name = aid
if name == peer_name:
if not chans:
log.warning(
f'No IPC chans for matching peer {peer_name}\n'
)
continue
return chans
return None
async with _connect_chan(host, port) as chan:
async with open_portal(chan, **kwargs) as portal:
yield portal
@acm
async def query_actor(
name: str,
regaddr: tuple[str, int]|None = None,
arbiter_sockaddr: Optional[tuple[str, int]] = None,
) -> AsyncGenerator[
tuple[str, int]|None,
None,
]:
) -> AsyncGenerator[tuple[str, int], None]:
'''
Lookup a transport address (by actor name) via querying a registrar
listening @ `regaddr`.
Simple address lookup for a given actor name.
Returns the transport protocol (socket) address or `None` if no
entry under that name exists.
Returns the (socket) address or ``None``.
'''
actor: Actor = current_actor()
if (
name == 'registrar'
and actor.is_registrar
):
raise RuntimeError(
'The current actor IS the registry!?'
)
actor = current_actor()
async with get_arbiter(
*arbiter_sockaddr or actor._arb_addr
) as arb_portal:
maybe_peers: list[Channel]|None = get_peer_by_name(name)
if maybe_peers:
yield maybe_peers[0].raddr
return
reg_portal: Portal
regaddr: tuple[str, int] = regaddr or actor.reg_addrs[0]
async with get_registry(*regaddr) as reg_portal:
# TODO: return portals to all available actors - for now
# just the last one that registered
sockaddr: tuple[str, int] = await reg_portal.run_from_ns(
sockaddr = await arb_portal.run_from_ns(
'self',
'find_actor',
name=name,
)
yield sockaddr
# TODO: return portals to all available actors - for now just
# the last one that registered
if name == 'arbiter' and actor.is_arbiter:
raise RuntimeError("The current actor is the arbiter")
@acm
async def maybe_open_portal(
addr: tuple[str, int],
name: str,
):
async with query_actor(
name=name,
regaddr=addr,
) as sockaddr:
pass
if sockaddr:
async with _connect_chan(*sockaddr) as chan:
async with open_portal(chan) as portal:
yield portal
else:
yield None
yield sockaddr if sockaddr else None
@acm
async def find_actor(
name: str,
registry_addrs: list[tuple[str, int]]|None = None,
arbiter_sockaddr: Tuple[str, int] = None
only_first: bool = True,
raise_on_none: bool = False,
) -> AsyncGenerator[
Portal | list[Portal] | None,
None,
]:
) -> AsyncGenerator[Optional[Portal], None]:
'''
Ask the arbiter to find actor(s) by name.
@ -213,102 +114,39 @@ async def find_actor(
known to the arbiter.
'''
# optimization path, use any pre-existing peer channel
maybe_peers: list[Channel]|None = get_peer_by_name(name)
if maybe_peers and only_first:
async with open_portal(maybe_peers[0]) as peer_portal:
yield peer_portal
return
if not registry_addrs:
# XXX NOTE: make sure to dynamically read the value on
# every call since something may change it globally (eg.
# like in our discovery test suite)!
from . import _root
registry_addrs = (
_runtime_vars['_registry_addrs']
or
_root._default_lo_addrs
)
maybe_portals: list[
AsyncContextManager[tuple[str, int]]
] = list(
maybe_open_portal(
addr=addr,
name=name,
)
for addr in registry_addrs
)
portals: list[Portal]
async with gather_contexts(
mngrs=maybe_portals,
) as portals:
# log.runtime(
# 'Gathered portals:\n'
# f'{portals}'
# )
# NOTE: `gather_contexts()` will return a
# `tuple[None, None, ..., None]` if no contact
# can be made with any regstrar at any of the
# N provided addrs!
if not any(portals):
if raise_on_none:
raise RuntimeError(
f'No actor "{name}" found registered @ {registry_addrs}'
)
yield None
return
portals: list[Portal] = list(portals)
if only_first:
yield portals[0]
async with query_actor(
name=name,
arbiter_sockaddr=arbiter_sockaddr,
) as sockaddr:
if sockaddr:
async with _connect_chan(*sockaddr) as chan:
async with open_portal(chan) as portal:
yield portal
else:
# TODO: currently this may return multiple portals
# given there are multi-homed or multiple registrars..
# SO, we probably need de-duplication logic?
yield portals
yield None
@acm
async def wait_for_actor(
name: str,
registry_addr: tuple[str, int] | None = None,
arbiter_sockaddr: Tuple[str, int] = None
) -> AsyncGenerator[Portal, None]:
'''
Wait on at least one peer actor to register `name` with the
registrar, yield a `Portal` to the first registree.
"""Wait on an actor to register with the arbiter.
'''
actor: Actor = current_actor()
A portal to the first registered actor is returned.
"""
actor = current_actor()
# optimization path, use any pre-existing peer channel
maybe_peers: list[Channel]|None = get_peer_by_name(name)
if maybe_peers:
async with open_portal(maybe_peers[0]) as peer_portal:
yield peer_portal
return
regaddr: tuple[str, int] = (
registry_addr
or
actor.reg_addrs[0]
)
# TODO: use `.trionics.gather_contexts()` like
# above in `find_actor()` as well?
reg_portal: Portal
async with get_registry(*regaddr) as reg_portal:
sockaddrs = await reg_portal.run_from_ns(
async with get_arbiter(
*arbiter_sockaddr or actor._arb_addr,
) as arb_portal:
sockaddrs = await arb_portal.run_from_ns(
'self',
'wait_for_actor',
name=name,
)
# get latest registered addr by default?
# TODO: offer multi-portal yields in multi-homed case?
sockaddr: tuple[str, int] = sockaddrs[-1]
sockaddr = sockaddrs[-1]
async with _connect_chan(*sockaddr) as chan:
async with open_portal(chan) as portal:
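A hedged sketch of how the discovery helpers above are used from application code; the actor name 'service' is a placeholder and both coroutines must run inside an open actor runtime (e.g. under `tractor.open_root_actor()`):

```python
import tractor


async def cancel_service_if_up() -> None:
    # `find_actor()` yields a portal when 'service' is registered
    # with the registrar, otherwise it yields `None`
    async with tractor.find_actor('service') as portal:
        if portal is None:
            print("no actor named 'service' is registered")
            return
        await portal.cancel_actor()


async def wait_then_cancel() -> None:
    # `wait_for_actor()` instead blocks until registration happens
    async with tractor.wait_for_actor('service') as portal:
        await portal.cancel_actor()
```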

View File

@ -18,32 +18,15 @@
Sub-process entry points.
"""
from __future__ import annotations
from functools import partial
import multiprocessing as mp
import os
import textwrap
from typing import (
Any,
TYPE_CHECKING,
)
from typing import Tuple, Any
import signal
import trio # type: ignore
from .log import (
get_console_log,
get_logger,
)
from .log import get_console_log, get_logger
from . import _state
from .devx import _debug
from .to_asyncio import run_as_asyncio_guest
from ._runtime import (
async_main,
Actor,
)
if TYPE_CHECKING:
from ._spawn import SpawnMethodKey
log = get_logger(__name__)
@ -51,40 +34,37 @@ log = get_logger(__name__)
def _mp_main(
actor: Actor,
accept_addrs: list[tuple[str, int]],
forkserver_info: tuple[Any, Any, Any, Any, Any],
start_method: SpawnMethodKey,
parent_addr: tuple[str, int] | None = None,
actor: 'Actor', # type: ignore
accept_addr: Tuple[str, int],
forkserver_info: Tuple[Any, Any, Any, Any, Any],
start_method: str,
parent_addr: Tuple[str, int] = None,
infect_asyncio: bool = False,
) -> None:
'''
The routine called *after fork* which invokes a fresh `trio.run()`
The routine called *after fork* which invokes a fresh ``trio.run``
'''
actor._forkserver_info = forkserver_info
from ._spawn import try_set_start_method
spawn_ctx: mp.context.BaseContext = try_set_start_method(start_method)
assert spawn_ctx
spawn_ctx = try_set_start_method(start_method)
if actor.loglevel is not None:
log.info(
f'Setting loglevel for {actor.uid} to {actor.loglevel}'
)
f"Setting loglevel for {actor.uid} to {actor.loglevel}")
get_console_log(actor.loglevel)
# TODO: use scops headers like for `trio` below!
# (well after we libify it maybe..)
assert spawn_ctx
log.info(
f'Started new {spawn_ctx.current_process()} for {actor.uid}'
# f"parent_addr is {parent_addr}"
)
_state._current_actor: Actor = actor
f"Started new {spawn_ctx.current_process()} for {actor.uid}")
_state._current_actor = actor
log.debug(f"parent_addr is {parent_addr}")
trio_main = partial(
async_main,
actor=actor,
accept_addrs=accept_addrs,
actor._async_main,
accept_addr,
parent_addr=parent_addr
)
try:
@ -97,116 +77,14 @@ def _mp_main(
pass # handle it the same way trio does?
finally:
log.info(
f'`mp`-subactor {actor.uid} exited'
)
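The `partial()`-then-run pattern used by both entry points in this hunk, reduced to a self-contained sketch (all names here are stand-ins, not the real runtime API):

from functools import partial
import trio

async def async_main(
    actor_name: str,
    *,
    parent_addr: tuple[str, int] | None = None,
) -> None:
    # stand-in for the runtime's real `async_main()`
    print(f'{actor_name} up, parent at {parent_addr}')

trio_main = partial(
    async_main,
    'sub-actor',
    parent_addr=('127.0.0.1', 1616),
)
trio.run(trio_main)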
# TODO: move this func to some kinda `.devx._conc_lang.py` eventually
# as we work out our multi-domain state-flow-syntax!
def nest_from_op(
input_op: str,
#
# ?TODO? an idea for a syntax to the state of concurrent systems
# as a "3-domain" (execution, scope, storage) model and using
# a minimal ascii/utf-8 operator-set.
#
# try not to take any of this seriously yet XD
#
# > is a "play operator" indicating (CPU bound)
# exec/work/ops required at the "lowest level computing"
#
# execution primitives (tasks, threads, actors..) denote their
# lifetime with '(' and ')' since parentheses normally are used
# in many langs to denote function calls.
#
# starting = (
# >( opening/starting; beginning of the thread-of-exec (toe?)
# (> opened/started, (finished spawning toe)
# |_<Task: blah blah..> repr of toe, in py these look like <objs>
#
# >) closing/exiting/stopping,
# )> closed/exited/stopped,
# |_<Task: blah blah..>
# [OR <), )< ?? ]
#
# ending = )
# >c) cancelling to close/exit
# c)> cancelled (caused close), OR?
# |_<Actor: ..>
# OR maybe "<c)" which better indicates the cancel being
# "delivered/returned" / returned" to LHS?
#
# >x) erroring to eventually exit
# x)> errored and terminated
# |_<Actor: ...>
#
# scopes: supers/nurseries, IPC-ctxs, sessions, perms, etc.
# >{ opening
# {> opened
# }> closed
# >} closing
#
# storage: like queues, shm-buffers, files, etc..
# >[ opening
# [> opened
# |_<FileObj: ..>
#
# >] closing
# ]> closed
# IPC ops: channels, transports, msging
# => req msg
# <= resp msg
# <=> 2-way streaming (of msgs)
# <- recv 1 msg
# -> send 1 msg
#
# TODO: still not sure on R/L-HS approach..?
# =>( send-req to exec start (task, actor, thread..)
# (<= recv-req to ^
#
# (<= recv-req ^
# <=( recv-resp opened remote exec primitive
# <=) recv-resp closed
#
# )<=c req to stop due to cancel
# c=>) req to stop due to cancel
#
# =>{ recv-req to open
# <={ send-status that it closed
tree_str: str,
# NOTE: so move back-from-the-left of the `input_op` by
# this amount.
back_from_op: int = 0,
) -> str:
'''
Depth-increment the (presumably hierarchy/supervision) input
"tree string" below the provided `input_op` execution
operator, injecting a `"\n|_{input_op}\n"` and indenting the
`tree_str` so its content aligns with the op's last char.
'''
return (
f'{input_op}\n'
+
textwrap.indent(
tree_str,
prefix=(
len(input_op)
-
(back_from_op + 1)
) * ' ',
)
)
log.info(f"Actor {actor.uid} terminated")
def _trio_main(
actor: Actor,
actor: 'Actor', # type: ignore
*,
parent_addr: tuple[str, int] | None = None,
parent_addr: Tuple[str, int] = None,
infect_asyncio: bool = False,
) -> None:
@ -214,73 +92,32 @@ def _trio_main(
Entry point for a `trio_run_in_process` subactor.
'''
_debug.hide_runtime_frames()
log.info(f"Started new trio process for {actor.uid}")
if actor.loglevel is not None:
log.info(
f"Setting loglevel for {actor.uid} to {actor.loglevel}")
get_console_log(actor.loglevel)
log.info(
f"Started {actor.uid}")
_state._current_actor = actor
log.debug(f"parent_addr is {parent_addr}")
trio_main = partial(
async_main,
actor,
actor._async_main,
parent_addr=parent_addr
)
if actor.loglevel is not None:
get_console_log(actor.loglevel)
actor_info: str = (
f'|_{actor}\n'
f' uid: {actor.uid}\n'
f' pid: {os.getpid()}\n'
f' parent_addr: {parent_addr}\n'
f' loglevel: {actor.loglevel}\n'
)
log.info(
'Starting new `trio` subactor:\n'
+
nest_from_op(
input_op='>(', # see syntax ideas above
tree_str=actor_info,
back_from_op=2, # since "complete"
)
)
logmeth = log.info
exit_status: str = (
'Subactor exited\n'
+
nest_from_op(
input_op=')>', # like a "closed-to-play"-icon from super perspective
tree_str=actor_info,
back_from_op=1,
)
)
try:
if infect_asyncio:
actor._infected_aio = True
run_as_asyncio_guest(trio_main)
else:
trio.run(trio_main)
except KeyboardInterrupt:
logmeth = log.cancel
exit_status: str = (
'Actor received KBI (aka an OS-cancel)\n'
+
nest_from_op(
input_op='c)>', # closed due to cancel (see above)
tree_str=actor_info,
)
)
except BaseException as err:
logmeth = log.error
exit_status: str = (
'Main actor task exited due to crash?\n'
+
nest_from_op(
input_op='x)>', # closed by error
tree_str=actor_info,
)
)
# NOTE since we raise a tb will already be shown on the
# console, thus we do NOT use `.exception()` above.
raise err
log.warning(f"Actor {actor.uid} received KBI")
finally:
logmeth(exit_status)
log.info(f"Actor {actor.uid} terminated")

File diff suppressed because it is too large

View File

@ -19,64 +19,31 @@ Inter-process comms abstractions
"""
from __future__ import annotations
from collections.abc import (
AsyncGenerator,
AsyncIterator,
)
from contextlib import (
asynccontextmanager as acm,
contextmanager as cm,
)
import platform
from pprint import pformat
import struct
import typing
from collections.abc import AsyncGenerator, AsyncIterator
from typing import (
Any,
Callable,
runtime_checkable,
Protocol,
Type,
TypeVar,
Any, Tuple, Optional,
Type, Protocol, TypeVar,
)
import msgspec
from tricycle import BufferedReceiveStream
import msgpack
import trio
from async_generator import asynccontextmanager
from tractor.log import get_logger
from tractor._exceptions import (
MsgTypeError,
pack_from_raise,
TransportClosed,
_mk_send_mte,
_mk_recv_mte,
)
from tractor.msg import (
_ctxvar_MsgCodec,
# _codec, XXX see `self._codec` sanity/debug checks
MsgCodec,
types as msgtypes,
pretty_struct,
)
from .log import get_logger
from ._exceptions import TransportClosed
log = get_logger(__name__)
_is_windows = platform.system() == 'Windows'
log = get_logger(__name__)
def get_stream_addrs(
stream: trio.SocketStream
) -> tuple[
tuple[str, int], # local
tuple[str, int], # remote
]:
'''
Return the `trio` streaming transport protocol's socket-addrs for
both the local and remote sides as a pair.
'''
# rn, should both be IP sockets
def get_stream_addrs(stream: trio.SocketStream) -> Tuple:
# should both be IP sockets
lsockname = stream.socket.getsockname()
rsockname = stream.socket.getpeername()
return (
@ -85,22 +52,15 @@ def get_stream_addrs(
)
# from tractor.msg.types import MsgType
# ?TODO? this should be our `Union[*msgtypes.__spec__]` alias now right..?
# => BLEH, except can't bc prots must inherit typevar or param-spec
# vars..
MsgType = TypeVar('MsgType')
MsgType = TypeVar("MsgType")
# TODO: break up this mod into a subpkg so we can start adding new
# backends and move this type stuff into a dedicated file.. Bo
#
@runtime_checkable
class MsgTransport(Protocol[MsgType]):
#
# ^-TODO-^ consider using a generic def and indexing with our
# eventual msg definition/types?
# TODO: consider using a generic def and indexing with our eventual
# msg definition/types?
# - https://docs.python.org/3/library/typing.html#typing.Protocol
# - https://jcristharif.com/msgspec/usage.html#structs
class MsgTransport(Protocol[MsgType]):
stream: trio.SocketStream
drained: list[MsgType]
@ -127,44 +87,23 @@ class MsgTransport(Protocol[MsgType]):
...
@property
def laddr(self) -> tuple[str, int]:
def laddr(self) -> Tuple[str, int]:
...
@property
def raddr(self) -> tuple[str, int]:
def raddr(self) -> Tuple[str, int]:
...
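On the `@runtime_checkable` protocol above: such a protocol supports structural `isinstance()` checks that only verify member presence, roughly like this toy version (names are hypothetical):

from typing import Protocol, runtime_checkable

@runtime_checkable
class SupportsLaddr(Protocol):
    @property
    def laddr(self) -> tuple[str, int]: ...

class FakeTransport:
    @property
    def laddr(self) -> tuple[str, int]:
        return ('127.0.0.1', 0)

# passes: the instance exposes a `.laddr` attribute
assert isinstance(FakeTransport(), SupportsLaddr)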
# TODO: typing oddity.. not sure why we have to inherit here, but it
# seems to be an issue with `get_msg_transport()` returning
# a `Type[Protocol]`; probably should make a `mypy` issue?
class MsgpackTCPStream(MsgTransport):
class MsgpackTCPStream:
'''
A ``trio.SocketStream`` delivering ``msgpack`` formatted data
using the ``msgspec`` codec lib.
using ``msgpack-python``.
'''
layer_key: int = 4
name_key: str = 'tcp'
# TODO: better naming for this?
# -[ ] check how libp2p does naming for such things?
codec_key: str = 'msgpack'
def __init__(
self,
stream: trio.SocketStream,
prefix_size: int = 4,
# XXX optionally provided codec pair for `msgspec`:
# https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types
#
# TODO: define this as a `Codec` struct which can be
# overridden dynamically by the application/runtime?
codec: tuple[
Callable[[Any], Any]|None, # coder
Callable[[type, Any], Any]|None, # decoder
]|None = None,
) -> None:
@ -175,277 +114,74 @@ class MsgpackTCPStream(MsgTransport):
self._laddr, self._raddr = get_stream_addrs(stream)
# create read loop instance
self._aiter_pkts = self._iter_packets()
self._agen = self._iter_packets()
self._send_lock = trio.StrictFIFOLock()
# public i guess?
self.drained: list[dict] = []
self.recv_stream = BufferedReceiveStream(
transport_stream=stream
)
self.prefix_size = prefix_size
# allow for custom IPC msg interchange format
# dynamic override Bo
self._task = trio.lowlevel.current_task()
# XXX for ctxvar debug only!
# self._codec: MsgCodec = (
# codec
# or
# _codec._ctxvar_MsgCodec.get()
# )
async def _iter_packets(self) -> AsyncGenerator[dict, None]:
'''
Yield `bytes`-blob decoded packets from the underlying TCP
stream using the current task's `MsgCodec`.
This is a streaming routine implemented as an async generator
func (which was the original design, but could be changed?)
and is allocated by a `.__call__()` inside `.__init__()` where
it is assigned to the `._aiter_pkts` attr.
Yield packets from the underlying stream.
'''
decodes_failed: int = 0
unpacker = msgpack.Unpacker(
raw=False,
)
while True:
try:
header: bytes = await self.recv_stream.receive_exactly(4)
except (
ValueError,
ConnectionResetError,
data = await self.stream.receive_some(2**10)
# not sure entirely why we need this but without it we
# seem to be getting racy failures here on
# arbiter/registry name subs..
trio.BrokenResourceError,
except trio.BrokenResourceError as err:
msg = err.args[0]
) as trans_err:
# XXX: handle connection-reset-by-peer the same as a EOF.
# we're currently remapping this since we allow
# a quick connect then drop for root actors when
# checking to see if there exists an "arbiter"
# on the chosen sockaddr (``_root.py:108`` or thereabouts)
if (
# nix
'[Errno 104]' in msg or
loglevel = 'transport'
match trans_err:
# case (
# ConnectionResetError()
# ):
# loglevel = 'transport'
# peer actor (graceful??) TCP EOF but `tricycle`
# seems to raise a 0-bytes-read?
case ValueError() if (
'unclean EOF' in trans_err.args[0]
):
pass
# peer actor (task) prolly shutdown quickly due
# to cancellation
case trio.BrokenResourceError() if (
'Connection reset by peer' in trans_err.args[0]
):
pass
# unless the disconnect condition falls under "a
# normal operation breakage" we usually console warn
# about it.
case _:
loglevel: str = 'warning'
raise TransportClosed(
message=(
f'IPC transport already closed by peer\n'
f'x]> {type(trans_err)}\n'
f' |_{self}\n'
),
loglevel=loglevel,
) from trans_err
# XXX definitely can happen if transport is closed
# manually by another `trio.lowlevel.Task` in the
# same actor; we use this in some simulated fault
# testing for ex, but generally should never happen
# under normal operation!
#
# NOTE: as such we always re-raise this error from the
# RPC msg loop!
except trio.ClosedResourceError as closure_err:
raise TransportClosed(
message=(
f'IPC transport already manually closed locally?\n'
f'x]> {type(closure_err)} \n'
f' |_{self}\n'
),
loglevel='error',
raise_on_report=(
closure_err.args[0] == 'another task closed this fd'
or
closure_err.args[0] in ['another task closed this fd']
),
) from closure_err
# graceful TCP EOF disconnect
if header == b'':
raise TransportClosed(
message=(
f'IPC transport already gracefully closed\n'
f']>\n'
f' |_{self}\n'
),
loglevel='transport',
# cause=??? # handy or no?
)
size: int
size, = struct.unpack("<I", header)
log.transport(f'received header {size}') # type: ignore
msg_bytes: bytes = await self.recv_stream.receive_exactly(size)
log.transport(f"received {msg_bytes}") # type: ignore
try:
# NOTE: lookup the `trio.Task.context`'s var for
# the current `MsgCodec`.
codec: MsgCodec = _ctxvar_MsgCodec.get()
# XXX for ctxvar debug only!
# if self._codec.pld_spec != codec.pld_spec:
# assert (
# task := trio.lowlevel.current_task()
# ) is not self._task
# self._task = task
# self._codec = codec
# log.runtime(
# f'Using new codec in {self}.recv()\n'
# f'codec: {self._codec}\n\n'
# f'msg_bytes: {msg_bytes}\n'
# )
yield codec.decode(msg_bytes)
# XXX NOTE: since the below error derives from
# `DecodeError` we need to catch is specially
# and always raise such that spec violations
# are never allowed to be caught silently!
except msgspec.ValidationError as verr:
msgtyperr: MsgTypeError = _mk_recv_mte(
msg=msg_bytes,
codec=codec,
src_validation_error=verr,
)
# XXX deliver up to `Channel.recv()` where
# a re-raise and `Error`-pack can inject the far
# end actor `.uid`.
yield msgtyperr
except (
msgspec.DecodeError,
UnicodeDecodeError,
):
if decodes_failed < 4:
# ignore decoding errors for now and assume they have to
# do with a channel drop - hope that receiving from the
# channel will raise an expected error and bubble up.
try:
msg_str: str|bytes = msg_bytes.decode()
except UnicodeDecodeError:
msg_str = msg_bytes
log.exception(
'Failed to decode msg?\n'
f'{codec}\n\n'
'Rxed bytes from wire:\n\n'
f'{msg_str!r}\n'
# on windows it seems there are a variety of errors
# to handle..
_is_windows
):
raise TransportClosed(
f'{self} was broken with {msg}'
)
decodes_failed += 1
else:
raise
async def send(
self,
msg: msgtypes.MsgType,
log.transport(f"received {data}") # type: ignore
strict_types: bool = True,
hide_tb: bool = False,
) -> None:
'''
Send a msgpack-encoded py-object-blob-as-msg over TCP.
If `strict_types == True` then a `MsgTypeError` will be raised on any
invalid msg type.
'''
__tracebackhide__: bool = hide_tb
# XXX see `trio._sync.AsyncContextManagerMixin` for details
# on the `.acquire()`/`.release()` sequencing..
async with self._send_lock:
# NOTE: lookup the `trio.Task.context`'s var for
# the current `MsgCodec`.
codec: MsgCodec = _ctxvar_MsgCodec.get()
# XXX for ctxvar debug only!
# if self._codec.pld_spec != codec.pld_spec:
# self._codec = codec
# log.runtime(
# f'Using new codec in {self}.send()\n'
# f'codec: {self._codec}\n\n'
# f'msg: {msg}\n'
# )
if type(msg) not in msgtypes.__msg_types__:
if strict_types:
raise _mk_send_mte(
msg,
codec=codec,
)
else:
log.warning(
'Sending non-`Msg`-spec msg?\n\n'
f'{msg}\n'
)
try:
bytes_data: bytes = codec.encode(msg)
except TypeError as _err:
typerr = _err
msgtyperr: MsgTypeError = _mk_send_mte(
msg,
codec=codec,
message=(
f'IPC-msg-spec violation in\n\n'
f'{pretty_struct.Struct.pformat(msg)}'
),
src_type_error=typerr,
if data == b'':
raise TransportClosed(
f'transport {self} was already closed prior to read'
)
raise msgtyperr from typerr
# supposedly the fastest says,
# https://stackoverflow.com/a/54027962
size: bytes = struct.pack("<I", len(bytes_data))
return await self.stream.send_all(size + bytes_data)
# ?TODO? does it help ever to dynamically show this
# frame?
# try:
# <the-above_code>
# except BaseException as _err:
# err = _err
# if not isinstance(err, MsgTypeError):
# __tracebackhide__: bool = False
# raise
unpacker.feed(data)
for packet in unpacker:
yield packet
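Both the `msgspec` receive loop (`receive_exactly(4)` + `struct.unpack`) and the matching `send()` below rely on a 4-byte little-endian length prefix; a standalone sketch of that framing:

import struct

def frame(payload: bytes) -> bytes:
    # prefix the payload with its size as a little-endian uint32
    return struct.pack("<I", len(payload)) + payload

def unframe(wire: bytes) -> bytes:
    size, = struct.unpack("<I", wire[:4])
    return wire[4:4 + size]

assert unframe(frame(b'{"cmd": "ping"}')) == b'{"cmd": "ping"}'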
@property
def laddr(self) -> tuple[str, int]:
def laddr(self) -> Tuple[Any, ...]:
return self._laddr
@property
def raddr(self) -> tuple[str, int]:
def raddr(self) -> Tuple[Any, ...]:
return self._raddr
async def send(self, msg: Any) -> None:
async with self._send_lock:
return await self.stream.send_all(
msgpack.dumps(msg, use_bin_type=True)
)
async def recv(self) -> Any:
return await self._aiter_pkts.asend(None)
return await self._agen.asend(None)
async def drain(self) -> AsyncIterator[dict]:
'''
@ -462,20 +198,105 @@ class MsgpackTCPStream(MsgTransport):
yield msg
def __aiter__(self):
return self._aiter_pkts
return self._agen
def connected(self) -> bool:
return self.stream.socket.fileno() != -1
class MsgspecTCPStream(MsgpackTCPStream):
'''
A ``trio.SocketStream`` delivering ``msgpack`` formatted data
using ``msgspec``.
'''
def __init__(
self,
stream: trio.SocketStream,
prefix_size: int = 4,
) -> None:
import msgspec
super().__init__(stream)
self.recv_stream = BufferedReceiveStream(transport_stream=stream)
self.prefix_size = prefix_size
# TODO: struct aware messaging coders
self.encode = msgspec.msgpack.Encoder().encode
self.decode = msgspec.msgpack.Decoder().decode # dict[str, Any])
async def _iter_packets(self) -> AsyncGenerator[dict, None]:
'''Yield packets from the underlying stream.
'''
import msgspec # noqa
last_decode_failed: bool = False
while True:
try:
header = await self.recv_stream.receive_exactly(4)
except (
ValueError,
# not sure entirely why we need this but without it we
# seem to be getting racy failures here on
# arbiter/registry name subs..
trio.BrokenResourceError,
):
raise TransportClosed(
f'transport {self} was already closed prior to read'
)
if header == b'':
raise TransportClosed(
f'transport {self} was already closed prior to read'
)
size, = struct.unpack("<I", header)
log.transport(f'received header {size}') # type: ignore
msg_bytes = await self.recv_stream.receive_exactly(size)
log.transport(f"received {msg_bytes}") # type: ignore
try:
yield self.decode(msg_bytes)
except (
msgspec.DecodeError,
UnicodeDecodeError,
):
if not last_decode_failed:
# ignore decoding errors for now and assume they have to
# do with a channel drop - hope that receiving from the
# channel will raise an expected error and bubble up.
log.error('`msgspec` failed to decode!?')
last_decode_failed = True
else:
raise
async def send(self, msg: Any) -> None:
async with self._send_lock:
bytes_data: bytes = self.encode(msg)
# supposedly the fastest says,
# https://stackoverflow.com/a/54027962
size: bytes = struct.pack("<I", len(bytes_data))
return await self.stream.send_all(size + bytes_data)
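A quick roundtrip of the `msgspec` msgpack codec wired up in `MsgspecTCPStream.__init__()` above (assumes the optional `msgspec` dep is installed):

import msgspec

encode = msgspec.msgpack.Encoder().encode
decode = msgspec.msgpack.Decoder().decode

wire = encode({'cmd': 'ping', 'args': [1, 2, 3]})
assert decode(wire) == {'cmd': 'ping', 'args': [1, 2, 3]}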
def get_msg_transport(
key: tuple[str, str],
key: Tuple[str, str],
) -> Type[MsgTransport]:
return {
('msgpack', 'tcp'): MsgpackTCPStream,
('msgspec', 'tcp'): MsgspecTCPStream,
}[key]
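In other words, the transport implementation is selected by a `(codec, layer)` key pair, e.g.:

# hypothetical call; returns the class, not an instance
transport_cls = get_msg_transport(('msgspec', 'tcp'))  # -> MsgspecTCPStream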
@ -484,18 +305,16 @@ class Channel:
An inter-process channel for communication between (remote) actors.
Wraps a ``MsgStream``: transport + encoding IPC connection.
Currently we only support ``trio.SocketStream`` for transport
(aka TCP) and the ``msgpack`` interchange format via the ``msgspec``
codec library.
(aka TCP).
'''
def __init__(
self,
destaddr: tuple[str, int]|None,
destaddr: Optional[Tuple[str, int]],
msg_transport_type_key: tuple[str, str] = ('msgpack', 'tcp'),
msg_transport_type_key: Tuple[str, str] = ('msgpack', 'tcp'),
# TODO: optional reconnection support?
# auto_reconnect: bool = False,
@ -506,36 +325,31 @@ class Channel:
# self._recon_seq = on_reconnect
# self._autorecon = auto_reconnect
# TODO: maybe expose this through the nursery api?
try:
# if installed load the msgspec transport since it's faster
import msgspec # noqa
msg_transport_type_key = ('msgspec', 'tcp')
except ImportError:
pass
self._destaddr = destaddr
self._transport_key = msg_transport_type_key
# Either created in ``.connect()`` or passed in by
# user in ``.from_stream()``.
self._stream: trio.SocketStream|None = None
self._transport: MsgTransport|None = None
self._stream: Optional[trio.SocketStream] = None
self.msgstream: Optional[MsgTransport] = None
# set after handshake - always uid of far end
self.uid: tuple[str, str]|None = None
self.uid: Optional[Tuple[str, str]] = None
self._aiter_msgs = self._iter_msgs()
self._exc: Exception|None = None # set if far end actor errors
self._agen = self._aiter_recv()
self._exc: Optional[Exception] = None # set if far end actor errors
self._closed: bool = False
# flag set by ``Portal.cancel_actor()`` indicating remote
# (possibly peer) cancellation of the far end actor
# runtime.
self._cancel_called: bool = False
@property
def msgstream(self) -> MsgTransport:
log.info(
'`Channel.msgstream` is an old name, use `._transport`'
)
return self._transport
@property
def transport(self) -> MsgTransport:
return self._transport
# flag set on ``Portal.cancel_actor()`` indicating
# remote (peer) cancellation of the far end actor runtime.
self._cancel_called: bool = False # set on ``Portal.cancel_actor()``
@classmethod
def from_stream(
@ -546,82 +360,41 @@ class Channel:
) -> Channel:
src, dst = get_stream_addrs(stream)
chan = Channel(
destaddr=dst,
**kwargs,
)
chan = Channel(destaddr=dst, **kwargs)
# set immediately here from provided instance
chan._stream: trio.SocketStream = stream
chan._stream = stream
chan.set_msg_transport(stream)
return chan
def set_msg_transport(
self,
stream: trio.SocketStream,
type_key: tuple[str, str]|None = None,
# XXX optionally provided codec pair for `msgspec`:
# https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types
codec: MsgCodec|None = None,
type_key: Optional[Tuple[str, str]] = None,
) -> MsgTransport:
type_key = (
type_key
or
self._transport_key
)
# get transport type, then
self._transport = get_msg_transport(
type_key
# instantiate an instance of the msg-transport
)(
stream,
codec=codec,
)
return self._transport
type_key = type_key or self._transport_key
self.msgstream = get_msg_transport(type_key)(stream)
return self.msgstream
@cm
def apply_codec(
self,
codec: MsgCodec,
) -> None:
'''
Temporarily override the underlying IPC msg codec for
dynamic enforcement of messaging schema.
'''
orig: MsgCodec = self._transport.codec
try:
self._transport.codec = codec
yield
finally:
self._transport.codec = orig
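The override-and-restore shape of `Channel.apply_codec()` above, reduced to a generic (hypothetical) helper:

from contextlib import contextmanager

@contextmanager
def swap_attr(obj, name: str, new_value):
    orig = getattr(obj, name)
    setattr(obj, name, new_value)
    try:
        yield new_value
    finally:
        # always restore the original, even on error
        setattr(obj, name, orig)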
# TODO: do a .src/.dst: str for maddrs?
def __repr__(self) -> str:
if not self._transport:
return '<Channel with inactive transport?>'
return repr(
self._transport.stream.socket._sock
).replace( # type: ignore
"socket.socket",
"Channel",
)
if self.msgstream:
return repr(
self.msgstream.stream.socket._sock).replace( # type: ignore
"socket.socket", "Channel")
return object.__repr__(self)
@property
def laddr(self) -> tuple[str, int]|None:
return self._transport.laddr if self._transport else None
def laddr(self) -> Optional[Tuple[str, int]]:
return self.msgstream.laddr if self.msgstream else None
@property
def raddr(self) -> tuple[str, int]|None:
return self._transport.raddr if self._transport else None
def raddr(self) -> Optional[Tuple[str, int]]:
return self.msgstream.raddr if self.msgstream else None
async def connect(
self,
destaddr: tuple[Any, ...] | None = None,
destaddr: Tuple[Any, ...] = None,
**kwargs
) -> MsgTransport:
@ -636,62 +409,26 @@ class Channel:
*destaddr,
**kwargs
)
transport = self.set_msg_transport(stream)
msgstream = self.set_msg_transport(stream)
log.transport(
f'Opened channel[{type(transport)}]: {self.laddr} -> {self.raddr}'
f'Opened channel[{type(msgstream)}]: {self.laddr} -> {self.raddr}'
)
return transport
return msgstream
# TODO: something like,
# `pdbp.hideframe_on(errors=[MsgTypeError])`
# instead of the `try/except` hack we have rn..
# seems like a pretty useful thing to have in general
# along with being able to filter certain stack frame(s / sets)
# possibly based on the current log-level?
async def send(
self,
payload: Any,
async def send(self, item: Any) -> None:
hide_tb: bool = False,
log.transport(f"send `{item}`") # type: ignore
assert self.msgstream
) -> None:
'''
Send a coded msg-blob over the transport.
'''
__tracebackhide__: bool = hide_tb
try:
log.transport(
'=> send IPC msg:\n\n'
f'{pformat(payload)}\n'
)
# assert self._transport # but why typing?
await self._transport.send(
payload,
hide_tb=hide_tb,
)
except BaseException as _err:
err = _err # bind for introspection
if not isinstance(_err, MsgTypeError):
# assert err
__tracebackhide__: bool = False
else:
assert err.cid
raise
await self.msgstream.send(item)
async def recv(self) -> Any:
assert self._transport
return await self._transport.recv()
assert self.msgstream
return await self.msgstream.recv()
# TODO: auto-reconnect features like 0mq/nanomsg?
# -[ ] implement it manually with nods to SC prot
# possibly on multiple transport backends?
# -> seems like that might be re-inventing scalability
# prots tho no?
# try:
# return await self._transport.recv()
# return await self.msgstream.recv()
# except trio.BrokenResourceError:
# if self._autorecon:
# await self._reconnect()
@ -704,8 +441,8 @@ class Channel:
f'Closing channel to {self.uid} '
f'{self.laddr} -> {self.raddr}'
)
assert self._transport
await self._transport.stream.aclose()
assert self.msgstream
await self.msgstream.stream.aclose()
self._closed = True
async def __aenter__(self):
@ -716,11 +453,8 @@ class Channel:
await self.aclose(*args)
def __aiter__(self):
return self._aiter_msgs
return self._agen
# ?TODO? run any reconnection sequence?
# -[ ] prolly should be impl-ed as deco-API?
#
# async def _reconnect(self) -> None:
# """Handle connection failures by polling until a reconnect can be
# established.
@ -738,6 +472,7 @@ class Channel:
# else:
# log.transport("Stream connection re-established!")
# # TODO: run any reconnection sequence
# # on_recon = self._recon_seq
# # if on_recon:
# # await on_recon(self)
@ -751,42 +486,23 @@ class Channel:
# " for re-establishment")
# await trio.sleep(1)
async def _iter_msgs(
async def _aiter_recv(
self
) -> AsyncGenerator[Any, None]:
'''
Yield `MsgType` IPC msgs decoded and delivered from
an underlying `MsgTransport` protocol.
This is a streaming routine also implemented as an async-gen
func (same as `MsgTransport._iter_pkts()`) that gets allocated by
a `.__call__()` inside `.__init__()` where it is assigned to
the `._aiter_msgs` attr.
Async iterate items from underlying stream.
'''
assert self._transport
assert self.msgstream
while True:
try:
async for msg in self._transport:
match msg:
# NOTE: if transport/interchange delivers
# a type error, we pack it with the far
# end peer `Actor.uid` and relay the
# `Error`-msg upward to the `._rpc` stack
# for normal RAE handling.
case MsgTypeError():
yield pack_from_raise(
local_err=msg,
cid=msg.cid,
# XXX we pack it here bc lower
# layers have no notion of an
# actor-id ;)
src_uid=self.uid,
)
case _:
yield msg
async for item in self.msgstream:
yield item
# sent = yield item
# if sent is not None:
# # optimization, passing None through all the
# # time is pointless
# await self.msgstream.send(sent)
except trio.BrokenResourceError:
# if not self._autorecon:
@ -799,14 +515,12 @@ class Channel:
# continue
def connected(self) -> bool:
return self._transport.connected() if self._transport else False
return self.msgstream.connected() if self.msgstream else False
@acm
@asynccontextmanager
async def _connect_chan(
host: str,
port: int
host: str, port: int
) -> typing.AsyncGenerator[Channel, None]:
'''
Create and connect a channel with disconnect on context manager
@ -816,5 +530,4 @@ async def _connect_chan(
chan = Channel((host, port))
await chan.connect()
yield chan
with trio.CancelScope(shield=True):
await chan.aclose()
await chan.aclose()
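Finally, a hedged usage sketch of the `_connect_chan()` helper above; the address is made up, a peer actor must already be listening there, and it assumes importing the private `tractor._ipc` module:

import trio
from tractor._ipc import _connect_chan

async def ping(host: str, port: int) -> None:
    async with _connect_chan(host, port) as chan:
        await chan.send({'cmd': 'ping'})
        print(await chan.recv())

# trio.run(ping, '127.0.0.1', 1616)  # requires a live peer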

Some files were not shown because too many files have changed in this diff