Compare commits


No commits in common. "master" and "drop_warn" have entirely different histories.

117 changed files with 3721 additions and 15860 deletions


@@ -1,131 +1,41 @@
 name: CI

-on:
-  # any time someone pushes a new branch to origin
-  push:
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
+on: push

 jobs:
   mypy:
     name: 'MyPy'
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
        uses: actions/checkout@v2
       - name: Setup python
        uses: actions/setup-python@v2
        with:
-          python-version: '3.10'
+          python-version: '3.8'
       - name: Install dependencies
-        run: pip install -U . --upgrade-strategy eager -r requirements-test.txt
+        run: pip install -U . --upgrade-strategy eager
       - name: Run MyPy check
-        run: mypy tractor/ --ignore-missing-imports --show-traceback
+        run: mypy tractor/ --ignore-missing-imports

-  # test that we can generate a software distribution and install it
-  # thus avoid missing file issues after packaging.
-  sdist-linux:
-    name: 'sdist'
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup python
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.10'
-      - name: Build sdist
-        run: python setup.py sdist --formats=zip
-      - name: Install sdist from .zips
-        run: python -m pip install dist/*.zip

-  testing-linux:
+  testing:
     name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}'
     timeout-minutes: 10
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest]
-        python: ['3.10']
-        spawn_backend: [
-          'trio',
-          'mp_spawn',
-          'mp_forkserver',
-        ]
+        os: [ubuntu-latest, windows-latest]
+        python: ['3.7', '3.8']
+        spawn_backend: ['trio', 'mp']
     steps:
       - name: Checkout
        uses: actions/checkout@v2
       - name: Setup python
        uses: actions/setup-python@v2
        with:
          python-version: '${{ matrix.python }}'
       - name: Install dependencies
        run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager
       - name: List dependencies
        run: pip list
       - name: Run tests
-        run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx
-
-  # We skip 3.10 on windows for now due to not having any collabs to
-  # debug the CI failures. Anyone wanting to hack and solve them is very
-  # welcome, but our primary user base is not using that OS.
-
-  # TODO: use job filtering to accomplish instead of repeated
-  # boilerplate as is above XD:
-  # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows
-  # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
-  # - https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idif
-  # testing-windows:
-  #   name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}'
-  #   timeout-minutes: 12
-  #   runs-on: ${{ matrix.os }}
-  #   strategy:
-  #     fail-fast: false
-  #     matrix:
-  #       os: [windows-latest]
-  #       python: ['3.10']
-  #       spawn_backend: ['trio', 'mp']
-  #   steps:
-  #     - name: Checkout
-  #       uses: actions/checkout@v2
-  #     - name: Setup python
-  #       uses: actions/setup-python@v2
-  #       with:
-  #         python-version: '${{ matrix.python }}'
-  #     - name: Install dependencies
-  #       run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager
-  #     # TODO: pretty sure this solves debugger deps-issues on windows, but it needs to
-  #     # be verified by someone with a native setup.
-  #     # - name: Force pyreadline3
-  #     #   run: pip uninstall pyreadline; pip install -U pyreadline3
-  #     - name: List dependencies
-  #       run: pip list
-  #     - name: Run tests
-  #       run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx
+        run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs

LICENSE

@@ -1,21 +1,23 @@
-                    GNU AFFERO GENERAL PUBLIC LICENSE
-                       Version 3, 19 November 2007
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007

-Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

                            Preamble

-  The GNU Affero General Public License is a free, copyleft license for
-software and other kinds of works, specifically designed to ensure
-cooperation with the community in the case of network server software.
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.

   The licenses for most software and other practical works are designed
 to take away your freedom to share and change the works.  By contrast,
-our General Public Licenses are intended to guarantee your freedom to
+the GNU General Public License is intended to guarantee your freedom to
 share and change all versions of a program--to make sure it remains free
-software for all its users.
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.

   When we speak of free software, we are referring to freedom, not
 price.  Our General Public Licenses are designed to make sure that you
@@ -24,34 +26,44 @@ them if you wish), that you receive source code or can get it if you
 want it, that you can change the software or use pieces of it in new
 free programs, and that you know you can do these things.

-  Developers that use our General Public Licenses protect your rights
-with two steps: (1) assert copyright on the software, and (2) offer
-you this License which gives you legal permission to copy, distribute
-and/or modify the software.
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.

-  A secondary benefit of defending all users' freedom is that
-improvements made in alternate versions of the program, if they
-receive widespread use, become available for other developers to
-incorporate.  Many developers of free software are heartened and
-encouraged by the resulting cooperation.  However, in the case of
-software used on network servers, this result may fail to come about.
-The GNU General Public License permits making a modified version and
-letting the public access it on a server without ever releasing its
-source code to the public.
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.

-  The GNU Affero General Public License is designed specifically to
-ensure that, in such cases, the modified source code becomes available
-to the community.  It requires the operator of a network server to
-provide the source code of the modified version running there to the
-users of that server.  Therefore, public use of a modified version, on
-a publicly accessible server, gives the public access to the source
-code of the modified version.
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.

-  An older license, called the Affero General Public License and
-published by Affero, was designed to accomplish similar goals.  This is
-a different license, not a version of the Affero GPL, but Affero has
-released a new version of the Affero GPL which permits relicensing under
-this license.
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.

   The precise terms and conditions for copying, distribution and
 modification follow.
@@ -60,7 +72,7 @@ modification follow.
   0. Definitions.

-  "This License" refers to version 3 of the GNU Affero General Public License.
+  "This License" refers to version 3 of the GNU General Public License.

   "Copyright" also means copyright-like laws that apply to other kinds of
 works, such as semiconductor masks.
@@ -537,45 +549,35 @@ to collect a royalty for further conveying from those to whom you convey
 the Program, the only way you could satisfy both those terms and this
 License would be to refrain entirely from conveying the Program.

-  13. Remote Network Interaction; Use with the GNU General Public License.
-
-  Notwithstanding any other provision of this License, if you modify the
-Program, your modified version must prominently offer all users
-interacting with it remotely through a computer network (if your version
-supports such interaction) an opportunity to receive the Corresponding
-Source of your version by providing access to the Corresponding Source
-from a network server at no charge, through some standard or customary
-means of facilitating copying of software.  This Corresponding Source
-shall include the Corresponding Source for any work covered by version 3
-of the GNU General Public License that is incorporated pursuant to the
-following paragraph.
+  13. Use with the GNU Affero General Public License.

   Notwithstanding any other provision of this License, you have
 permission to link or combine any covered work with a work licensed
-under version 3 of the GNU General Public License into a single
+under version 3 of the GNU Affero General Public License into a single
 combined work, and to convey the resulting work.  The terms of this
 License will continue to apply to the part which is the covered work,
-but the work with which it is combined will remain governed by version
-3 of the GNU General Public License.
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.

   14. Revised Versions of this License.

   The Free Software Foundation may publish revised and/or new versions of
-the GNU Affero General Public License from time to time.  Such new versions
-will be similar in spirit to the present version, but may differ in detail to
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
 address new problems or concerns.

   Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU Affero General
+Program specifies that a certain numbered version of the GNU General
 Public License "or any later version" applies to it, you have the
 option of following the terms and conditions either of that numbered
 version or of any later version published by the Free Software
 Foundation.  If the Program does not specify a version number of the
-GNU Affero General Public License, you may choose any version ever published
+GNU General Public License, you may choose any version ever published
 by the Free Software Foundation.

   If the Program specifies that a proxy can decide which future
-versions of the GNU Affero General Public License can be used, that proxy's
+versions of the GNU General Public License can be used, that proxy's
 public statement of acceptance of a version permanently authorizes you
 to choose that version for the Program.
@@ -633,29 +635,40 @@ the "copyright" line and a pointer to where the full notice is found.

     Copyright (C) <year>  <name of author>

     This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU Affero General Public License as published by
+    it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 3 of the License, or
     (at your option) any later version.

     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU Affero General Public License for more details.
+    GNU General Public License for more details.

-    You should have received a copy of the GNU Affero General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.

 Also add information on how to contact you by electronic and paper mail.

-  If your software can interact with users remotely through a computer
-network, you should also make sure that it provides a way for users to
-get its source.  For example, if your program is a web application, its
-interface could display a "Source" link that leads users to an archive
-of the code.  There are many ways you could offer source, and different
-solutions will be better for different programs; see section 13 for the
-specific requirements.
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".

   You should also get your employer (if you work as a programmer) or school,
 if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU AGPL, see
-<https://www.gnu.org/licenses/>.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.

MANIFEST.in

@@ -1,2 +0,0 @@
-# https://packaging.python.org/en/latest/guides/using-manifest-in/#using-manifest-in
-include docs/README.rst

NEWS.rst

@@ -1,528 +0,0 @@
=========
Changelog
=========
.. towncrier release notes start
tractor 0.1.0a5 (2022-08-03)
============================
This is our final release supporting Python 3.9 since we will be moving
internals to the new `match:` syntax from 3.10 going forward. Further,
we have officially dropped usage of the `msgpack` library and
happily adopted `msgspec`.
Features
--------
- `#165 <https://github.com/goodboy/tractor/issues/165>`_: Add SIGINT
protection to our `pdbpp` based debugger subsystem such that for
(single-depth) actor trees in debug mode we ignore interrupts in any
actor currently holding the TTY lock thus avoiding clobbering IPC
connections and/or task and process state when working in the REPL.
As a big note, currently so called "nested" actor trees (trees with
actors having more than one parent/ancestor) are not fully supported
since we don't yet have a mechanism to relay the debug mode knowledge
"up" the actor tree (for eg. when handling a crash in a leaf actor).
As such, currently there is a set of tests and known scenarios which
will result in process clobbering by the zombie reaping machinery and
these have been documented in https://github.com/goodboy/tractor/issues/320.
The implementation details include:
- utilizing a custom SIGINT handler which we apply whenever an actor's
runtime enters the debug machinery, which we also make sure the
stdlib's `pdb` configuration doesn't override (which it does by
default without special instance config).
- litter the runtime with `maybe_wait_for_debugger()` mostly in spots
where the root actor should block before doing embedded nursery
teardown ops which both cancel potential-children-in-deubg as well
as eventually trigger zombie reaping machinery.
- hardening of the TTY locking semantics/API both in terms of IPC
terminations and cancellation and lock release determinism from
sync debugger instance methods.
- factoring of locking infrastructure into a new `._debug.Lock` global
which encapsulates all details of the ``trio`` sync primitives and
task/actor uid management and tracking.
We also add `ctrl-c` cases throughout the test suite though these are
disabled for py3.9 (`pdbpp` UX differences that don't seem worth
compensating for, especially since this will be our last 3.9 supported
release) and there are a slew of marked cases that aren't expected to
work in CI more generally (as mentioned in the "nested" tree note
above) despite seemingly working when run manually on linux.
- `#304 <https://github.com/goodboy/tractor/issues/304>`_: Add a new
``to_asyncio.LinkedTaskChannel.subscribe()`` which gives task-oriented
broadcast functionality semantically equivalent to
``tractor.MsgStream.subscribe()``; this makes it possible for multiple
``trio``-side tasks to consume ``asyncio``-side task msgs in tandem.
Further improvements to the test suite were added in this patch set
including a new scenario test for a sub-actor managed "service nursery"
(implementing the basics of a "service manager") including use of
*infected asyncio* mode. Further we added a lower level
``test_trioisms.py`` to start to track issues we need to work around in
``trio`` itself which in this case included a bug we were trying to
solve related to https://github.com/python-trio/trio/issues/2258.
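As a rough usage sketch of the new subscription API (the surrounding
infected-``asyncio`` setup is elided; ``chan`` is assumed to be the
``LinkedTaskChannel`` delivered by ``to_asyncio.open_channel_from()``):

.. code:: python

    import trio


    async def consume(chan):
        # each ``trio``-side task gets its own fan-out copy of
        # the ``asyncio``-side msg stream
        async with chan.subscribe() as bchan:
            async for msg in bchan:
                print(msg)


    async def fan_out(chan, ntasks: int = 3):
        # consume the same ``asyncio``-task msgs in tandem
        async with trio.open_nursery() as tn:
            for _ in range(ntasks):
                tn.start_soon(consume, chan)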
Bug Fixes
---------
- `#318 <https://github.com/goodboy/tractor/issues/318>`_: Fix
a previously undetected ``trio``-``asyncio`` task lifetime linking
issue with the ``to_asyncio.open_channel_from()`` api where both sides
were not properly waiting/signalling termination and it was possible
for ``asyncio``-side errors to not propagate due to a race condition.
The implementation fix summary is:
- add state to signal the end of the ``trio`` side task to be
read by the ``asyncio`` side and always cancel any ongoing
task in such cases.
- always wait on the ``asyncio`` task termination from the ``trio``
side on error before maybe raising said error.
- always close the ``trio`` mem chan on exit to ensure the other
side can detect it and follow.
Trivial/Internal Changes
------------------------
- `#248 <https://github.com/goodboy/tractor/issues/248>`_: Adjust the
`tractor._spawn.soft_wait()` strategy to avoid sending an actor cancel
request (via `Portal.cancel_actor()`) if either the child process is
detected as having terminated or the IPC channel is detected to be
closed.
This ensures (even) more deterministic inter-actor cancellation by
avoiding the timeout condition where possible when a child never
successfully spawned, crashed, or became un-contactable over IPC.
- `#295 <https://github.com/goodboy/tractor/issues/295>`_: Add an
experimental ``tractor.msg.NamespacePath`` type for passing Python
objects by "reference" through a ``str``-subtype message and using the
new ``pkgutil.resolve_name()`` for reference loading.
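The loading mechanism underneath can be sketched with just the stdlib;
the ``'<module>:<qualname>'`` path format shown here follows
``pkgutil.resolve_name()``'s spec:

.. code:: python

    from pkgutil import resolve_name

    # a "namespace path" is just a str-encoded object reference
    path = 'math:sqrt'

    # "reference loading" on the receiving side
    func = resolve_name(path)
    assert func(16) == 4.0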
- `#298 <https://github.com/goodboy/tractor/issues/298>`_: Add a new
`tractor.experimental` subpackage for staging new high level APIs and
subsystems that we might eventually make built-ins.
- `#300 <https://github.com/goodboy/tractor/issues/300>`_: Update to and
pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0) both of which
required adjustments for backwards-incompatible API tweaks.
- `#303 <https://github.com/goodboy/tractor/issues/303>`_: Fence off
``multiprocessing`` imports until absolutely necessary in an effort to
avoid "resource tracker" spawning side effects that seem to have
varying degrees of unreliability per Python release. Port to new
``msgspec.DecodeError``.
- `#305 <https://github.com/goodboy/tractor/issues/305>`_: Add
``tractor.query_actor()``, an address lookup helper which doesn't
deliver a ``Portal`` instance and instead returns just a socket address
``tuple``.
Sometimes it's handy to just have a simple way to figure out if
a "service" actor is up, so add this discovery helper for that. We'll
probably just leave it undocumented for now until we figure out
a longer-term/better discovery system.
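A hedged sketch of the intended usage; this assumes ``query_actor()``
works like our other discovery helpers (an async context manager
yielding the socket address, or ``None`` when no matching actor is
registered):

.. code:: python

    import tractor
    import trio


    async def main():
        # hypothetical service name; requires a running registry
        async with tractor.query_actor('some_service') as sockaddr:
            if sockaddr:
                print(f'service is up at {sockaddr}')
            else:
                print('service not (yet) registered')


    if __name__ == '__main__':
        trio.run(main)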
- `#316 <https://github.com/goodboy/tractor/issues/316>`_: Run windows
CI jobs on python 3.10 after some hacks for ``pdbpp`` dependency
issues.
Issue was to do with the now deprecated `pyreadline` project which
should be changed over to `pyreadline3`.
- `#317 <https://github.com/goodboy/tractor/issues/317>`_: Drop use of
the ``msgpack`` package and instead move fully to the ``msgspec``
codec library.
We've now used ``msgspec`` extensively in production and there's no
reason to not use it as default. Further this change preps us for the up
and coming typed messaging semantics (#196), dialog-unprotocol system
(#297), and caps-based messaging-protocols (#299) planned before our
first beta.
tractor 0.1.0a4 (2021-12-18)
============================
Features
--------
- `#275 <https://github.com/goodboy/tractor/issues/275>`_: Re-license
code base under AGPLv3. Also see `#274
<https://github.com/goodboy/tractor/pull/274>`_ for majority
contributor consensus on this decision.
- `#121 <https://github.com/goodboy/tractor/issues/121>`_: Add
"infected ``asyncio`` mode; a sub-system to spawn and control
``asyncio`` actors using ``trio``'s guest-mode.
This gets us the following very interesting functionality:
- ability to spawn an actor that has a process entry point of
``asyncio.run()`` by passing ``infect_asyncio=True`` to
``Portal.start_actor()`` (and friends).
- the ``asyncio`` actor embeds ``trio`` using guest-mode and starts
a main ``trio`` task which runs the ``tractor.Actor._async_main()``
entry point and engages all the normal ``tractor`` runtime IPC/messaging
machinery; for all purposes the actor is now running normally on
a ``trio.run()``.
- the actor can now make one-to-one task spawning requests to the
underlying ``asyncio`` event loop using either of:
* ``to_asyncio.run_task()`` to spawn and run an ``asyncio`` task to
completion and block until a return value is delivered.
* ``async with to_asyncio.open_channel_from():`` which spawns a task
and hands it a pair of "memory channels" to allow for bi-directional
streaming between the now SC-linked ``trio`` and ``asyncio`` tasks.
The output from any call(s) to ``asyncio`` can be handled as normal in
``trio``/``tractor`` task operation with the caveat of the overhead due
to guest-mode use.
For more details see the `original PR
<https://github.com/goodboy/tractor/pull/121>`_ and `issue
<https://github.com/goodboy/tractor/issues/120>`_.
- `#257 <https://github.com/goodboy/tractor/issues/257>`_: Add
``trionics.maybe_open_context()`` an actor-scoped async multi-task
context manager resource caching API.
Adds an SC-safe caching async context manager api that only enters on
the *first* task entry and only exits on the *last* task exit while in
between delivering the same cached value per input key. Keys can be
either an explicit ``key`` named arg provided by the user or a
hashable ``kwargs`` dict (will be converted to a ``list[tuple]``) which
is passed to the underlying manager function as input.
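A minimal sketch of the caching behaviour; ``expensive_resource()`` is
hypothetical and the exact keyword names (``acm_func``, ``kwargs``) and
the ``(cache_hit, value)`` yield shape are assumptions:

.. code:: python

    from contextlib import asynccontextmanager as acm

    import trio
    from tractor.trionics import maybe_open_context


    @acm
    async def expensive_resource(name: str):
        # despite 4 concurrent entries below this should
        # only print (and teardown) once
        print(f'opening {name}')
        yield name
        print(f'closing {name}')


    async def user_task(n: int):
        async with maybe_open_context(
            acm_func=expensive_resource,
            kwargs={'name': 'db'},
        ) as (cache_hit, value):
            assert value == 'db'


    async def main():
        async with trio.open_nursery() as tn:
            for i in range(4):
                tn.start_soon(user_task, i)


    trio.run(main)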
- `#261 <https://github.com/goodboy/tractor/issues/261>`_: Add
cross-actor-task ``Context`` oriented error relay, a new stream
overrun error-signal ``StreamOverrun``, and support disabling
``MsgStream`` backpressure as the default before a stream is opened or
by choice of the user.
We added stricter semantics around ``tractor.Context.open_stream():``
particularly to do with streams which are only opened at one end.
Previously, if only one end opened a stream there was no way for that
sender to know if msgs are being received until first, the feeder mem
chan on the receiver side hit a backpressure state and then that
condition delayed its msg loop processing task to eventually create
backpressure on the associated IPC transport. This is non-ideal in the
case where the receiver side never opened a stream by mistake since it
results in silent block of the sender and no adherence to the underlying
mem chan buffer size settings (which is still unsolved btw).
To solve this we add non-backpressure style message pushing inside
``Actor._push_result()`` by default and only use the backpressure
``trio.MemorySendChannel.send()`` call **iff** the local end of the
context has entered ``Context.open_stream():``. This way if the stream
was never opened but the mem chan is overrun, we relay back to the
sender a (new exception) ``StreamOverrun`` error which is raised in the
sender's scope with a special error message about the stream never
having been opened. Further, this behaviour (non-backpressure style
where senders can expect an error on overruns) can now be enabled with
``.open_stream(backpressure=False)`` and the underlying mem chan size
can be specified with a kwarg ``msg_buffer_size: int``.
Further bug fixes and enhancements in this changeset include:
- fix a race we were ignoring where if the callee task opened a context
it could enter ``Context.open_stream()`` before calling
``.started()``.
- Disallow calling ``Context.started()`` more than once.
- Enable ``Context`` linked tasks error relaying via the new
``Context._maybe_raise_from_remote_msg()`` which (for now) uses
a simple ``trio.Nursery.start_soon()`` to raise the error via closure
in the local scope.
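A hedged sketch of the sender-side opt-in described above; the
``StreamOverrun`` import location is an assumption:

.. code:: python

    import tractor
    from tractor import StreamOverrun  # assumed export location


    @tractor.context
    async def chatty_sender(ctx: tractor.Context) -> None:
        await ctx.started()

        # opt out of backpressure and cap the feeder mem chan
        async with ctx.open_stream(
            backpressure=False,
            msg_buffer_size=64,
        ) as stream:
            try:
                for i in range(10_000):
                    await stream.send(i)
            except StreamOverrun:
                # relayed back when the peer never entered
                # ``Context.open_stream()`` and its mem chan overran
                print('peer never opened the stream!')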
- `#267 <https://github.com/goodboy/tractor/issues/267>`_: This
(finally) adds fully acknowledged remote cancellation messaging
support for both explicit ``Portal.cancel_actor()`` calls as well as
when there are "runtime-wide" cancellations (eg. during KBI or
general actor nursery exception handling which causes a full actor
"crash"/termination).
You can think of this as the most ideal case in 2-generals where the
actor requesting the cancel of its child is able to always receive back
the ACK to that request. This leads to a more deterministic shutdown of
the child where the parent is able to wait for the child to fully
respond to the request. On a localhost setup, where the parent can
monitor the state of the child through process or other OS APIs instead
of solely through IPC messaging, the parent can know whether or not the
child decided to cancel with more certainty. In the case of separate
hosts, we still rely on a simple timeout approach until such a time
where we prefer to get "fancier".
- `#271 <https://github.com/goodboy/tractor/issues/271>`_: Add a per
actor ``debug_mode: bool`` control to our nursery.
This allows spawning actors via ``ActorNursery.start_actor()`` (and
other dependent methods) with a ``debug_mode=True`` flag much like
``tractor.open_nursery():`` such that per process crash handling
can be toggled for cases where a user does not need/want all child actors
to drop into the debugger on error. This is often useful when you have
actor-tasks which are expected to error often (and be re-run) but want
to specifically interact with some (problematic) child.
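For example (a minimal sketch):

.. code:: python

    import tractor
    import trio


    async def main():
        async with tractor.open_nursery() as n:
            # only this child will drop into the debugger on crash;
            # siblings spawned without the flag terminate "normally"
            portal = await n.start_actor(
                'problem_child',
                enable_modules=[__name__],
                debug_mode=True,
            )
            await portal.cancel_actor()


    if __name__ == '__main__':
        trio.run(main)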
Bugfixes
--------
- `#239 <https://github.com/goodboy/tractor/issues/239>`_: Fix
keyboard interrupt handling in ``Portal.open_context()`` blocks.
Previously this was not triggering cancellation of the remote task
context and could result in hangs if a stream was also opened. This
fix is to accept `BaseException` since it is likely that any other top
level exception other than KBI (even though not expected) should also
get this result.
- `#264 <https://github.com/goodboy/tractor/issues/264>`_: Fix
``Portal.run_in_actor()`` returns ``None`` result.
``None`` was being used as the cached result flag and obviously breaks
on a ``None`` returned from the remote target task. This would cause an
infinite hang if user code ever called ``Portal.result()`` *before* the
nursery exit. The simple fix is to use the *return message* as the
initial "no-result-received-yet" flag value and, once received, the
return value is read from the message to avoid the cache logic error.
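The (previously hanging) case in a nutshell:

.. code:: python

    import tractor
    import trio


    async def returns_none() -> None:
        return None


    async def main():
        async with tractor.open_nursery() as n:
            portal = await n.run_in_actor(returns_none)
            # pre-fix this call would hang forever since ``None``
            # doubled as the "no result yet" sentinel
            assert await portal.result() is None


    if __name__ == '__main__':
        trio.run(main)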
- `#266 <https://github.com/goodboy/tractor/issues/266>`_: Fix
graceful cancellation of daemon actors.
Previously, this was a bug where if the soft wait on a sub-process (the
``await .proc.wait()``) in the reaper task teardown was cancelled we
would fail over to the hard reaping sequence (meant for culling off any
potential zombies via system kill signals). The hard reap has a timeout
of 3s (currently though in theory we could make it shorter?) before
system signalling kicks in. This means that any daemon actor still
running during nursery exit would get hard reaped (3s later) instead of
cancelled via IPC message. Now we catch the ``trio.Cancelled``, call
``Portal.cancel_actor()`` on the daemon and expect the child to
self-terminate after the runtime cancels and shuts down the process.
- `#278 <https://github.com/goodboy/tractor/issues/278>`_: Repair
inter-actor stream closure semantics to work correctly with
``tractor.trionics.BroadcastReceiver`` task fan out usage.
A set of previously unknown bugs discovered in `#257
<https://github.com/goodboy/tractor/pull/257>`_ let graceful stream
closure result in hanging consumer tasks that use the broadcast APIs.
This adds better internal closure state tracking to the broadcast
receiver and message stream APIs and in particular ensures that when an
underlying stream/receive-channel (a broadcast receiver is receiving
from) is closed, all consumer tasks waiting on that underlying channel
are woken so they can receive the ``trio.EndOfChannel`` signal and
promptly terminate.
tractor 0.1.0a3 (2021-11-02)
============================
Features
--------
- Switch to using the ``trio`` process spawner by default on windows. (#166)
This gets windows users debugger support (manually tested) and in
general a more resilient (nested) actor tree implementation.
- Add optional `msgspec <https://jcristharif.com/msgspec/>`_ support
as an alternative, faster MessagePack codec. (#214)
Provides us with a path toward supporting typed IPC message contracts. Further,
``msgspec`` structs may be a valid tool to start for formalizing our
"SC dialog un-protocol" messages as described in `#36
<https://github.com/goodboy/tractor/issues/36>`_.
- Introduce a new ``tractor.trionics`` `sub-package`_ that exposes
a selection of our relevant high(er) level trio primitives and
goodies. (#241)
At the outset we offer a ``gather_contexts()`` context manager for
concurrently entering a sequence of async context managers (much like
a version of ``asyncio.gather()`` but for context managers) and use it
in a new ``tractor.open_actor_cluster()`` manager-helper that can be
entered to concurrently spawn a flat actor pool. We also now publicly
expose our "broadcast channel" APIs (``open_broadcast_receiver()``)
from here.
.. _sub-package: ../tractor/trionics
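A minimal sketch of ``gather_contexts()`` usage; ``open_slot()`` is
hypothetical and the ordered result shape is an assumption:

.. code:: python

    from contextlib import asynccontextmanager as acm

    import trio
    from tractor.trionics import gather_contexts


    @acm
    async def open_slot(i: int):
        await trio.sleep(0.1)  # all 3 enters overlap concurrently
        yield i


    async def main():
        async with gather_contexts(
            [open_slot(i) for i in range(3)],
        ) as values:
            assert tuple(values) == (0, 1, 2)


    trio.run(main)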
- Change the core message loop to handle task and actor-runtime cancel
requests immediately instead of scheduling them as is done for rpc-task
requests. (#245)
In order to obtain more reliable teardown mechanics for (complex) actor
trees it's important that we specially treat cancel requests as having
higher priority. Previously, it was possible that task cancel requests
could actually also themselves be cancelled if an "actor-runtime" cancel
request was received (can happen during messy multi actor crashes that
propagate). Instead, cancels now block the msg loop until serviced and
a response is relayed back to the requester. This also allows for
improved debugger support since we have determinism guarantees about
which processes must wait before hard killing their children.
- (`#248 <https://github.com/goodboy/tractor/pull/248>`_) Drop Python
3.8 support in favour of rolling with two latest releases for the time
being.
Misc
----
- (`#243 <https://github.com/goodboy/tractor/pull/243>`_) add a distinct
``'CANCEL'`` log level to allow the runtime to emit details about
cancellation machinery statuses.
tractor 0.1.0a2 (2021-09-07)
============================
Features
--------
- Add `tokio-style broadcast channels
<https://docs.rs/tokio/1.11.0/tokio/sync/broadcast/index.html>`_ as
a solution for `#204 <https://github.com/goodboy/tractor/pull/204>`_ and
discussed thoroughly in `trio/#987
<https://github.com/python-trio/trio/issues/987>`_.
This gives us local task broadcast functionality using a new
``BroadcastReceiver`` type which can wrap ``trio.ReceiveChannel`` and
provide fan-out copies of a stream of data to every subscribed consumer.
We use this new machinery to provide a ``ReceiveMsgStream.subscribe()``
async context manager which can be used by actor-local consumer tasks
to easily pull from a shared and dynamic IPC stream. (`#229
<https://github.com/goodboy/tractor/pull/229>`_)
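A sketch of the consumer-side pattern; ``stream`` is assumed to be
a ``ReceiveMsgStream`` delivered by one of the stream-opening APIs:

.. code:: python

    import trio


    async def consumer(name: str, stream):
        # every subscriber gets its own fan-out copy of each msg
        async with stream.subscribe() as bstream:
            async for msg in bstream:
                print(f'{name} got {msg}')


    async def fan_out_consumers(stream, ntasks: int = 3):
        async with trio.open_nursery() as tn:
            for i in range(ntasks):
                tn.start_soon(consumer, f'task_{i}', stream)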
Bugfixes
--------
- Handle broken channel/stream faults where the root's tty lock is left
acquired by some child actor who went MIA and the root ends up hanging
indefinitely. (`#234 <https://github.com/goodboy/tractor/pull/234>`_)
There are two parts here: we no longer shield-wait on the lock and
now always do our best to release the lock on the expected worst
case connection faults.
Deprecations and Removals
-------------------------
- Drop stream "shielding" support which was originally added to sidestep
a cancelled call to ``.receive()``.
In the original api design a stream instance was returned directly from
a call to ``Portal.run()`` and thus there was no "exit phase" to handle
cancellations and errors which would trigger implicit closure. Now that
we have said enter/exit semantics with ``Portal.open_stream_from()`` and
``Context.open_stream()`` we can drop this implicit (and arguably
confusing) behavior. (`#230 <https://github.com/goodboy/tractor/pull/230>`_)
- Drop Python 3.7 support in preparation for supporting 3.9+ syntax.
(`#232 <https://github.com/goodboy/tractor/pull/232>`_)
tractor 0.1.0a1 (2021-08-01)
============================
Features
--------
- Updated our uni-directional streaming API (`#206
<https://github.com/goodboy/tractor/pull/206>`_) to require a context
manager style ``async with Portal.open_stream_from(target) as stream:``
which explicitly determines when to stop a stream in the calling (aka
portal opening) actor much like ``async_generator.aclosing()``
enforcement.
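A minimal sketch of the enter/exit semantics (the ``stream_squares()``
target is hypothetical):

.. code:: python

    import tractor
    import trio


    async def stream_squares(limit: int):
        # an async-gen target run in another actor
        for i in range(limit):
            yield i ** 2


    async def main():
        async with tractor.open_nursery() as n:
            portal = await n.start_actor(
                'streamer',
                enable_modules=[__name__],
            )
            # exiting the block explicitly stops the stream in the
            # calling actor, much like ``async_generator.aclosing()``
            async with portal.open_stream_from(
                stream_squares,
                limit=5,
            ) as stream:
                async for sq in stream:
                    print(sq)

            await portal.cancel_actor()


    if __name__ == '__main__':
        trio.run(main)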
- Improved the ``multiprocessing`` backend sub-actor reaping (`#208
<https://github.com/goodboy/tractor/pull/208>`_) during actor nursery
exit, particularly during cancellation scenarios that previously might
result in hard to debug hangs.
- Added initial bi-directional streaming support in `#219
<https://github.com/goodboy/tractor/pull/219>`_ with follow up debugger
improvements via `#220 <https://github.com/goodboy/tractor/pull/220>`_
using the new ``tractor.Context`` cross-actor task syncing system.
The debugger upgrades add an edge triggered last-in-tty-lock semaphore
which allows the root process for a tree to avoid clobbering children
who have queued to acquire the ``pdb`` repl by waiting to cancel
sub-actors until the lock is known to be released **and** has no
pending waiters.
Experiments and WIPs
--------------------
- Initial optional ``msgspec`` serialization support in `#214
<https://github.com/goodboy/tractor/pull/214>`_ which should hopefully
land by next release.
- Improved "infect ``asyncio``" cross-loop task cancellation and error
propagation by vastly simplifying the cross-loop-task streaming approach.
We may end up just going with a use of ``anyio`` in the medium term to
avoid re-doing work done by their cross-event-loop portals. See the
``infect_asyncio`` examples for details.
Improved Documentation
----------------------
- `Updated our readme <https://github.com/goodboy/tractor/pull/211>`_ to
include more (and better) `examples
<https://github.com/goodboy/tractor#run-a-func-in-a-process>`_ (with
matching multi-terminal process monitoring shell commands) as well as
added many more examples to the `repo set
<https://github.com/goodboy/tractor/tree/master/examples>`_.
- Added a readme `"actors under the hood" section
<https://github.com/goodboy/tractor#under-the-hood>`_ in an effort to
guard against suggestions for changing the API away from ``trio``'s
*tasks-as-functions* style.
- Moved to using the `sphinx book theme
<https://sphinx-book-theme.readthedocs.io/en/latest/index.html>`_
though it needs some heavy tweaking and doesn't seem to show our logo
on rtd :(
Trivial/Internal Changes
------------------------
- Added a new ``TransportClosed`` internal exception/signal (`#215
<https://github.com/goodboy/tractor/pull/215>`_) for catching TCP
channel gentle closes instead of silently falling through the message
handler loop via an async generator ``return``.
Deprecations and Removals
-------------------------
- Dropped support for invoking sync functions (`#205
<https://github.com/goodboy/tractor/pull/205>`_) in other
actors/processes since you can always wrap a sync function from an
async one. Users can instead consider using ``trio-parallel`` which
is a project specifically geared for purely synchronous calls in
sub-processes.
- Deprecated our ``tractor.run()`` entrypoint `#197
<https://github.com/goodboy/tractor/pull/197>`_; the runtime is now
either started implicitly in first actor nursery use or via an
explicit call to ``tractor.open_root_actor()``. Full removal of
``tractor.run()`` will come by beta release.
tractor 0.1.0a0 (2021-02-28)
============================
..
TODO: fill out more of the details of the initial feature set in some TLDR form
Summary
-------
- ``trio`` based process spawner (using ``subprocess``)
- initial multi-process debugging with ``pdb++``
- windows support using both ``trio`` and ``multiprocessing`` spawners
- "portal" api for cross-process, structured concurrent, (streaming) IPC

README.rst

@@ -0,0 +1,65 @@
tractor
=======
A `structured concurrent`_, async-native "`actor model`_" built on trio_ and multiprocessing_.
|gh_actions|
|docs|
.. _actor model: https://en.wikipedia.org/wiki/Actor_model
.. _trio: https://github.com/python-trio/trio
.. _multiprocessing: https://en.wikipedia.org/wiki/Multiprocessing
.. _trionic: https://trio.readthedocs.io/en/latest/design.html#high-level-design-principles
.. _async sandwich: https://trio.readthedocs.io/en/latest/tutorial.html#async-sandwich
.. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228
``tractor`` is an attempt to bring trionic_ `structured concurrency`_ to
distributed multi-core Python; it aims to be the Python multi-processing
framework *you always wanted*.
``tractor`` lets you spawn ``trio`` *"actors"*: processes which each run
a ``trio`` scheduled task tree (also known as an `async sandwich`_).
*Actors* communicate by exchanging asynchronous messages_ and avoid
sharing any state. This model allows for highly distributed software
architecture which works just as well on multiple cores as it does over
many hosts.
The first step to grok ``tractor`` is to get the basics of ``trio`` down.
A great place to start is the `trio docs`_ and this `blog post`_.
.. _messages: https://en.wikipedia.org/wiki/Message_passing
.. _trio docs: https://trio.readthedocs.io/en/latest/
.. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
.. _structured concurrency: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
.. _3 axioms: https://en.wikipedia.org/wiki/Actor_model#Fundamental_concepts
.. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony
.. _async generators: https://www.python.org/dev/peps/pep-0525/
Install
-------
No PyPI release yet!

::

    pip install git+git://github.com/goodboy/tractor.git
Feel like saying hi?
--------------------
This project is very much coupled to the ongoing development of
``trio`` (i.e. ``tractor`` gets most of its ideas from that brilliant
community). If you want to help, have suggestions or just want to
say hi, please feel free to reach us in our `matrix channel`_. If
matrix seems too hip, we're also mostly all in the `trio gitter
channel`_!
.. _trio gitter channel: https://gitter.im/python-trio/general
.. _matrix channel: https://matrix.to/#/!tractor:matrix.org
.. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square
:target: https://actions-badge.atrox.dev/goodboy/tractor/goto
.. |docs| image:: https://readthedocs.org/projects/tractor/badge/?version=latest
:target: https://tractor.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status

docs/README.rst

@@ -1,632 +0,0 @@
|logo| ``tractor``: next-gen Python parallelism
|gh_actions|
|docs|
``tractor`` is a `structured concurrent`_, multi-processing_ runtime
built on trio_.
Fundamentally, ``tractor`` gives you parallelism via
``trio``-"*actors*": independent Python processes (aka
non-shared-memory threads) which maintain structured
concurrency (SC) *end-to-end* inside a *supervision tree*.
Cross-process (and thus cross-host) SC is accomplished through the
combined use of our "actor nurseries_" and an "SC-transitive IPC
protocol" constructed on top of multiple Pythons each running a ``trio``
scheduled runtime - a call to ``trio.run()``.
We believe the system adheres to the `3 axioms`_ of an "`actor model`_"
but likely *does not* look like what *you* probably think an "actor
model" looks like, and that's *intentional*.
The first step to grok ``tractor`` is to get the basics of ``trio`` down.
A great place to start is the `trio docs`_ and this `blog post`_.
Features
--------
- **It's just** a ``trio`` API
- *Infinitely nestable* process trees
- Builtin IPC streaming APIs with task fan-out broadcasting
- A "native" multi-core debugger REPL using `pdbp`_ (a fork & fix of
`pdb++`_ thanks to @mdmintz!)
- Support for a swappable, OS specific, process spawning layer
- A modular transport stack, allowing for custom serialization (eg. with
`msgspec`_), communications protocols, and environment specific IPC
primitives
- Support for spawning process-level-SC, inter-loop one-to-one-task oriented
``asyncio`` actors via "infected ``asyncio``" mode
- `structured chadcurrency`_ from the ground up
Run a func in a process
-----------------------
Use ``trio``'s style of focussing on *tasks as functions*:
.. code:: python
"""
Run with a process monitor from a terminal using::
$TERM -e watch -n 0.1 "pstree -a $$" \
& python examples/parallelism/single_func.py \
&& kill $!
"""
import os
import tractor
import trio
async def burn_cpu():
pid = os.getpid()
# burn a core @ ~ 50kHz
for _ in range(50000):
await trio.sleep(1/50000/50)
return os.getpid()
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(burn_cpu)
# burn rubber in the parent too
await burn_cpu()
# wait on result from target function
pid = await portal.result()
# end of nursery block
print(f"Collected subproc {pid}")
if __name__ == '__main__':
trio.run(main)
This runs ``burn_cpu()`` in a new process and reaps it on completion
of the nursery block.
If you only need to run a sync function and retrieve a single result, you
might want to check out `trio-parallel`_.
Zombie safe: self-destruct a process tree
-----------------------------------------
``tractor`` tries to protect you from zombies, no matter what.
.. code:: python
"""
Run with a process monitor from a terminal using::
$TERM -e watch -n 0.1 "pstree -a $$" \
& python examples/parallelism/we_are_processes.py \
&& kill $!
"""
from multiprocessing import cpu_count
import os
import tractor
import trio
async def target():
print(
f"Yo, i'm '{tractor.current_actor().name}' "
f"running in pid {os.getpid()}"
)
await trio.sleep_forever()
async def main():
async with tractor.open_nursery() as n:
for i in range(cpu_count()):
await n.run_in_actor(target, name=f'worker_{i}')
print('This process tree will self-destruct in 1 sec...')
await trio.sleep(1)
# raise an error in root actor/process and trigger
# reaping of all minions
raise Exception('Self Destructed')
if __name__ == '__main__':
try:
trio.run(main)
except Exception:
print('Zombies Contained')
If you can create zombie child processes (without using a system signal)
it **is a bug**.
"Native" multi-process debugging
--------------------------------
Using the magic of `pdbp`_ and our internal IPC, we've
been able to create a native feeling debugging experience for
any (sub-)process in your ``tractor`` tree.
.. code:: python
    from os import getpid

    import tractor
    import trio


    async def breakpoint_forever():
        "Indefinitely re-enter debugger in child actor."
        while True:
            yield 'yo'
            await tractor.breakpoint()


    async def name_error():
        "Raise a ``NameError``"
        getattr(doggypants)


    async def main():
        """Test breakpoint in a streaming actor.
        """
        async with tractor.open_nursery(
            debug_mode=True,
            loglevel='error',
        ) as n:

            p0 = await n.start_actor('bp_forever', enable_modules=[__name__])
            p1 = await n.start_actor('name_error', enable_modules=[__name__])

            # retrieve results
            stream = await p0.run(breakpoint_forever)
            await p1.run(name_error)


    if __name__ == '__main__':
        trio.run(main)
You can run this with::
    >>> python examples/debugging/multi_daemon_subactors.py
And, yes, there's a built-in crash handling mode B)
We're hoping to add a respawn-from-repl system soon!
SC compatible bi-directional streaming
--------------------------------------
Yes, you saw it here first; we provide 2-way streams
with reliable, transitive setup/teardown semantics.
Our nascent api is reminiscent of ``trio.Nursery.start()``
style invocation:
.. code:: python
    import trio
    import tractor


    @tractor.context
    async def simple_rpc(
        ctx: tractor.Context,
        data: int,
    ) -> None:
        '''Test a small ping-pong 2-way streaming server.

        '''
        # signal to parent that we're up much like
        # ``trio_typing.TaskStatus.started()``
        await ctx.started(data + 1)

        async with ctx.open_stream() as stream:

            count = 0
            async for msg in stream:

                assert msg == 'ping'
                await stream.send('pong')
                count += 1

            else:
                assert count == 10


    async def main() -> None:

        async with tractor.open_nursery() as n:

            portal = await n.start_actor(
                'rpc_server',
                enable_modules=[__name__],
            )

            # XXX: this syntax requires py3.9
            async with (
                portal.open_context(
                    simple_rpc,
                    data=10,
                ) as (ctx, sent),

                ctx.open_stream() as stream,
            ):
                assert sent == 11

                count = 0
                # receive msgs using async for style
                await stream.send('ping')

                async for msg in stream:
                    assert msg == 'pong'
                    await stream.send('ping')
                    count += 1

                    if count >= 9:
                        break

            # explicitly teardown the daemon-actor
            await portal.cancel_actor()


    if __name__ == '__main__':
        trio.run(main)
See original proposal and discussion in `#53`_ as well
as follow up improvements in `#223`_ that we'd love to
hear your thoughts on!
.. _#53: https://github.com/goodboy/tractor/issues/53
.. _#223: https://github.com/goodboy/tractor/issues/223
Worker poolz are easy peasy
---------------------------
The initial ask from most new users is *"how do I make a worker
pool thing?"*.
``tractor`` is built to handle any SC (structured concurrent) process
tree you can imagine; a "worker pool" pattern is a trivial special
case.
We have a `full worker pool re-implementation`_ of the std-lib's
``concurrent.futures.ProcessPoolExecutor`` example for reference.
You can run it like so (from this dir) to see the process tree in
real time::
    $TERM -e watch -n 0.1  "pstree -a $$" \
        & python examples/parallelism/concurrent_actors_primes.py \
        && kill $!
This uses no extra threads, fancy semaphores or futures; all we need
is ``tractor``'s IPC!
"Infected ``asyncio``" mode
---------------------------
Have a bunch of ``asyncio`` code you want to force to be SC at the process level?
Check out our experimental system for `guest-mode`_ controlled
``asyncio`` actors:
.. code:: python
    import asyncio
    from statistics import mean
    import time

    import trio
    import tractor


    async def aio_echo_server(
        to_trio: trio.MemorySendChannel,
        from_trio: asyncio.Queue,
    ) -> None:

        # a first message must be sent **from** this ``asyncio``
        # task or the ``trio`` side will never unblock from
        # ``tractor.to_asyncio.open_channel_from():``
        to_trio.send_nowait('start')

        # XXX: this uses an ``from_trio: asyncio.Queue`` currently but we
        # should probably offer something better.
        while True:
            # echo the msg back
            to_trio.send_nowait(await from_trio.get())
            await asyncio.sleep(0)


    @tractor.context
    async def trio_to_aio_echo_server(
        ctx: tractor.Context,
    ):
        # this will block until the ``asyncio`` task sends a "first"
        # message.
        async with tractor.to_asyncio.open_channel_from(
            aio_echo_server,
        ) as (first, chan):

            assert first == 'start'
            await ctx.started(first)

            async with ctx.open_stream() as stream:

                async for msg in stream:
                    await chan.send(msg)

                    out = await chan.receive()
                    # echo back to parent actor-task
                    await stream.send(out)


    async def main():

        async with tractor.open_nursery() as n:
            p = await n.start_actor(
                'aio_server',
                enable_modules=[__name__],
                infect_asyncio=True,
            )

            async with p.open_context(
                trio_to_aio_echo_server,
            ) as (ctx, first):

                assert first == 'start'

                count = 0
                async with ctx.open_stream() as stream:

                    delays = []
                    send = time.time()

                    await stream.send(count)
                    async for msg in stream:
                        recv = time.time()
                        delays.append(recv - send)
                        assert msg == count
                        count += 1
                        send = time.time()
                        await stream.send(count)

                        if count >= 1e3:
                            break

                print(f'mean round trip rate (Hz): {1/mean(delays)}')
                await p.cancel_actor()


    if __name__ == '__main__':
        trio.run(main)
Yes, we spawn a python process, run ``asyncio``, start ``trio`` on the
``asyncio`` loop, then send commands to the ``trio`` scheduled tasks to
tell ``asyncio`` tasks what to do XD
We need help refining the `asyncio`-side channel API to be more
`trio`-like. Feel free to sling your opinion in `#273`_!
.. _#273: https://github.com/goodboy/tractor/issues/273
Higher level "cluster" APIs
---------------------------
To be extra terse the ``tractor`` devs have started hacking some "higher
level" APIs for managing actor trees/clusters. These interfaces should
generally be considered provisional for now but we encourage you to try
them and provide feedback. Here's a new API that lets you quickly
spawn a flat cluster:
.. code:: python
    import trio
    import tractor


    async def sleepy_jane():
        uid = tractor.current_actor().uid
        print(f'Yo i am actor {uid}')
        await trio.sleep_forever()


    async def main():
        '''
        Spawn a flat actor cluster, with one process per
        detected core.

        '''
        portal_map: dict[str, tractor.Portal]
        results: dict[str, str]

        # look at this hip new syntax!
        async with (

            tractor.open_actor_cluster(
                modules=[__name__]
            ) as portal_map,

            trio.open_nursery() as n,
        ):

            for (name, portal) in portal_map.items():
                n.start_soon(portal.run, sleepy_jane)

            await trio.sleep(0.5)

            # kill the cluster with a cancel
            raise KeyboardInterrupt


    if __name__ == '__main__':
        try:
            trio.run(main)
        except KeyboardInterrupt:
            pass
.. _full worker pool re-implementation: https://github.com/goodboy/tractor/blob/master/examples/parallelism/concurrent_actors_primes.py
Install
-------
From PyPI::

    pip install tractor

From git::

    pip install git+git://github.com/goodboy/tractor.git
Under the hood
--------------
``tractor`` is an attempt to pair trionic_ `structured concurrency`_ with
distributed Python. You can think of it as a ``trio``
*-across-processes* or simply as an opinionated replacement for the
stdlib's ``multiprocessing`` but built on async programming primitives
from the ground up.
Don't be scared off by this description. ``tractor`` **is just** ``trio``
but with nurseries for process management and cancel-able streaming IPC.
If you understand how to work with ``trio``, ``tractor`` will give you
the parallelism you may have been needing.
Wait, huh?! I thought "actors" have messages, and mailboxes and stuff?!
***********************************************************************
Let's stop and ask how many canon actor model papers have you actually read ;)
From our experience many "actor systems" aren't really "actor models"
since they **don't adhere** to the `3 axioms`_ and pay even less
attention to the problem of *unbounded non-determinism* (which was the
whole point for creation of the model in the first place).
From the author's mouth, **the only thing required** is `adherence to`_
the `3 axioms`_, *and that's it*.
``tractor`` adheres to said base requirements of an "actor model"::

    In response to a message, an actor may:

    - send a finite number of new messages
    - create a finite number of new actors
    - designate a new behavior to process subsequent messages
**and** requires *no further api changes* to accomplish this.
If you want to debate this further please feel free to chime in on our
chat or discuss on one of the following issues *after you've read
everything in them*:
- https://github.com/goodboy/tractor/issues/210
- https://github.com/goodboy/tractor/issues/18
Let's clarify our parlance
**************************
Whether or not ``tractor`` has "actors" underneath should be mostly
irrelevant to users other than for referring to the interactions of our
primary runtime primitives: each Python process + ``trio.run()``
+ surrounding IPC machinery. These are our high level, base
*runtime-units-of-abstraction* which both *are* (as much as they can
be in Python) and will be referred to as our *"actors"*.
The main goal of ``tractor`` is to allow for highly distributed
software that, through the adherence to *structured concurrency*,
results in systems which fail in predictable, recoverable and maybe even
understandable ways; being an "actor model" is just one way to describe
properties of the system.
What's on the TODO:
-------------------
Help us push toward the future of distributed `Python`.
- Erlang-style supervisors via composed context managers (see `#22
<https://github.com/goodboy/tractor/issues/22>`_)
- Typed messaging protocols (ex. via ``msgspec.Struct``, see `#36
<https://github.com/goodboy/tractor/issues/36>`_)
- Typed capability-based (dialog) protocols ( see `#196
<https://github.com/goodboy/tractor/issues/196>`_ with draft work
started in `#311 <https://github.com/goodboy/tractor/pull/311>`_)
- We **recently disabled CI-testing on windows** and need help getting
it running again! (see `#327
<https://github.com/goodboy/tractor/pull/327>`_). **We do have windows
support** (and have for quite a while) but since no active hacker
exists in the user-base to help test on that OS, for now we're not
actively maintaining testing due to the added hassle and general
latency.
Feel like saying hi?
--------------------
This project is very much coupled to the ongoing development of
``trio`` (i.e. ``tractor`` gets most of its ideas from that brilliant
community). If you want to help, have suggestions or just want to
say hi, please feel free to reach us in our `matrix channel`_. If
matrix seems too hip, we're also mostly all in the `trio gitter
channel`_!
.. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228
.. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing
.. _trio: https://github.com/python-trio/trio
.. _nurseries: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/#nurseries-a-structured-replacement-for-go-statements
.. _actor model: https://en.wikipedia.org/wiki/Actor_model
.. _trionic: https://trio.readthedocs.io/en/latest/design.html#high-level-design-principles
.. _async sandwich: https://trio.readthedocs.io/en/latest/tutorial.html#async-sandwich
.. _3 axioms: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=162s
.. .. _3 axioms: https://en.wikipedia.org/wiki/Actor_model#Fundamental_concepts
.. _adherence to: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=1821s
.. _trio gitter channel: https://gitter.im/python-trio/general
.. _matrix channel: https://matrix.to/#/!tractor:matrix.org
.. _pdbp: https://github.com/mdmintz/pdbp
.. _pdb++: https://github.com/pdbpp/pdbpp
.. _guest mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops
.. _messages: https://en.wikipedia.org/wiki/Message_passing
.. _trio docs: https://trio.readthedocs.io/en/latest/
.. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _structured chadcurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony
.. _async generators: https://www.python.org/dev/peps/pep-0525/
.. _trio-parallel: https://github.com/richardsheridan/trio-parallel
.. _msgspec: https://jcristharif.com/msgspec/
.. _guest-mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops
.. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square
:target: https://actions-badge.atrox.dev/goodboy/tractor/goto
.. |docs| image:: https://readthedocs.org/projects/tractor/badge/?version=latest
:target: https://tractor.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. |logo| image:: _static/tractor_logo_side.svg
:width: 250
:align: middle

View File

@ -54,44 +54,28 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_book_theme'
html_theme = 'sphinx_typlog_theme'
pygments_style = 'algol_nu'
pygments_style = 'sphinx'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# 'logo': 'tractor_logo_side.svg',
# 'description': 'Structured concurrent "actors"',
"repository_url": "https://github.com/goodboy/tractor",
"use_repository_button": True,
"home_page_in_toc": False,
"show_toc_level": 1,
"path_to_docs": "docs",
'logo': 'tractor_logo_side.svg',
'description': 'Structured concurrent "actors"',
'github_user': 'goodboy',
'github_repo': 'tractor',
}
html_sidebars = {
"**": [
"sbt-sidebar-nav.html",
# "sidebar-search-bs.html",
# 'localtoc.html',
],
# 'logo.html',
# 'github.html',
# 'relations.html',
# 'searchbox.html'
# ]
'logo.html',
'github.html',
'relations.html',
'searchbox.html'
]
}
# doesn't seem to work?
# extra_navbar = "<p>nextttt-gennnnn</p>"
html_title = ''
html_logo = '_static/tractor_logo_side.svg'
html_favicon = '_static/tractor_logo_side.svg'
# show_navbar_depth = 1
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".

View File

@ -1,51 +0,0 @@
Hot tips for ``tractor`` hackers
================================
This is a WIP guide for newcomers to the project mostly to do with
dev, testing, CI and release gotchas, reminders and best practices.
``tractor`` is a fairly novel project compared to most since it is
effectively a new way of doing distributed computing in Python and is
much closer to working with an "application level runtime" (like erlang
OTP or scala's akka project) than it is a traditional Python library.
As such, having an arsenal of tools and recipes for figuring out the
right way to debug problems when they do arise is somewhat of
a necessity.
Making a Release
----------------
We currently do nothing special here except follow the traditional
PyPA release recipe as `documented by twine`_. I personally
create sub-dirs within the generated `dist/` with an explicit
release name such as `alpha3/` when there's been a sequence of
releases I've made, but it really is up to you how you like to
organize generated sdists locally.
The resulting build cmds are approximately:
.. code:: bash
python setup.py sdist -d ./dist/XXX.X/
twine upload -r testpypi dist/XXX.X/*
twine upload dist/XXX.X/*
.. _documented by twine: https://twine.readthedocs.io/en/latest/#using-twine
Debugging and monitoring actor trees
------------------------------------
TODO: but there are tips in the readme for some terminal commands
which can be used to see the process trees easily on Linux.
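In the meantime, one quick Linux recipe (borrowed from the example
scripts further down) is to run any example under a process tree
monitor; ``$TERM`` here is whatever terminal emulator you use:

.. code:: bash

    # watch the process tree refresh while an example runs
    $TERM -e watch -n 0.1 "pstree -a $$" \
        & python examples/parallelism/we_are_processes.py \
        && kill $!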
Using the log system to trace `trio` task flow
----------------------------------------------
TODO: the logging system is meant to be oriented around
stack "layers" of the runtime such that you can track
"logical abstraction layers" in the code such as errors, cancellation,
IPC and streaming, and the low level transport and wire protocols.
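As a rough sketch of the currently exposed bits (level names follow
the stdlib ``logging`` conventions; the layer semantics are still
evolving and the module name below is just an example):

.. code:: python

    import tractor

    # enable runtime console logging at a given level
    tractor.log.get_console_log("INFO")

    # or grab a namespaced logger for your own "layer"/module
    log = tractor.log.get_logger('my_module')
    log.info("tracing my abstraction layer")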

View File

@ -1,109 +0,0 @@
tractor
=======
The Python async-native multi-core system *you always wanted*.
|gh_actions|
|docs|
.. _actor model: https://en.wikipedia.org/wiki/Actor_model
.. _trio: https://github.com/python-trio/trio
.. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing
.. _trionic: https://trio.readthedocs.io/en/latest/design.html#high-level-design-principles
.. _async sandwich: https://trio.readthedocs.io/en/latest/tutorial.html#async-sandwich
.. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228
``tractor`` is a `structured concurrent`_ "`actor model`_" built on trio_ and multi-processing_.
It is an attempt to pair trionic_ `structured concurrency`_ with
distributed Python. You can think of it as a ``trio``
*-across-processes* or simply as an opinionated replacement for the
stdlib's ``multiprocessing`` but built on async programming primitives
from the ground up.
Don't be scared off by this description. ``tractor`` **is just ``trio``**
but with nurseries for process management and cancel-able IPC.
If you understand how to work with ``trio``, ``tractor`` will give you
the parallelism you've been missing.
``tractor``'s nurseries let you spawn ``trio`` *"actors"*: new Python
processes which each run a ``trio`` scheduled task tree (also known as
an `async sandwich`_ - a call to ``trio.run()``). That is, each
"*Actor*" is a new process plus a ``trio`` runtime.
"Actors" communicate by exchanging asynchronous messages_ and avoid
sharing state. The intention of this model is to allow for highly
distributed software that, through the adherence to *structured
concurrency*, results in systems which fail in predictable and
recoverable ways.
The first step to grok ``tractor`` is to get the basics of ``trio`` down.
A great place to start is the `trio docs`_ and this `blog post`_.
.. _messages: https://en.wikipedia.org/wiki/Message_passing
.. _trio docs: https://trio.readthedocs.io/en/latest/
.. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
.. _structured concurrency: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
.. _3 axioms: https://en.wikipedia.org/wiki/Actor_model#Fundamental_concepts
.. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony
.. _async generators: https://www.python.org/dev/peps/pep-0525/
Install
-------
No PyPi release yet!
::
pip install git+git://github.com/goodboy/tractor.git
Alluring Features
-----------------
- **It's just** ``trio``, but with SC applied to processes (aka "actors")
- Infinitely nestable process trees
- Built-in API for inter-process streaming
- A (first ever?) "native" multi-core debugger for Python using `pdb++`_
- (Soon to land) ``asyncio`` support allowing for "infected" actors where
`trio` drives the `asyncio` scheduler via the astounding "`guest mode`_"
Example: self-destruct a process tree
-------------------------------------
.. literalinclude:: ../../examples/parallelism/we_are_processes.py
:language: python
The example you're probably after...
------------------------------------
It seems the initial query from most new users is "how do I make a worker
pool thing?".
``tractor`` is built to handle any SC process tree you can
imagine; the "worker pool" pattern is a trivial special case:
.. literalinclude:: ../../examples/parallelism/concurrent_actors_primes.py
:language: python
Feel like saying hi?
--------------------
This project is very much coupled to the ongoing development of
``trio`` (i.e. ``tractor`` gets most of its ideas from that brilliant
community). If you want to help, have suggestions or just want to
say hi, please feel free to reach us in our `matrix channel`_. If
matrix seems too hip, we're also mostly all in the `trio gitter
channel`_!
.. _trio gitter channel: https://gitter.im/python-trio/general
.. _matrix channel: https://matrix.to/#/!tractor:matrix.org
.. _pdb++: https://github.com/pdbpp/pdbpp
.. _guest mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops
.. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square
:target: https://actions-badge.atrox.dev/goodboy/tractor/goto
.. |docs| image:: https://readthedocs.org/projects/tractor/badge/?version=latest
:target: https://tractor.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status

View File

@ -1,51 +0,0 @@
# Configuration file for the Sphinx documentation builder.
# this config is for the rst generation extension and thus
# requires only basic settings:
# https://github.com/sphinx-contrib/restbuilder
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Warn about all references to unknown targets
nitpicky = True
# The master toctree document.
master_doc = '_sphinx_readme'
# -- Project information -----------------------------------------------------
project = 'tractor'
copyright = '2018, Tyler Goodlet'
author = 'Tyler Goodlet'
# The full version, including alpha/beta/rc tags
release = '0.0.0a0.dev0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinxcontrib.restbuilder',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

View File

@ -3,13 +3,12 @@
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
``tractor``
===========
tractor
=======
A `structured concurrent`_, async-native "`actor model`_" built on trio_ and multiprocessing_.
.. toctree::
:maxdepth: 1
:maxdepth: 2
:caption: Contents:
.. _actor model: https://en.wikipedia.org/wiki/Actor_model
@ -59,6 +58,8 @@ say hi, please feel free to ping me on the `trio gitter channel`_!
.. _trio gitter channel: https://gitter.im/python-trio/general
.. contents::
Philosophy
----------
@ -144,7 +145,7 @@ and use the ``run_in_actor()`` method:
What's going on?
- an initial *actor* is started with ``trio.run()`` and told to execute
- an initial *actor* is started with ``tractor.run()`` and told to execute
its main task_: ``main()``
- inside ``main()`` an actor is *spawned* using an ``ActorNursery`` and is told
@ -181,7 +182,7 @@ Here is a similar example using the latter method:
.. literalinclude:: ../examples/actor_spawning_and_causality_with_daemon.py
The ``enable_modules`` `kwarg` above is a list of module path
The ``rpc_module_paths`` `kwarg` above is a list of module path
strings that will be loaded and made accessible for execution in the
remote actor through a call to ``Portal.run()``. For now this is
a simple mechanism to restrict the functionality of the remote
@ -384,61 +385,37 @@ as ``multiprocessing`` calls it) which is running ``main()``.
.. _remote function execution: https://codespeak.net/execnet/example/test_info.html#remote-exec-a-function-avoiding-inlined-source-part-i
Actor local (aka *process global*) variables
********************************************
Although ``tractor`` uses a *shared-nothing* architecture between
processes you can of course share state between tasks running *within*
an actor (since a `trio.run()` runtime is single threaded). ``trio``
tasks spawned via multiple RPC calls to an actor can modify
*process-global-state* defined using Python module attributes:
Actor local variables
*********************
Although ``tractor`` uses a *shared-nothing* architecture between processes
you can of course share state between tasks running *within* an actor.
``trio`` tasks spawned via multiple RPC calls to an actor can access global
state using the per actor ``statespace`` dictionary:
.. code:: python
# a per process cache
_actor_cache: dict[str, bool] = {}
statespace = {'doggy': 10}
async def ping_endpoints(endpoints: list[str]):
    """Start a polling task which runs completely separate
    from our root actor/process.
    """
    # NOTE: this runs in a new process so no changes will
    # propagate back to the parent actor; ``check_endpoint_is_up()``
    # is presumed to be some user-defined helper.
    while True:
        for ep in endpoints:
            status = await check_endpoint_is_up(ep)
            _actor_cache[ep] = status
        await trio.sleep(0.5)


async def get_alive_endpoints():
    return {key for key, value in _actor_cache.items() if value}
def check_statespace():
# Remember this runs in a new process so no changes
# will propagate back to the parent actor
assert tractor.current_actor().statespace == statespace
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(ping_endpoints)
# print the alive endpoints after 3 seconds
await trio.sleep(3)
# this is submitted to be run in our "ping_endpoints" actor
print(await portal.run(get_alive_endpoints))
await n.run_in_actor(
'checker',
check_statespace,
statespace=statespace
)
You can pass any kind of (`msgpack`) serializable data between actors using
function call semantics but building out a state sharing system per-actor
is totally up to you.
Of course you don't have to use the ``statespace`` variable (it's mostly
a convenience for passing simple data to newly spawned actors); building
out a state sharing system per-actor is totally up to you.
Service Discovery
@ -457,7 +434,7 @@ find an actor's socket address by name use the ``find_actor()`` function:
.. literalinclude:: ../examples/service_discovery.py
The ``name`` value you should pass to ``find_actor()`` is the one you passed as the
*first* argument to either ``trio.run()`` or ``ActorNursery.start_actor()``.
*first* argument to either ``tractor.run()`` or ``ActorNursery.start_actor()``.
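For example, a sketch using the related ``wait_for_actor()`` helper
(shown in the example scripts), which blocks until the named actor has
registered; the ``locate()`` wrapper is hypothetical:

.. code:: python

    import tractor


    async def locate(name: str):
        # block until the named actor registers with the arbiter,
        # then use the portal like any other
        async with tractor.wait_for_actor(name) as portal:
            print(f'found {name} at {portal.channel.uid}')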
Running actors standalone
@ -471,17 +448,7 @@ need to hop into a debugger. You just need to pass the existing
.. code:: python
import trio
import tractor
async def main():
async with tractor.open_root_actor(
arbiter_addr=('192.168.0.10', 1616)
):
await trio.sleep_forever()
trio.run(main)
tractor.run(main, arbiter_addr=('192.168.0.10', 1616))
Choosing a process spawning backend
@ -489,7 +456,7 @@ Choosing a process spawning backend
``tractor`` is architected to support multiple actor (sub-process)
spawning backends. Specific defaults are chosen based on your system
but you can also explicitly select a backend of choice at startup
via a ``start_method`` kwarg to ``tractor.open_nursery()``.
via a ``start_method`` kwarg to ``tractor.run()``.
Currently the options available are:
@ -545,14 +512,13 @@ main python module of the program:
.. code:: python
# application/__main__.py
import trio
import tractor
import multiprocessing
from . import tractor_app
if __name__ == '__main__':
multiprocessing.freeze_support()
trio.run(tractor_app.main)
tractor.run(tractor_app.main)
And execute as::

View File

@ -1,4 +0,0 @@
#!/bin/bash
sphinx-build -b rst ./github_readme ./
mv _sphinx_readme.rst _README.rst

View File

@ -16,4 +16,4 @@ if __name__ == '__main__':
# temporary dir and name it test_example.py. We import that script
# module here and invoke its ``main()``.
from . import test_example
test_example.trio.run(test_example.main)
test_example.tractor.run(test_example.main, start_method='spawn')

View File

@ -1,4 +1,3 @@
import trio
import tractor
_this_module = __name__
@ -14,7 +13,7 @@ async def hi():
async def say_hello(other_actor):
async with tractor.wait_for_actor(other_actor) as portal:
return await portal.run(hi)
return await portal.run(_this_module, 'hi')
async def main():
@ -25,14 +24,14 @@ async def main():
print("Alright... Action!")
donny = await n.run_in_actor(
'donny',
say_hello,
name='donny',
# arguments are always named
other_actor='gretchen',
)
gretchen = await n.run_in_actor(
'gretchen',
say_hello,
name='gretchen',
other_actor='donny',
)
print(await gretchen.result())
@ -41,4 +40,4 @@ async def main():
if __name__ == '__main__':
trio.run(main)
tractor.run(main)

View File

@ -1,10 +1,8 @@
import trio
import tractor
async def cellar_door():
assert not tractor.is_root_process()
return "Dang that's beautiful"
def cellar_door():
return "Dang that's beautiful"
async def main():
@ -12,10 +10,7 @@ async def main():
"""
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
cellar_door,
name='some_linguist',
)
portal = await n.run_in_actor('some_linguist', cellar_door)
# The ``async with`` will unblock here since the 'some_linguist'
# actor has completed its main task ``cellar_door``.
@ -24,4 +19,4 @@ async def main():
if __name__ == '__main__':
trio.run(main)
tractor.run(main)

View File

@ -1,8 +1,7 @@
import trio
import tractor
async def movie_theatre_question():
def movie_theatre_question():
"""A question asked in a dark theatre, in a tangent
(errr, I mean different) process.
"""
@ -17,12 +16,12 @@ async def main():
portal = await n.start_actor(
'frank',
# enable the actor to run funcs from this current module
enable_modules=[__name__],
rpc_module_paths=[__name__],
)
print(await portal.run(movie_theatre_question))
print(await portal.run(__name__, 'movie_theatre_question'))
# call the subactor a 2nd time
print(await portal.run(movie_theatre_question))
print(await portal.run(__name__, 'movie_theatre_question'))
# the async with will block here indefinitely waiting
# for our actor "frank" to complete, but since it's an
@ -31,4 +30,4 @@ async def main():
if __name__ == '__main__':
trio.run(main)
tractor.run(main)

View File

@ -1,151 +0,0 @@
'''
Complex edge case where during real-time streaming the IPC transport
channels are wiped out (purposely in this example though it could have
been an outage) and we want to ensure that despite being in debug mode
(or not) the user can send SIGINT once they notice the hang and the
actor tree will eventually be cancelled without leaving any zombies.
'''
import trio
from tractor import (
open_nursery,
context,
Context,
MsgStream,
)
async def break_channel_silently_then_error(
stream: MsgStream,
):
async for msg in stream:
await stream.send(msg)
# XXX: close the channel right after an error is raised
# purposely breaking the IPC transport to make sure the parent
# doesn't get stuck in debug or hang on the connection join.
# this more or less simulates an infinite msg-receive hang on
# the other end.
await stream._ctx.chan.send(None)
assert 0
async def close_stream_and_error(
stream: MsgStream,
):
async for msg in stream:
await stream.send(msg)
# wipe out channel right before raising
await stream._ctx.chan.send(None)
await stream.aclose()
assert 0
@context
async def recv_and_spawn_net_killers(
ctx: Context,
break_ipc_after: bool | int = False,
) -> None:
'''
Receive stream msgs and spawn some IPC killers mid-stream.
'''
await ctx.started()
async with (
ctx.open_stream() as stream,
trio.open_nursery() as n,
):
async for i in stream:
print(f'child echoing {i}')
await stream.send(i)
if (
break_ipc_after
and i > break_ipc_after
):
print(
    '#################################\n'
    'Simulating child-side IPC BREAK!\n'
    '#################################'
)
n.start_soon(break_channel_silently_then_error, stream)
n.start_soon(close_stream_and_error, stream)
async def main(
debug_mode: bool = False,
start_method: str = 'trio',
# by default we break the parent IPC first (if configured to break
# at all), but this can be changed so the child does first (even if
# both are set to break).
break_parent_ipc_after: int | bool = False,
break_child_ipc_after: int | bool = False,
) -> None:
async with (
open_nursery(
start_method=start_method,
# NOTE: even debugger is used we shouldn't get
# a hang since it never engages due to broken IPC
debug_mode=debug_mode,
loglevel='warning',
) as an,
):
portal = await an.start_actor(
'chitty_hijo',
enable_modules=[__name__],
)
async with portal.open_context(
recv_and_spawn_net_killers,
break_ipc_after=break_child_ipc_after,
) as (ctx, sent):
async with ctx.open_stream() as stream:
for i in range(1000):
if (
break_parent_ipc_after
and i > break_parent_ipc_after
):
print(
'#################################\n'
'Simulating parent-side IPC BREAK!\n'
'#################################'
)
await stream._ctx.chan.send(None)
# it actually breaks right here in the
# mp_spawn/forkserver backends and thus the zombie
# reaper never even kicks in?
print(f'parent sending {i}')
await stream.send(i)
with trio.move_on_after(2) as cs:
# NOTE: in the parent side IPC failure case this
# will raise an ``EndOfChannel`` after the child
# is killed and sends a stop msg back to its
# caller/this-parent.
rx = await stream.receive()
print(f"I'm a happy user and echoed to me is {rx}")
if cs.cancelled_caught:
# pretend to be a user seeing no streaming action
# thinking it's a hang, and then hitting ctl-c..
print("YOO i'm a user anddd thingz hangin..")
print(
"YOO i'm mad send side dun but thingz hangin..\n"
'MASHING CTlR-C Ctl-c..'
)
raise KeyboardInterrupt
if __name__ == '__main__':
trio.run(main)

View File

@ -1,42 +1,36 @@
from typing import AsyncIterator
from itertools import repeat
import trio
import tractor
tractor.log.get_console_log("INFO")
async def stream_forever() -> AsyncIterator[int]:
async def stream_forever():
for i in repeat("I can see these little future bubble things"):
# each yielded value is sent over the ``Channel`` to the parent actor
# each yielded value is sent over the ``Channel`` to the
# parent actor
yield i
await trio.sleep(0.01)
async def main():
# stream for at most 1 seconds
with trio.move_on_after(1) as cancel_scope:
async with tractor.open_nursery() as n:
portal = await n.start_actor(
f'donny',
rpc_module_paths=[__name__],
)
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'donny',
enable_modules=[__name__],
)
# this async for loop streams values from the above
# async generator running in a separate process
async with portal.open_stream_from(stream_forever) as stream:
count = 0
async for letter in stream:
# this async for loop streams values from the above
# async generator running in a separate process
async for letter in await portal.run(__name__, 'stream_forever'):
print(letter)
count += 1
if count > 50:
break
print('stream terminated')
await portal.cancel_actor()
# we support trio's cancellation system
assert cancel_scope.cancelled_caught
assert n.cancelled
if __name__ == '__main__':
trio.run(main)
tractor.run(main, start_method='forkserver')

View File

@ -1,54 +0,0 @@
'''
Fast fail test with a context.
Ensure the partially initialized sub-actor process
doesn't cause a hang on error/cancel of the parent
nursery.
'''
import trio
import tractor
@tractor.context
async def sleep(
ctx: tractor.Context,
):
await trio.sleep(0.5)
await ctx.started()
await trio.sleep_forever()
async def open_ctx(
n: tractor._supervise.ActorNursery
):
# spawn both actors
portal = await n.start_actor(
name='sleeper',
enable_modules=[__name__],
)
async with portal.open_context(
sleep,
) as (ctx, first):
assert first is None
async def main():
async with tractor.open_nursery(
debug_mode=True,
loglevel='runtime',
) as an:
async with trio.open_nursery() as n:
n.start_soon(open_ctx, an)
await trio.sleep(0.2)
await trio.sleep(0.1)
assert 0
if __name__ == '__main__':
trio.run(main)

View File

@ -1,45 +0,0 @@
import tractor
import trio
async def breakpoint_forever():
"Indefinitely re-enter debugger in child actor."
while True:
yield 'yo'
await tractor.breakpoint()
async def name_error():
"Raise a ``NameError``"
getattr(doggypants) # noqa
async def main():
"""Test breakpoint in a streaming actor.
"""
async with tractor.open_nursery(
debug_mode=True,
loglevel='error',
) as n:
p0 = await n.start_actor('bp_forever', enable_modules=[__name__])
p1 = await n.start_actor('name_error', enable_modules=[__name__])
# retrieve results
async with p0.open_stream_from(breakpoint_forever) as stream:
# triggers the first name error
try:
await p1.run(name_error)
except tractor.RemoteActorError as rae:
assert rae.type is NameError
async for i in stream:
# a second time try the failing subactor and this time
# let error propagate up to the parent/nursery.
await p1.run(name_error)
if __name__ == '__main__':
trio.run(main)

View File

@ -1,10 +1,9 @@
import trio
import tractor
async def name_error():
"Raise a ``NameError``"
getattr(doggypants) # noqa
getattr(doggypants)
async def breakpoint_forever():
@ -12,37 +11,17 @@ async def breakpoint_forever():
while True:
await tractor.breakpoint()
# NOTE: if the test never sent 'q'/'quit' commands
# on the pdb repl, without this checkpoint line the
# repl would spin in this actor forever.
# await trio.sleep(0)
async def spawn_until(depth=0):
""""A nested nursery that triggers another ``NameError``.
"""
async with tractor.open_nursery() as n:
if depth < 1:
await n.run_in_actor(breakpoint_forever)
p = await n.run_in_actor(
name_error,
name='name_error'
)
await trio.sleep(0.5)
# rx and propagate error from child
await p.result()
# await n.run_in_actor('breakpoint_forever', breakpoint_forever)
await n.run_in_actor('name_error', name_error)
else:
# recursive call to spawn another process branching layer of
# the tree
depth -= 1
await n.run_in_actor(
spawn_until,
depth=depth,
name=f'spawn_until_{depth}',
)
await n.run_in_actor(f'spawn_until_{depth}', spawn_until, depth=depth)
async def main():
@ -64,35 +43,16 @@ async def main():
python -m tractor._child --uid ('spawn_until_0', 'de918e6d ...)
"""
async with tractor.open_nursery(
debug_mode=True,
# loglevel='cancel',
) as n:
async with tractor.open_nursery() as n:
# spawn both actors
portal = await n.run_in_actor(
spawn_until,
depth=3,
name='spawner0',
)
portal1 = await n.run_in_actor(
spawn_until,
depth=4,
name='spawner1',
)
# TODO: test this case as well where the parent doesn't see
# the sub-actor errors by default and instead expect a user
# ctrl-c to kill the root.
with trio.move_on_after(3):
await trio.sleep_forever()
portal = await n.run_in_actor('spawner0', spawn_until, depth=3)
portal1 = await n.run_in_actor('spawner1', spawn_until, depth=4)
# gah still an issue here.
await portal.result()
# should never get here
await portal1.result()
# await portal.result()
# await portal1.result()
if __name__ == '__main__':
trio.run(main)
tractor.run(main, debug_mode=True)

View File

@ -1,25 +1,16 @@
'''
Test that a nested nursery will avoid clobbering
the debugger latched by a broken child.
'''
import trio
import tractor
async def name_error():
"Raise a ``NameError``"
getattr(doggypants) # noqa
getattr(doggypants)
async def spawn_error():
""""A nested nursery that triggers another ``NameError``.
"""
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
name_error,
name='name_error_1',
)
portal = await n.run_in_actor('name_error_1', name_error)
return await portal.result()
@ -32,26 +23,12 @@ async def main():
python -m tractor._child --uid ('name_error', 'a7caf490 ...)
`-python -m tractor._child --uid ('spawn_error', '52ee14a5 ...)
`-python -m tractor._child --uid ('name_error', '3391222c ...)
Order of failure:
- nested name_error sub-sub-actor
- root actor should then fail on assert
- program termination
"""
async with tractor.open_nursery(
debug_mode=True,
# loglevel='cancel',
) as n:
async with tractor.open_nursery() as n:
# spawn both actors
portal = await n.run_in_actor(
name_error,
name='name_error',
)
portal1 = await n.run_in_actor(
spawn_error,
name='spawn_error',
)
portal = await n.run_in_actor('name_error', name_error)
portal1 = await n.run_in_actor('spawn_error', spawn_error)
# trigger a root actor error
assert 0
@ -63,4 +40,4 @@ async def main():
if __name__ == '__main__':
trio.run(main)
tractor.run(main, debug_mode=True)

View File

@ -11,17 +11,14 @@ async def breakpoint_forever():
async def name_error():
"Raise a ``NameError``"
getattr(doggypants) # noqa
getattr(doggypants)
async def spawn_error():
""""A nested nursery that triggers another ``NameError``.
"""
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
name_error,
name='name_error_1',
)
portal = await n.run_in_actor('name_error_1', name_error)
return await portal.result()
@ -36,17 +33,15 @@ async def main():
`-python -m tractor._child --uid ('spawn_error', '52ee14a5 ...)
`-python -m tractor._child --uid ('name_error', '3391222c ...)
"""
async with tractor.open_nursery(
debug_mode=True,
) as n:
async with tractor.open_nursery() as n:
# Spawn both actors, don't bother with collecting results
# (would result in a different debugger outcome due to parent's
# cancellation).
await n.run_in_actor(breakpoint_forever)
await n.run_in_actor(name_error)
await n.run_in_actor(spawn_error)
await n.run_in_actor('bp_forever', breakpoint_forever)
await n.run_in_actor('name_error', name_error)
await n.run_in_actor('spawn_error', spawn_error)
if __name__ == '__main__':
trio.run(main)
tractor.run(main, debug_mode=True)

View File

@ -1,40 +0,0 @@
import trio
import tractor
@tractor.context
async def just_sleep(
ctx: tractor.Context,
**kwargs,
) -> None:
'''
Start and sleep.
'''
await ctx.started()
await trio.sleep_forever()
async def main() -> None:
async with tractor.open_nursery(
debug_mode=True,
) as n:
portal = await n.start_actor(
'ctx_child',
# XXX: we don't enable the current module in order
# to trigger `ModuleNotFound`.
enable_modules=[],
)
async with portal.open_context(
just_sleep, # taken from pytest parameterization
) as (ctx, sent):
raise KeyboardInterrupt
if __name__ == '__main__':
trio.run(main)

View File

@ -1,27 +0,0 @@
import trio
import tractor
async def die():
raise RuntimeError
async def main():
async with tractor.open_nursery() as tn:
debug_actor = await tn.start_actor(
'debugged_boi',
enable_modules=[__name__],
debug_mode=True,
)
crash_boi = await tn.start_actor(
'crash_boi',
enable_modules=[__name__],
# debug_mode=True,
)
async with trio.open_nursery() as n:
n.start_soon(debug_actor.run, die)
n.start_soon(crash_boi.run, die)
if __name__ == '__main__':
trio.run(main)

View File

@ -1,24 +0,0 @@
import os
import sys
import trio
import tractor
async def main() -> None:
async with tractor.open_nursery(debug_mode=True) as an:
assert os.environ['PYTHONBREAKPOINT'] == 'tractor._debug._set_trace'
# TODO: an assert that verifies the hook has indeed been, hooked
# XD
assert sys.breakpointhook is not tractor._debug._set_trace
breakpoint()
# TODO: an assert that verifies the hook is unhooked..
assert sys.breakpointhook
breakpoint()
if __name__ == '__main__':
trio.run(main)

View File

@ -4,16 +4,12 @@ import tractor
async def main():
async with tractor.open_root_actor(
debug_mode=True,
):
await trio.sleep(0.1)
await trio.sleep(0.1)
await tractor.breakpoint()
await tractor.breakpoint()
await trio.sleep(0.1)
await trio.sleep(0.1)
if __name__ == '__main__':
trio.run(main)
tractor.run(main, debug_mode=True)

View File

@ -1,15 +1,11 @@
import trio
import tractor
async def main():
async with tractor.open_root_actor(
debug_mode=True,
):
while True:
await tractor.breakpoint()
while True:
await tractor.breakpoint()
if __name__ == '__main__':
trio.run(main)
tractor.run(main, debug_mode=True)

View File

@ -1,13 +1,9 @@
import trio
import tractor
async def main():
async with tractor.open_root_actor(
debug_mode=True,
):
assert 0
assert 0
if __name__ == '__main__':
trio.run(main)
tractor.run(main, debug_mode=True)

View File

@ -1,10 +1,9 @@
import trio
import tractor
async def name_error():
"Raise a ``NameError``"
getattr(doggypants) # noqa
getattr(doggypants)
async def spawn_until(depth=0):
@ -13,14 +12,10 @@ async def spawn_until(depth=0):
async with tractor.open_nursery() as n:
if depth < 1:
# await n.run_in_actor('breakpoint_forever', breakpoint_forever)
await n.run_in_actor(name_error)
await n.run_in_actor('name_error', name_error)
else:
depth -= 1
await n.run_in_actor(
spawn_until,
depth=depth,
name=f'spawn_until_{depth}',
)
await n.run_in_actor(f'spawn_until_{depth}', spawn_until, depth=depth)
async def main():
@ -38,28 +33,16 @@ async def main():
python -m tractor._child --uid ('name_error', '6c2733b8 ...)
"""
async with tractor.open_nursery(
debug_mode=True,
loglevel='warning'
) as n:
async with tractor.open_nursery() as n:
# spawn both actors
portal = await n.run_in_actor(
spawn_until,
depth=0,
name='spawner0',
)
portal1 = await n.run_in_actor(
spawn_until,
depth=1,
name='spawner1',
)
portal = await n.run_in_actor('spawner0', spawn_until, depth=0)
portal1 = await n.run_in_actor('spawner1', spawn_until, depth=1)
# nursery cancellation should be triggered due to propagated
# error from child.
# nursery cancellation should be triggered due to propagated error
await portal.result()
await portal1.result()
if __name__ == '__main__':
trio.run(main)
tractor.run(main, debug_mode=True, loglevel='warning')

View File

@ -1,31 +0,0 @@
import trio
import tractor
async def key_error():
"Raise a ``NameError``"
return {}['doggy']
async def main():
"""Root dies
"""
async with tractor.open_nursery(
debug_mode=True,
loglevel='debug'
) as n:
# spawn both actors
portal = await n.run_in_actor(key_error)
# XXX: originally a bug caused by this is where root would enter
# the debugger and clobber the tty used by the repl even though
# child should have it locked.
with trio.fail_after(1):
await trio.Event().wait()
if __name__ == '__main__':
trio.run(main)

View File

@ -1,50 +0,0 @@
import tractor
import trio
async def gen():
yield 'yo'
await tractor.breakpoint()
yield 'yo'
await tractor.breakpoint()
@tractor.context
async def just_bp(
ctx: tractor.Context,
) -> None:
await ctx.started()
await tractor.breakpoint()
# TODO: bps and errors in this call..
async for val in gen():
print(val)
# await trio.sleep(0.5)
# prematurely destroy the connection
await ctx.chan.aclose()
# THIS CAUSES AN UNRECOVERABLE HANG
# without latest ``pdbpp``:
assert 0
async def main():
async with tractor.open_nursery(
debug_mode=True,
) as n:
p = await n.start_actor(
'bp_boi',
enable_modules=[__name__],
)
async with p.open_context(
just_bp,
) as (ctx, first):
await trio.sleep_forever()
if __name__ == '__main__':
trio.run(main)

View File

@ -12,15 +12,14 @@ async def breakpoint_forever():
async def main():
async with tractor.open_nursery(
debug_mode=True,
) as n:
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
'breakpoint_forever',
breakpoint_forever,
)
await portal.result()
if __name__ == '__main__':
trio.run(main)
tractor.run(main, debug_mode=True)

View File

@ -1,4 +1,3 @@
import trio
import tractor
@ -7,13 +6,11 @@ async def name_error():
async def main():
async with tractor.open_nursery(
debug_mode=True,
) as n:
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(name_error)
portal = await n.run_in_actor('name_error', name_error)
await portal.result()
if __name__ == '__main__':
trio.run(main)
tractor.run(main, debug_mode=True)

View File

@ -7,7 +7,7 @@ import tractor
async def stream_data(seed):
for i in range(seed):
yield i
await trio.sleep(0.0001) # trigger scheduler
await trio.sleep(0) # trigger scheduler
# this is the third actor; the aggregator
@ -21,7 +21,7 @@ async def aggregate(seed):
# fork point
portal = await nursery.start_actor(
name=f'streamer_{i}',
enable_modules=[__name__],
rpc_module_paths=[__name__],
)
portals.append(portal)
@ -29,13 +29,12 @@ async def aggregate(seed):
send_chan, recv_chan = trio.open_memory_channel(500)
async def push_to_chan(portal, send_chan):
# TODO: https://github.com/goodboy/tractor/issues/207
async with send_chan:
async with portal.open_stream_from(stream_data, seed=seed) as stream:
async for value in stream:
# leverage trio's built-in backpressure
await send_chan.send(value)
async for value in await portal.run(
__name__, 'stream_data', seed=seed
):
# leverage trio's built-in backpressure
await send_chan.send(value)
print(f"FINISHED ITERATING {portal.channel.uid}")
@ -68,31 +67,24 @@ async def aggregate(seed):
# this is the main actor and *arbiter*
async def main():
# a nursery which spawns "actors"
async with tractor.open_nursery(
arbiter_addr=('127.0.0.1', 1616)
) as nursery:
async with tractor.open_nursery() as nursery:
seed = int(1e3)
import time
pre_start = time.time()
portal = await nursery.start_actor(
name='aggregator',
enable_modules=[__name__],
)
async with portal.open_stream_from(
portal = await nursery.run_in_actor(
'aggregator',
aggregate,
seed=seed,
) as stream:
)
start = time.time()
# the portal call returns exactly what you'd expect
# as if the remote "aggregate" function was called locally
result_stream = []
async for value in stream:
result_stream.append(value)
await portal.cancel_actor()
start = time.time()
# the portal call returns exactly what you'd expect
# as if the remote "aggregate" function was called locally
result_stream = []
async for value in await portal.result():
result_stream.append(value)
print(f"STREAM TIME = {time.time() - start}")
print(f"STREAM + SPAWN TIME = {time.time() - pre_start}")
@ -101,4 +93,4 @@ async def main():
if __name__ == '__main__':
final_stream = trio.run(main)
final_stream = tractor.run(main, arbiter_addr=('127.0.0.1', 1616))

View File

@ -1,92 +0,0 @@
'''
An SC compliant infected ``asyncio`` echo server.
'''
import asyncio
from statistics import mean
import time
import trio
import tractor
async def aio_echo_server(
to_trio: trio.MemorySendChannel,
from_trio: asyncio.Queue,
) -> None:
# a first message must be sent **from** this ``asyncio``
# task or the ``trio`` side will never unblock from
# ``tractor.to_asyncio.open_channel_from():``
to_trio.send_nowait('start')
# XXX: this uses an ``from_trio: asyncio.Queue`` currently but we
# should probably offer something better.
while True:
# echo the msg back
to_trio.send_nowait(await from_trio.get())
await asyncio.sleep(0)
@tractor.context
async def trio_to_aio_echo_server(
ctx: tractor.Context,
):
# this will block until the ``asyncio`` task sends a "first"
# message.
async with tractor.to_asyncio.open_channel_from(
aio_echo_server,
) as (first, chan):
assert first == 'start'
await ctx.started(first)
async with ctx.open_stream() as stream:
async for msg in stream:
await chan.send(msg)
out = await chan.receive()
# echo back to parent actor-task
await stream.send(out)
async def main():
async with tractor.open_nursery() as n:
p = await n.start_actor(
'aio_server',
enable_modules=[__name__],
infect_asyncio=True,
)
async with p.open_context(
trio_to_aio_echo_server,
) as (ctx, first):
assert first == 'start'
count = 0
async with ctx.open_stream() as stream:
delays = []
send = time.time()
await stream.send(count)
async for msg in stream:
recv = time.time()
delays.append(recv - send)
assert msg == count
count += 1
send = time.time()
await stream.send(count)
if count >= 1e3:
break
print(f'mean round trip rate (Hz): {1/mean(delays)}')
await p.cancel_actor()
if __name__ == '__main__':
trio.run(main)

View File

@ -1,49 +0,0 @@
import trio
import click
import tractor
import pydantic
# from multiprocessing import shared_memory
@tractor.context
async def just_sleep(
ctx: tractor.Context,
**kwargs,
) -> None:
'''
Start and sleep.
'''
await ctx.started()
await trio.sleep_forever()
async def main() -> None:
proc = await trio.open_process((
'python',
'-c',
'import trio; trio.run(trio.sleep_forever)',
))
await proc.wait()
# await trio.sleep_forever()
# async with tractor.open_nursery() as n:
# portal = await n.start_actor(
# 'rpc_server',
# enable_modules=[__name__],
# )
# async with portal.open_context(
# just_sleep, # taken from pytest parameterization
# ) as (ctx, sent):
# await trio.sleep_forever()
if __name__ == '__main__':
import time
# time.sleep(999)
trio.run(main)

View File

@ -1,46 +0,0 @@
import trio
import tractor
log = tractor.log.get_logger('multiportal')
async def stream_data(seed=10):
log.info("Starting stream task")
for i in range(seed):
yield i
await trio.sleep(0) # trigger scheduler
async def stream_from_portal(p, consumed):
async with p.open_stream_from(stream_data) as stream:
async for item in stream:
if item in consumed:
consumed.remove(item)
else:
consumed.append(item)
async def main():
async with tractor.open_nursery(loglevel='info') as an:
p = await an.start_actor('stream_boi', enable_modules=[__name__])
consumed = []
async with trio.open_nursery() as n:
for i in range(2):
n.start_soon(stream_from_portal, p, consumed)
# both streaming consumer tasks have completed and so we should
# have nothing in our list thanks to single threadedness
assert not consumed
await an.cancel()
if __name__ == '__main__':
trio.run(main)

View File

@ -1,43 +0,0 @@
import time
import concurrent.futures
import math
PRIMES = [
112272535095293,
112582705942171,
112272535095293,
115280095190773,
115797848077099,
1099726899285419]
def is_prime(n):
if n < 2:
return False
if n == 2:
return True
if n % 2 == 0:
return False
sqrt_n = int(math.floor(math.sqrt(n)))
for i in range(3, sqrt_n + 1, 2):
if n % i == 0:
return False
return True
def main():
with concurrent.futures.ProcessPoolExecutor() as executor:
start = time.time()
for number, prime in zip(PRIMES, executor.map(is_prime, PRIMES)):
print('%d is prime: %s' % (number, prime))
print(f'processing took {time.time() - start} seconds')
if __name__ == '__main__':
start = time.time()
main()
print(f'script took {time.time() - start} seconds')

View File

@ -1,119 +0,0 @@
"""
Demonstration of the prime number detector example from the
``concurrent.futures`` docs:
https://docs.python.org/3/library/concurrent.futures.html#processpoolexecutor-example
This uses no extra threads, fancy semaphores or futures; all we need
is ``tractor``'s channels.
"""
from contextlib import asynccontextmanager
from typing import Callable
import itertools
import math
import time
import tractor
import trio
from async_generator import aclosing
PRIMES = [
112272535095293,
112582705942171,
112272535095293,
115280095190773,
115797848077099,
1099726899285419,
]
async def is_prime(n):
if n < 2:
return False
if n == 2:
return True
if n % 2 == 0:
return False
sqrt_n = int(math.floor(math.sqrt(n)))
for i in range(3, sqrt_n + 1, 2):
if n % i == 0:
return False
return True
@asynccontextmanager
async def worker_pool(workers=4):
"""Though it's a trivial special case for ``tractor``, the well
known "worker pool" seems to be the defacto "but, I want this
process pattern!" for most parallelism pilgrims.
Yes, the workers stay alive (and ready for work) until you close
the context.
"""
async with tractor.open_nursery() as tn:
portals = []
snd_chan, recv_chan = trio.open_memory_channel(len(PRIMES))
for i in range(workers):
# this starts a new sub-actor (process + trio runtime) and
# stores its "portal" for later use to "submit jobs" (ugh).
portals.append(
await tn.start_actor(
f'worker_{i}',
enable_modules=[__name__],
)
)
async def _map(
worker_func: Callable[[int], bool],
sequence: list[int]
) -> list[bool]:
# define an async (local) task to collect results from workers
async def send_result(func, value, portal):
await snd_chan.send((value, await portal.run(func, n=value)))
async with trio.open_nursery() as n:
for value, portal in zip(sequence, itertools.cycle(portals)):
n.start_soon(
send_result,
worker_func,
value,
portal
)
# deliver results as they arrive
for _ in range(len(sequence)):
yield await recv_chan.receive()
# deliver the parallel "worker mapper" to user code
yield _map
# tear down all "workers" on pool close
await tn.cancel()
async def main():
async with worker_pool() as actor_map:
start = time.time()
async with aclosing(actor_map(is_prime, PRIMES)) as results:
async for number, prime in results:
print(f'{number} is prime: {prime}')
print(f'processing took {time.time() - start} seconds')
if __name__ == '__main__':
start = time.time()
trio.run(main)
print(f'script took {time.time() - start} seconds')

View File

@ -1,43 +0,0 @@
"""
Run with a process monitor from a terminal using::
$TERM -e watch -n 0.1 "pstree -a $$" \
& python examples/parallelism/single_func.py \
&& kill $!
"""
import os
import tractor
import trio
async def burn_cpu():
pid = os.getpid()
# burn a core @ ~ 50kHz
for _ in range(50000):
await trio.sleep(1/50000/50)
return os.getpid()
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(burn_cpu)
# burn rubber in the parent too
await burn_cpu()
# wait on result from target function
pid = await portal.result()
# end of nursery block
print(f"Collected subproc {pid}")
if __name__ == '__main__':
trio.run(main)

View File

@ -1,43 +0,0 @@
"""
Run with a process monitor from a terminal using::
$TERM -e watch -n 0.1 "pstree -a $$" \
& python examples/parallelism/we_are_processes.py \
&& kill $!
"""
from multiprocessing import cpu_count
import os
import tractor
import trio
async def target():
print(
f"Yo, i'm '{tractor.current_actor().name}' "
f"running in pid {os.getpid()}"
)
await trio.sleep_forever()
async def main():
async with tractor.open_nursery() as n:
for i in range(cpu_count()):
await n.run_in_actor(target, name=f'worker_{i}')
print('This process tree will self-destruct in 1 sec...')
await trio.sleep(1)
# you could have done this yourself
raise Exception('Self Destructed')
if __name__ == '__main__':
try:
trio.run(main)
except Exception:
print('Zombies Contained')

View File

@ -1,44 +0,0 @@
import trio
import tractor
async def sleepy_jane():
uid = tractor.current_actor().uid
print(f'Yo i am actor {uid}')
await trio.sleep_forever()
async def main():
'''
Spawn a flat actor cluster, with one process per
detected core.
'''
portal_map: dict[str, tractor.Portal]
results: dict[str, str]
# look at this hip new syntax!
async with (
tractor.open_actor_cluster(
modules=[__name__]
) as portal_map,
trio.open_nursery() as n,
):
for (name, portal) in portal_map.items():
n.start_soon(portal.run, sleepy_jane)
await trio.sleep(0.5)
# kill the cluster with a cancel
raise KeyboardInterrupt
if __name__ == '__main__':
try:
trio.run(main)
except KeyboardInterrupt:
pass

View File

@ -1,4 +1,3 @@
import trio
import tractor
@ -12,11 +11,11 @@ async def main():
for i in range(3):
real_actors.append(await n.start_actor(
f'actor_{i}',
enable_modules=[__name__],
rpc_module_paths=[__name__],
))
# start one actor that will fail immediately
await n.run_in_actor(assert_err)
await n.run_in_actor('extra', assert_err)
# should error here with a ``RemoteActorError`` containing
# an ``AssertionError`` and all the other actors have been cancelled
@ -25,6 +24,6 @@ async def main():
if __name__ == '__main__':
try:
# also raises
trio.run(main)
tractor.run(main)
except tractor.RemoteActorError:
print("Look Maa that actor failed hard, hehhh!")

View File

@ -1,72 +0,0 @@
import trio
import tractor
@tractor.context
async def simple_rpc(
ctx: tractor.Context,
data: int,
) -> None:
'''Test a small ping-pong 2-way streaming server.
'''
# signal to parent that we're up much like
# ``trio_typing.TaskStatus.started()``
await ctx.started(data + 1)
async with ctx.open_stream() as stream:
count = 0
async for msg in stream:
assert msg == 'ping'
await stream.send('pong')
count += 1
else:
assert count == 10
async def main() -> None:
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'rpc_server',
enable_modules=[__name__],
)
# XXX: syntax requires py3.9
async with (
portal.open_context(
simple_rpc, # taken from pytest parameterization
data=10,
) as (ctx, sent),
ctx.open_stream() as stream,
):
assert sent == 11
count = 0
# receive msgs using async for style
await stream.send('ping')
async for msg in stream:
assert msg == 'pong'
await stream.send('ping')
count += 1
if count >= 9:
break
# explicitly teardown the daemon-actor
await portal.cancel_actor()
if __name__ == '__main__':
trio.run(main)

View File

@ -1,9 +1,7 @@
import trio
import tractor
tractor.log.get_console_log("INFO")
async def main(service_name):
async with tractor.open_nursery() as an:
@ -19,4 +17,4 @@ async def main(service_name):
if __name__ == '__main__':
trio.run(main, 'some_actor_name')
tractor.run(main, 'some_actor_name')

nooz/.gitignore vendored
View File

@ -1 +0,0 @@
!.gitignore

View File

@ -1,16 +0,0 @@
Strictly support Python 3.10+, start runtime machinery reorg
Since we want to push forward using the new `match:` syntax for our
internal RPC-msg loops, we officially drop 3.9 support for the next
release which should coincide well with the first release of 3.11.
This patch set also officially removes the ``tractor.run()`` API (which
has been deprecated for some time) as well as starts an initial re-org
of the internal runtime core by:
- renaming ``tractor._actor`` -> ``._runtime``
- moving the ``._runtime.Actor._process_messages()`` and
``._async_main()`` to be module level singleton-task-functions since
they are only started once for each connection and actor spawn
respectively; this internal API thus looks more similar to (at the
time of writing) the ``trio``-internals in ``trio._core._run``.
- officially remove ``tractor.run()``, now deprecated for some time.
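For anyone migrating, the user-facing change is essentially the
following (a sketch mirroring the updated examples in this patch set):

.. code:: python

    import trio
    import tractor


    async def main():
        async with tractor.open_root_actor():
            ...  # your actor tree here


    # before (now removed): tractor.run(main)
    trio.run(main)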

View File

@ -1,4 +0,0 @@
Only set `._debug.Lock.local_pdb_complete` if it has been created.
This can be triggered by a very rare race condition (and thus we have no
working test yet) but it is known to exist in (a) consumer project(s).

View File

@ -1,25 +0,0 @@
Add support for ``trio >= 0.22`` and support for the new Python 3.11
``[Base]ExceptionGroup`` from `pep 654`_ via the backported
`exceptiongroup`_ package and some final fixes to the debug mode
subsystem.
This port ended up driving some (hopefully) final fixes to our debugger
subsystem including the solution to all lingering stdstreams locking
race-conditions and deadlock scenarios. This includes extending the
debugger tests suite as well as cancellation and ``asyncio`` mode cases.
Some of the notable details:
- always reverting to the ``trio`` SIGINT handler when leaving debug
mode.
- bypassing child attempts to acquire the debug lock when detected
to be amidst actor-runtime-cancellation.
- allowing the root actor to cancel local but IPC-stale subactor
requests-tasks for the debug lock when in a "no IPC peers" state.
Further we refined our ``ActorNursery`` semantics to be more similar to
``trio`` in the sense that parent task errors are always packed into the
actor-nursery emitted exception group and adjusted all tests and
examples accordingly.
.. _pep 654: https://peps.python.org/pep-0654/#handling-exception-groups
.. _exceptiongroup: https://github.com/python-trio/exceptiongroup

View File

@ -1,5 +0,0 @@
Establish an explicit "backend spawning" method table; use it from CI
More clearly lays out the current set of (3) backends: ``['trio',
'mp_spawn', 'mp_forkserver']`` and adjusts the ``._spawn.py`` internals
as well as the test suite to accommodate.
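Selection from user code is unchanged; roughly (a sketch, with the
default still picked per-system as per the docs):

.. code:: python

    import tractor


    async def main():
        async with tractor.open_nursery(
            # one of: 'trio', 'mp_spawn', 'mp_forkserver'
            start_method='mp_forkserver',
        ) as n:
            ...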

View File

@ -1,4 +0,0 @@
Add ``key: Callable[..., Hashable]`` support to ``.trionics.maybe_open_context()``
Gives users finer grained control over cache hit behaviour using
a callable which receives the input ``kwargs: dict``.

View File

@ -1,41 +0,0 @@
Add support for debug-lock blocking using a ``._debug.Lock._blocked:
set[tuple]`` and add ids when no-more IPC connections with the
root actor are detected.
This is an enhancement which (mostly) solves a lingering debugger
locking race case we needed to handle:
- child crashes acquires TTY lock in root and attaches to ``pdb``
- child IPC goes down such that all channels to the root are broken
/ non-functional.
- root is stuck thinking the child is still in debug even though it
can't be contacted and the child actor machinery hasn't been
cancelled by its parent.
- root gets stuck in deadlock with child since it won't send a cancel
request until the child is finished debugging (to avoid clobbering
a child that is actually using the debugger), but the child can't
unlock the debugger bc IPC is down and it can't contact the root.
To avoid this scenario add debug lock blocking list via
`._debug.Lock._blocked: set[tuple]` which holds actor uids for any actor
that is detected by the root as having no transport channel connections
(of which at least one should exist if this sub-actor at some point
acquired the debug lock). The root consequently checks this list for any
actor that tries to (re)acquire the lock and blocks with
a ``ContextCancelled``. Further, when a debug condition is tested in
``._runtime._invoke``, the context's ``._enter_debugger_on_cancel`` is
set to `False` if the actor was put on the block list then all
post-mortem / crash handling will be bypassed for that task.
In theory this approach to block list management may cause problems
where some nested child actor acquires and releases the lock multiple
times and it gets stuck on the block list after the first use? If this
turns out to be an issue we can try changing the strategy so blocks are
only added when the root has zero IPC peers left?
Further, this adds a root-locking-task side cancel scope,
``Lock._root_local_task_cs_in_debug``, which can be ``.cancel()``-ed by the root
runtime when a stale lock is detected during the IPC channel testing.
However, right now we're NOT using this since it seems to cause test
failures likely due to causing pre-mature cancellation and maybe needs
a bit more experimenting?

View File

@ -1,19 +0,0 @@
Rework our ``.trionics.BroadcastReceiver`` internals to avoid method
recursion and approach a design and interface closer to ``trio``'s
``MemoryReceiveChannel``.
The details of the internal changes include:
- implementing a ``BroadcastReceiver.receive_nowait()`` and using it
within the async ``.receive()`` thus avoiding recursion from
``.receive()``.
- failing over to an internal ``._receive_from_underlying()`` when the
``_nowait()`` call raises ``trio.WouldBlock``
- adding ``BroadcastState.statistics()`` for debugging and testing both
internals and by users.
- add an internal ``BroadcastReceiver._raise_on_lag: bool`` which can be
set to avoid ``Lagged`` raising for possible use cases where a user
wants to choose between a [cheap or nasty
pattern](https://zguide.zeromq.org/docs/chapter7/#The-Cheap-or-Nasty-Pattern)
for the particular stream (we use this in ``piker``'s dark clearing
engine to avoid fast feeds breaking during HFT periods).
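The resulting receive flow then looks roughly like this toy model
(a sketch of the described design, **not** the actual internals):

.. code:: python

    import trio


    class ToyBroadcastReceiver:
        def __init__(self, rx: trio.MemoryReceiveChannel):
            self._rx = rx

        def receive_nowait(self):
            # raises ``trio.WouldBlock`` when nothing is queued
            return self._rx.receive_nowait()

        async def _receive_from_underlying(self):
            return await self._rx.receive()

        async def receive(self):
            # fast path first, then fail over: no method recursion
            try:
                return self.receive_nowait()
            except trio.WouldBlock:
                return await self._receive_from_underlying()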

View File

@ -1,11 +0,0 @@
Always ``list``-cast the ``mngrs`` input to
``.trionics.gather_contexts()`` and ensure its size otherwise raise
a ``ValueError``.
Turns out that trying to pass an inline-style generator comprehension
doesn't seem to work inside the ``async with`` expression? Further, in
such a case we can get a hang waiting on the all-entered event
completion when the internal mngrs iteration is a noop. Instead we
always greedily check a size and error on empty input; the lazy
iteration of a generator input is not beneficial anyway since we're
entering all manager instances in concurrent tasks.
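In other words the entry guard now behaves roughly like the following
(a sketch of the described check, not the actual source):

.. code:: python

    from typing import AsyncContextManager, Sequence


    def greedily_realize(
        mngrs: Sequence[AsyncContextManager],
    ) -> list[AsyncContextManager]:
        # realize any generator input up front and reject empties
        # before entering anything
        mngrs = list(mngrs)
        if not mngrs:
            raise ValueError('`mngrs` input is empty?')
        return mngrs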

View File

@ -1,15 +0,0 @@
Fixes to ensure IPC (channel) breakage doesn't result in hung actor
trees; the zombie reaping and general supervision machinery will always
clean up and terminate.
This includes not only the (mostly minor) fixes to solve these cases but
also a new extensive test suite in `test_advanced_faults.py` with an
accompanying highly configurable example module-script in
`examples/advanced_faults/ipc_failure_during_stream.py`. Tests ensure we
never get hangs or zombies despite operating in debug mode and attempt to
simulate all possible IPC transport failure cases for a local-host actor
tree.
Further we simplify `Context.open_stream.__aexit__()` to just call
`MsgStream.aclose()` directly more or less avoiding a pure duplicate
code path.
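The exit simplification amounts to delegating straight to the stream's
own closer, roughly (``msg_stream`` here is a hypothetical stand-in for
the ``MsgStream`` allocated for the context)::

    from contextlib import asynccontextmanager as acm

    @acm
    async def open_stream(msg_stream):
        try:
            yield msg_stream
        finally:
            # on exit just delegate to the stream's own close logic
            # instead of maintaining a duplicate teardown path.
            await msg_stream.aclose()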

View File

@ -1,10 +0,0 @@
Always redraw the `pdbpp` prompt on `SIGINT` during REPL use.
There were recent changes to do with Python 3.10 that required us to pin
to a specific commit in `pdbpp`, which have since been fixed minus this
last issue with `SIGINT` shielding: the `(Pdb++)` prompt being clobbered
or not shown at all on ctrl-c by the user. This repairs all that by
firstly removing the standard KBI intercepting of the std lib's
`pdb.Pdb._cmdloop()` as well as ensuring that only the actor with REPL
control ever reports `SIGINT` handler log msgs and prompt redraws. With
this we move back to using the pypi `pdbpp` release.
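The redraw itself is of the following flavor; ``redraw_prompt()`` is
a hypothetical helper, the real fix lives inside tractor's `SIGINT`
handler::

    import pdb

    def redraw_prompt(repl: pdb.Pdb) -> None:
        # re-render the `(Pdb++)` prompt after the handler runs so
        # ctrl-c doesn't leave the user staring at a clobbered line.
        repl.stdout.write('\n' + repl.prompt)
        repl.stdout.flush()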

View File

@ -1,7 +0,0 @@
Drop `trio.Process.aclose()` usage, copy into our spawning code.
The details are laid out in https://github.com/goodboy/tractor/issues/330.
`trio` changed its process running quite some time ago; this just copies
out the small bit we needed (from the old `.aclose()`) for hard kills
where a soft runtime cancel request fails and our "zombie killer"
implementation kicks in.
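The copied-out bit is essentially the hard-kill tail of the old
``.aclose()``, along the lines of this sketch (names and timeout value
are ours; see the issue above for the real code)::

    import trio

    async def hard_kill(proc: trio.Process, timeout: float = 3) -> None:
        # a soft (graceful) runtime cancel has already failed by
        # the time we get here.
        proc.terminate()
        with trio.move_on_after(timeout):
            await proc.wait()
        if proc.returncode is None:
            # "zombie killer" escalation: SIGKILL and reap.
            proc.kill()
            await proc.wait()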

View File

@ -1,15 +0,0 @@
Switch to using the fork & fix of `pdb++`, `pdbp`:
https://github.com/mdmintz/pdbp
Allows us to sidestep a variety of issues that aren't being maintained
in the upstream project thanks to the hard work of @mdmintz!
We also include some default settings adjustments as per recent
development on the fork:
- sticky mode is still turned on by default but now activates when
  using the `ll` repl command.
- turn off line truncation by default to avoid inter-line gaps when
  resizing the terminal during use.
- when using the backtrace cmd either by `w` or `bt`, the config
  automatically switches to non-sticky mode.
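As a sketch, the adjusted defaults look something like the below,
assuming the fork keeps `pdb++`'s ``DefaultConfig`` subclassing API
(attribute names as we recall them)::

    import pdbp

    class Config(pdbp.DefaultConfig):
        # sticky mode still default-on, (re)activated via `ll`
        sticky_by_default = True
        # no truncation -> no inter-line gaps on terminal resize
        truncate_long_lines = False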

View File

@ -1,8 +0,0 @@
See both the `towncrier docs`_ and the `pluggy release readme`_ for hot
tips. We basically have the most minimal setup and release process right
now and use the default `fragment set`_.
.. _towncrier docs: https://github.com/twisted/towncrier#quick-start
.. _pluggy release readme: https://github.com/pytest-dev/pluggy/blob/main/changelog/README.rst
.. _fragment set: https://github.com/twisted/towncrier#news-fragments

View File

@ -1,37 +0,0 @@
{% for section in sections %}
{% set underline = "-" %}
{% if section %}
{{section}}
{{ underline * section|length }}{% set underline = "~" %}
{% endif %}
{% if sections[section] %}
{% for category, val in definitions.items() if category in sections[section] %}
{{ definitions[category]['name'] }}
{{ underline * definitions[category]['name']|length }}
{% if definitions[category]['showcontent'] %}
{% for text, values in sections[section][category]|dictsort(by='value') %}
{% set issue_joiner = joiner(', ') %}
- {% for value in values|sort %}{{ issue_joiner() }}`{{ value }} <https://github.com/goodboy/tractor/issues/{{ value[1:] }}>`_{% endfor %}: {{ text }}
{% endfor %}
{% else %}
- {{ sections[section][category]['']|sort|join(', ') }}
{% endif %}
{% if sections[section][category]|length == 0 %}
No significant changes.
{% else %}
{% endif %}
{% endfor %}
{% else %}
No significant changes.
{% endif %}
{% endfor %}

View File

@ -1,28 +0,0 @@
[tool.towncrier]
package = "tractor"
filename = "NEWS.rst"
directory = "nooz/"
version = "0.1.0a6"
title_format = "tractor {version} ({project_date})"
template = "nooz/_template.rst"
all_bullets = true
[[tool.towncrier.type]]
directory = "feature"
name = "Features"
showcontent = true
[[tool.towncrier.type]]
directory = "bugfix"
name = "Bug Fixes"
showcontent = true
[[tool.towncrier.type]]
directory = "doc"
name = "Improved Documentation"
showcontent = true
[[tool.towncrier.type]]
directory = "trivial"
name = "Trivial/Internal Changes"
showcontent = true

View File

@ -1,2 +1,2 @@
sphinx
sphinx_book_theme
sphinx_typlog_theme

View File

@ -1,8 +1,6 @@
pytest
pytest-trio
pytest-timeout
pdbp
pdbpp
mypy
trio_typing
pexpect
towncrier

View File

@ -1,97 +1,62 @@
#!/usr/bin/env python
#
# tractor: structured concurrent "actors".
# tractor: a trionic actor model built on `multiprocessing` and `trio`
#
# Copyright 2018-eternity Tyler Goodlet.
# Copyright (C) 2018-2020 Tyler Goodlet
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup
with open('docs/README.rst', encoding='utf-8') as f:
with open('README.rst', encoding='utf-8') as f:
readme = f.read()
setup(
name="tractor",
version='0.1.0a6dev0', # alpha zone
description='structured concurrent `trio`-"actors"',
version='0.1.0.alpha0',
description='A trionic actor model built on `multiprocessing` and `trio`',
long_description=readme,
license='AGPLv3',
license='GPLv3',
author='Tyler Goodlet',
maintainer='Tyler Goodlet',
maintainer_email='goodboy_foss@protonmail.com',
maintainer_email='jgbt@protonmail.com',
url='https://github.com/goodboy/tractor',
platforms=['linux', 'windows'],
packages=[
'tractor',
'tractor.experimental',
'tractor.trionics',
'tractor.testing',
],
install_requires=[
# trio related
# proper range spec:
# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5
'trio >= 0.22',
'async_generator',
'trio_typing',
'exceptiongroup',
# tooling
'tricycle',
'trio_typing',
'colorlog',
'wrapt',
# IPC serialization
'msgspec',
# debug mode REPL
'pdbp',
# pip ref docs on these specs:
# https://pip.pypa.io/en/stable/reference/requirement-specifiers/#examples
# and pep:
# https://peps.python.org/pep-0440/#version-specifiers
# windows deps workaround for ``pdbpp``
# https://github.com/pdbpp/pdbpp/issues/498
# https://github.com/pdbpp/fancycompleter/issues/37
'pyreadline3 ; platform_system == "Windows"',
'msgpack', 'trio>0.8', 'async_generator', 'colorlog', 'wrapt',
'trio_typing', 'pdbpp',
],
tests_require=['pytest'],
python_requires=">=3.10",
python_requires=">=3.7",
keywords=[
'trio',
'async',
'concurrency',
'structured concurrency',
'actor model',
'distributed',
'multiprocessing'
"async", "concurrency", "actor model", "distributed",
'trio', 'multiprocessing'
],
classifiers=[
"Development Status :: 3 - Alpha",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)'
'Operating System :: POSIX :: Linux',
"Framework :: Trio",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: System :: Distributed Computing",

View File

@ -7,91 +7,16 @@ import os
import random
import signal
import platform
import pathlib
import time
import inspect
from functools import partial, wraps
import pytest
import trio
import tractor
# export for tests
from tractor.testing import tractor_test # noqa
pytest_plugins = ['pytester']
def tractor_test(fn):
"""
Use:
@tractor_test
async def test_whatever():
await ...
If fixtures:
- ``arb_addr`` (a socket addr tuple where arbiter is listening)
- ``loglevel`` (logging level passed to tractor internals)
- ``start_method`` (subprocess spawning backend)
are defined in the `pytest` fixture space they will be automatically
injected to tests declaring these funcargs.
"""
@wraps(fn)
def wrapper(
*args,
loglevel=None,
arb_addr=None,
start_method=None,
**kwargs
):
# __tracebackhide__ = True
if 'arb_addr' in inspect.signature(fn).parameters:
# injects test suite fixture value to test as well
# as `run()`
kwargs['arb_addr'] = arb_addr
if 'loglevel' in inspect.signature(fn).parameters:
# allows test suites to define a 'loglevel' fixture
# that activates the internal logging
kwargs['loglevel'] = loglevel
if start_method is None:
if platform.system() == "Windows":
start_method = 'trio'
if 'start_method' in inspect.signature(fn).parameters:
# set of subprocess spawning backends
kwargs['start_method'] = start_method
if kwargs:
# use explicit root actor start
async def _main():
async with tractor.open_root_actor(
# **kwargs,
arbiter_addr=arb_addr,
loglevel=loglevel,
start_method=start_method,
# TODO: only enable when pytest is passed --pdb
# debug_mode=True,
):
await fn(*args, **kwargs)
main = _main
else:
# use implicit root actor start
main = partial(fn, *args, **kwargs)
return trio.run(main)
return wrapper
_arb_addr = '127.0.0.1', random.randint(1000, 9999)
@ -114,27 +39,20 @@ no_windows = pytest.mark.skipif(
)
def repodir() -> pathlib.Path:
'''
Return the abspath to the repo directory.
'''
# 2 parents up to step up through tests/<repo_dir>
return pathlib.Path(__file__).parent.parent.absolute()
def examples_dir() -> pathlib.Path:
'''
Return the abspath to the examples directory as `pathlib.Path`.
'''
return repodir() / 'examples'
def repodir():
"""Return the abspath to the repo directory.
"""
dirname = os.path.dirname
dirpath = os.path.abspath(
dirname(dirname(os.path.realpath(__file__)))
)
return dirpath
def pytest_addoption(parser):
parser.addoption(
"--ll", action="store", dest='loglevel',
default='ERROR', help="logging level to set when testing"
default=None, help="logging level to set when testing"
)
parser.addoption(
@ -146,31 +64,31 @@ def pytest_addoption(parser):
def pytest_configure(config):
backend = config.option.spawn_backend
tractor._spawn.try_set_start_method(backend)
if backend == 'mp':
tractor._spawn.try_set_start_method('spawn')
elif backend == 'trio':
tractor._spawn.try_set_start_method(backend)
@pytest.fixture(scope='session', autouse=True)
def loglevel(request):
orig = tractor.log._default_loglevel
level = tractor.log._default_loglevel = request.config.option.loglevel
tractor.log.get_console_log(level)
yield level
tractor.log._default_loglevel = orig
@pytest.fixture(scope='session')
def spawn_backend(request) -> str:
def spawn_backend(request):
return request.config.option.spawn_backend
_ci_env: bool = os.environ.get('CI', False)
@pytest.fixture(scope='session')
def ci_env() -> bool:
"""Detect CI envoirment.
"""
return _ci_env
return os.environ.get('TRAVIS', False) or os.environ.get('CI', False)
@pytest.fixture(scope='session')
@ -180,24 +98,24 @@ def arb_addr():
def pytest_generate_tests(metafunc):
spawn_backend = metafunc.config.option.spawn_backend
if not spawn_backend:
# XXX some weird windows bug with `pytest`?
spawn_backend = 'trio'
spawn_backend = 'mp'
assert spawn_backend in ('mp', 'trio')
# TODO: maybe just use the literal `._spawn.SpawnMethodKey`?
assert spawn_backend in (
'mp_spawn',
'mp_forkserver',
'trio',
)
# NOTE: used to be used to dynamically parametrize tests for when
# you just passed --spawn-backend=`mp` on the cli, but now we expect
# that cli input to be manually specified, BUT, maybe we'll do
# something like this again in the future?
if 'start_method' in metafunc.fixturenames:
metafunc.parametrize("start_method", [spawn_backend], scope='module')
if spawn_backend == 'mp':
from multiprocessing import get_all_start_methods
methods = get_all_start_methods()
if 'fork' in methods:
# fork not available on windows, so check before
# removing XXX: the fork method is in general
# incompatible with trio's global scheduler state
methods.remove('fork')
elif spawn_backend == 'trio':
methods = ['trio']
metafunc.parametrize("start_method", methods, scope='module')
def sig_prog(proc, sig):
@ -213,22 +131,16 @@ def sig_prog(proc, sig):
@pytest.fixture
def daemon(
loglevel: str,
testdir,
arb_addr: tuple[str, int],
):
'''
Run a daemon actor as a "remote arbiter".
'''
def daemon(loglevel, testdir, arb_addr):
"""Run a daemon actor as a "remote arbiter".
"""
if loglevel in ('trace', 'debug'):
# too much logging will lock up the subproc (smh)
loglevel = 'info'
cmdargs = [
sys.executable, '-c',
"import tractor; tractor.run_daemon([], registry_addr={}, loglevel={})"
"import tractor; tractor.run_daemon([], arbiter_addr={}, loglevel={})"
.format(
arb_addr,
"'{}'".format(loglevel) if loglevel else None)

View File

@ -1,129 +0,0 @@
"""
Bidirectional streaming.
"""
import pytest
import trio
import tractor
@tractor.context
async def simple_rpc(
ctx: tractor.Context,
data: int,
) -> None:
'''
Test a small ping-pong server.
'''
# signal to parent that we're up
await ctx.started(data + 1)
print('opening stream in callee')
async with ctx.open_stream() as stream:
count = 0
while True:
try:
assert await stream.receive() == 'ping'
except trio.EndOfChannel:
assert count == 10
break
else:
print('pong')
await stream.send('pong')
count += 1
@tractor.context
async def simple_rpc_with_forloop(
ctx: tractor.Context,
data: int,
) -> None:
"""Same as previous test but using ``async for`` syntax/api.
"""
# signal to parent that we're up
await ctx.started(data + 1)
print('opening stream in callee')
async with ctx.open_stream() as stream:
count = 0
async for msg in stream:
assert msg == 'ping'
print('pong')
await stream.send('pong')
count += 1
else:
assert count == 10
@pytest.mark.parametrize(
'use_async_for',
[True, False],
)
@pytest.mark.parametrize(
'server_func',
[simple_rpc, simple_rpc_with_forloop],
)
def test_simple_rpc(server_func, use_async_for):
'''
The simplest request response pattern.
'''
async def main():
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'rpc_server',
enable_modules=[__name__],
)
async with portal.open_context(
server_func, # taken from pytest parameterization
data=10,
) as (ctx, sent):
assert sent == 11
async with ctx.open_stream() as stream:
if use_async_for:
count = 0
# receive msgs using async for style
print('ping')
await stream.send('ping')
async for msg in stream:
assert msg == 'pong'
print('ping')
await stream.send('ping')
count += 1
if count >= 9:
break
else:
# classic send/receive style
for _ in range(10):
print('ping')
await stream.send('ping')
assert await stream.receive() == 'pong'
# stream should terminate here
# final context result(s) should be consumed here in __aexit__()
await portal.cancel_actor()
trio.run(main)

View File

@ -1,193 +0,0 @@
'''
Sketchy network blackoutz, ugly byzantine gens, can you hear the
cancellation?..
'''
from functools import partial
import pytest
from _pytest.pathlib import import_path
import trio
import tractor
from conftest import (
examples_dir,
)
@pytest.mark.parametrize(
'debug_mode',
[False, True],
ids=['no_debug_mode', 'debug_mode'],
)
@pytest.mark.parametrize(
'ipc_break',
[
# no breaks
{
'break_parent_ipc_after': False,
'break_child_ipc_after': False,
},
# only parent breaks
{
'break_parent_ipc_after': 500,
'break_child_ipc_after': False,
},
# only child breaks
{
'break_parent_ipc_after': False,
'break_child_ipc_after': 500,
},
# both: break parent first
{
'break_parent_ipc_after': 500,
'break_child_ipc_after': 800,
},
# both: break child first
{
'break_parent_ipc_after': 800,
'break_child_ipc_after': 500,
},
],
ids=[
'no_break',
'break_parent',
'break_child',
'break_both_parent_first',
'break_both_child_first',
],
)
def test_ipc_channel_break_during_stream(
debug_mode: bool,
spawn_backend: str,
ipc_break: dict | None,
):
'''
Ensure we can have an IPC channel break its connection during
streaming and it's still possible for the (simulated) user to kill
the actor tree using SIGINT.
We also verify the type of connection error expected in the parent
depending on which side of the IPC breaks first.
'''
if spawn_backend != 'trio':
if debug_mode:
pytest.skip('`debug_mode` only supported on `trio` spawner')
# non-`trio` spawners should never hit the hang condition that
# requires the user to do ctl-c to cancel the actor tree.
expect_final_exc = trio.ClosedResourceError
mod = import_path(
examples_dir() / 'advanced_faults' / 'ipc_failure_during_stream.py',
root=examples_dir(),
)
expect_final_exc = KeyboardInterrupt
# when ONLY the child breaks we expect the parent to get a closed
# resource error on the next `MsgStream.receive()` and then fail out
# and cancel the child from there.
if (
# only child breaks
(
ipc_break['break_child_ipc_after']
and ipc_break['break_parent_ipc_after'] is False
)
# both break but, parent breaks first
or (
ipc_break['break_child_ipc_after'] is not False
and (
ipc_break['break_parent_ipc_after']
> ipc_break['break_child_ipc_after']
)
)
):
expect_final_exc = trio.ClosedResourceError
# when the parent IPC side dies (even if the child's does as well
# but the child fails BEFORE the parent) we expect the channel to be
# sent a stop msg from the child at some point which will signal the
# parent that the stream has been terminated.
# NOTE: when the parent breaks "after" the child you get this same
# case as well, the child breaks the IPC channel with a stop msg
# before any closure takes place.
elif (
# only parent breaks
(
ipc_break['break_parent_ipc_after']
and ipc_break['break_child_ipc_after'] is False
)
# both break but, child breaks first
or (
ipc_break['break_parent_ipc_after'] is not False
and (
ipc_break['break_child_ipc_after']
> ipc_break['break_parent_ipc_after']
)
)
):
expect_final_exc = trio.EndOfChannel
with pytest.raises(expect_final_exc):
trio.run(
partial(
mod.main,
debug_mode=debug_mode,
start_method=spawn_backend,
**ipc_break,
)
)
@tractor.context
async def break_ipc_after_started(
ctx: tractor.Context,
) -> None:
await ctx.started()
async with ctx.open_stream() as stream:
await stream.aclose()
await trio.sleep(0.2)
await ctx.chan.send(None)
print('child broke IPC and terminating')
def test_stream_closed_right_after_ipc_break_and_zombie_lord_engages():
'''
Verify that if a subactor's IPC goes down just after bringing up
a stream, the parent can trigger a SIGINT and the child will be reaped
out-of-IPC by the localhost process supervision machinery: aka the
"zombie lord".
'''
async def main():
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'ipc_breaker',
enable_modules=[__name__],
)
with trio.move_on_after(1):
async with (
portal.open_context(
break_ipc_after_started
) as (ctx, sent),
):
async with ctx.open_stream():
await trio.sleep(0.5)
print('parent waiting on context')
print('parent exited context')
raise KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
trio.run(main)

View File

@ -1,380 +0,0 @@
'''
Advanced streaming patterns using bidirectional streams and contexts.
'''
from collections import Counter
import itertools
import platform
import trio
import tractor
def is_win():
return platform.system() == 'Windows'
_registry: dict[str, set[tractor.MsgStream]] = {
'even': set(),
'odd': set(),
}
async def publisher(
seed: int = 0,
) -> None:
global _registry
def is_even(i):
return i % 2 == 0
for val in itertools.count(seed):
sub = 'even' if is_even(val) else 'odd'
for sub_stream in _registry[sub].copy():
await sub_stream.send(val)
# throttle send rate to ~1kHz
# making it readable to a human user
await trio.sleep(1/1000)
@tractor.context
async def subscribe(
ctx: tractor.Context,
) -> None:
global _registry
# sync caller
await ctx.started(None)
async with ctx.open_stream() as stream:
# update subs list as consumer requests
async for new_subs in stream:
new_subs = set(new_subs)
remove = new_subs - _registry.keys()
print(f'setting sub to {new_subs} for {ctx.chan.uid}')
# remove old subs
for sub in remove:
_registry[sub].remove(stream)
# add new subs for consumer
for sub in new_subs:
_registry[sub].add(stream)
async def consumer(
subs: list[str],
) -> None:
uid = tractor.current_actor().uid
async with tractor.wait_for_actor('publisher') as portal:
async with portal.open_context(subscribe) as (ctx, first):
async with ctx.open_stream() as stream:
# flip between the provided subs dynamically
if len(subs) > 1:
for sub in itertools.cycle(subs):
print(f'setting dynamic sub to {sub}')
await stream.send([sub])
count = 0
async for value in stream:
print(f'{uid} got: {value}')
if count > 5:
break
count += 1
else: # static sub
await stream.send(subs)
async for value in stream:
print(f'{uid} got: {value}')
def test_dynamic_pub_sub():
global _registry
from multiprocessing import cpu_count
cpus = cpu_count()
async def main():
async with tractor.open_nursery() as n:
# name of this actor will be same as target func
await n.run_in_actor(publisher)
for i, sub in zip(
range(cpus - 2),
itertools.cycle(_registry.keys())
):
await n.run_in_actor(
consumer,
name=f'consumer_{sub}',
subs=[sub],
)
# make one dynamic subscriber
await n.run_in_actor(
consumer,
name='consumer_dynamic',
subs=list(_registry.keys()),
)
# block until cancelled by user
with trio.fail_after(3):
await trio.sleep_forever()
try:
trio.run(main)
except trio.TooSlowError:
pass
@tractor.context
async def one_task_streams_and_one_handles_reqresp(
ctx: tractor.Context,
) -> None:
await ctx.started()
async with ctx.open_stream() as stream:
async def pingpong():
'''Run a simple req/response service.
'''
async for msg in stream:
print('rpc server ping')
assert msg == 'ping'
print('rpc server pong')
await stream.send('pong')
async with trio.open_nursery() as n:
n.start_soon(pingpong)
for _ in itertools.count():
await stream.send('yo')
await trio.sleep(0.01)
def test_reqresp_ontopof_streaming():
'''
Test a subactor that both streams with one task and
spawns another which handles a small requests-response
dialogue over the same bidir-stream.
'''
async def main():
# flag to make sure we get at least one pong
got_pong: bool = False
timeout: int = 2
if is_win(): # smh
timeout = 4
with trio.move_on_after(timeout):
async with tractor.open_nursery() as n:
# name of this actor will be same as target func
portal = await n.start_actor(
'dual_tasks',
enable_modules=[__name__]
)
async with portal.open_context(
one_task_streams_and_one_handles_reqresp,
) as (ctx, first):
assert first is None
async with ctx.open_stream() as stream:
await stream.send('ping')
async for msg in stream:
print(f'client received: {msg}')
assert msg in {'pong', 'yo'}
if msg == 'pong':
got_pong = True
await stream.send('ping')
print('client sent ping')
assert got_pong
try:
trio.run(main)
except trio.TooSlowError:
pass
async def async_gen_stream(sequence):
for i in sequence:
yield i
await trio.sleep(0.1)
@tractor.context
async def echo_ctx_stream(
ctx: tractor.Context,
) -> None:
await ctx.started()
async with ctx.open_stream() as stream:
async for msg in stream:
await stream.send(msg)
def test_sigint_both_stream_types():
'''Verify that running a bi-directional and recv only stream
side-by-side will cancel correctly from SIGINT.
'''
timeout: float = 2
if is_win(): # smh
timeout += 1
async def main():
with trio.fail_after(timeout):
async with tractor.open_nursery() as n:
# name of this actor will be same as target func
portal = await n.start_actor(
'2_way',
enable_modules=[__name__]
)
async with portal.open_context(echo_ctx_stream) as (ctx, _):
async with ctx.open_stream() as stream:
async with portal.open_stream_from(
async_gen_stream,
sequence=list(range(1)),
) as gen_stream:
msg = await gen_stream.receive()
await stream.send(msg)
resp = await stream.receive()
assert resp == msg
raise KeyboardInterrupt
try:
trio.run(main)
assert 0, "Didn't receive KBI!?"
except KeyboardInterrupt:
pass
@tractor.context
async def inf_streamer(
ctx: tractor.Context,
) -> None:
'''
Stream increasing ints until terminated with a 'done' msg.
'''
await ctx.started()
async with (
ctx.open_stream() as stream,
trio.open_nursery() as n,
):
async def bail_on_sentinel():
async for msg in stream:
if msg == 'done':
await stream.aclose()
else:
print(f'streamer received {msg}')
# start termination detector
n.start_soon(bail_on_sentinel)
for val in itertools.count():
try:
await stream.send(val)
except trio.ClosedResourceError:
# close out the stream gracefully
break
print('terminating streamer')
def test_local_task_fanout_from_stream():
'''
Single stream with multiple local consumer tasks using the
``MsgStream.subscribe()`` api.
Ensure all tasks receive all values after stream completes sending.
'''
consumers = 22
async def main():
counts = Counter()
async with tractor.open_nursery() as tn:
p = await tn.start_actor(
'inf_streamer',
enable_modules=[__name__],
)
async with (
p.open_context(inf_streamer) as (ctx, _),
ctx.open_stream() as stream,
):
async def pull_and_count(name: str):
# name = trio.lowlevel.current_task().name
async with stream.subscribe() as recver:
assert isinstance(
recver,
tractor.trionics.BroadcastReceiver
)
async for val in recver:
# print(f'{name}: {val}')
counts[name] += 1
print(f'{name} bcaster ended')
print(f'{name} completed')
with trio.fail_after(3):
async with trio.open_nursery() as nurse:
for i in range(consumers):
nurse.start_soon(pull_and_count, i)
await trio.sleep(0.5)
print('\nterminating')
await stream.send('done')
print('closed stream connection')
assert len(counts) == consumers
mx = max(counts.values())
# make sure each task received all stream values
assert all(val == mx for val in counts.values())
await p.cancel_actor()
trio.run(main)

View File

@ -1,6 +1,5 @@
"""
Cancellation and error propagation
"""
import os
import signal
@ -8,10 +7,6 @@ import platform
import time
from itertools import repeat
from exceptiongroup import (
BaseExceptionGroup,
ExceptionGroup,
)
import pytest
import trio
import tractor
@ -19,10 +14,6 @@ import tractor
from conftest import tractor_test, no_windows
def is_win():
return platform.system() == 'Windows'
async def assert_err(delay=0):
await trio.sleep(delay)
assert 0
@ -56,60 +47,34 @@ def test_remote_error(arb_addr, args_err):
args, errtype = args_err
async def main():
async with tractor.open_nursery(
arbiter_addr=arb_addr,
) as nursery:
async with tractor.open_nursery() as nursery:
# on a remote type error caused by bad input args
# this should raise directly which means we **don't** get
# an exception group outside the nursery since the error
# here and the far end task error are one and the same?
portal = await nursery.run_in_actor(
assert_err, name='errorer', **args
)
portal = await nursery.run_in_actor('errorer', assert_err, **args)
# get result(s) from main task
try:
# this means the root actor will also raise a local
# parent task error and thus an eg will propagate out
# of this actor nursery.
await portal.result()
except tractor.RemoteActorError as err:
assert err.type == errtype
print("Look Maa that actor failed hard, hehh")
raise
# ensure boxed errors
if args:
with pytest.raises(tractor.RemoteActorError) as excinfo:
trio.run(main)
with pytest.raises(tractor.RemoteActorError) as excinfo:
tractor.run(main, arbiter_addr=arb_addr)
assert excinfo.value.type == errtype
else:
# the root task will also error on the `.result()` call
# so we expect an error from there AND the child.
with pytest.raises(BaseExceptionGroup) as excinfo:
trio.run(main)
# ensure boxed errors
for exc in excinfo.value.exceptions:
assert exc.type == errtype
# ensure boxed error is correct
assert excinfo.value.type == errtype
def test_multierror(arb_addr):
'''
Verify we raise a ``BaseExceptionGroup`` out of a nursery where
"""Verify we raise a ``trio.MultiError`` out of a nursery where
more than one actor errors.
'''
"""
async def main():
async with tractor.open_nursery(
arbiter_addr=arb_addr,
) as nursery:
async with tractor.open_nursery() as nursery:
await nursery.run_in_actor(assert_err, name='errorer1')
portal2 = await nursery.run_in_actor(assert_err, name='errorer2')
await nursery.run_in_actor('errorer1', assert_err)
portal2 = await nursery.run_in_actor('errorer2', assert_err)
# get result(s) from main task
try:
@ -119,11 +84,11 @@ def test_multierror(arb_addr):
print("Look Maa that first actor failed hard, hehh")
raise
# here we should get a ``BaseExceptionGroup`` containing exceptions
# here we should get a `trio.MultiError` containing exceptions
# from both subactors
with pytest.raises(BaseExceptionGroup):
trio.run(main)
with pytest.raises(trio.MultiError):
tractor.run(main, arbiter_addr=arb_addr)
@pytest.mark.parametrize('delay', (0, 0.5))
@ -131,46 +96,28 @@ def test_multierror(arb_addr):
'num_subactors', range(25, 26),
)
def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay):
"""Verify we raise a ``BaseExceptionGroup`` out of a nursery where
"""Verify we raise a ``trio.MultiError`` out of a nursery where
more than one actor errors and also with a delay before failure
to test failure during an ongoing spawning.
"""
async def main():
async with tractor.open_nursery(
arbiter_addr=arb_addr,
) as nursery:
async with tractor.open_nursery() as nursery:
for i in range(num_subactors):
await nursery.run_in_actor(
assert_err,
name=f'errorer{i}',
delay=delay
)
f'errorer{i}', assert_err, delay=delay)
# with pytest.raises(trio.MultiError) as exc_info:
with pytest.raises(BaseExceptionGroup) as exc_info:
trio.run(main)
with pytest.raises(trio.MultiError) as exc_info:
tractor.run(main, arbiter_addr=arb_addr)
assert exc_info.type == ExceptionGroup
assert exc_info.type == tractor.MultiError
err = exc_info.value
exceptions = err.exceptions
if len(exceptions) == 2:
# sometimes oddly now there's an embedded BrokenResourceError ?
for exc in exceptions:
excs = getattr(exc, 'exceptions', None)
if excs:
exceptions = excs
break
assert len(exceptions) == num_subactors
for exc in exceptions:
assert len(err.exceptions) == num_subactors
for exc in err.exceptions:
assert isinstance(exc, tractor.RemoteActorError)
assert exc.type == AssertionError
async def do_nothing():
def do_nothing():
pass
@ -182,14 +129,12 @@ def test_cancel_single_subactor(arb_addr, mechanism):
async def spawn_actor():
"""Spawn an actor that blocks indefinitely.
"""
async with tractor.open_nursery(
arbiter_addr=arb_addr,
) as nursery:
async with tractor.open_nursery() as nursery:
portal = await nursery.start_actor(
'nothin', enable_modules=[__name__],
'nothin', rpc_module_paths=[__name__],
)
assert (await portal.run(do_nothing)) is None
assert (await portal.run(__name__, 'do_nothing')) is None
if mechanism == 'nursery_cancel':
# would hang otherwise
@ -198,10 +143,10 @@ def test_cancel_single_subactor(arb_addr, mechanism):
raise mechanism
if mechanism == 'nursery_cancel':
trio.run(spawn_actor)
tractor.run(spawn_actor, arbiter_addr=arb_addr)
else:
with pytest.raises(mechanism):
trio.run(spawn_actor)
tractor.run(spawn_actor, arbiter_addr=arb_addr)
async def stream_forever():
@ -220,14 +165,13 @@ async def test_cancel_infinite_streamer(start_method):
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'donny',
enable_modules=[__name__],
rpc_module_paths=[__name__],
)
# this async for loop streams values from the above
# async generator running in a separate process
async with portal.open_stream_from(stream_forever) as stream:
async for letter in stream:
print(letter)
async for letter in await portal.run(__name__, 'stream_forever'):
print(letter)
# we support trio's cancellation system
assert cancel_scope.cancelled_caught
@ -239,8 +183,8 @@ async def test_cancel_infinite_streamer(start_method):
[
# daemon actors sit idle while single task actors error out
(1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None),
(2, BaseExceptionGroup, AssertionError, (assert_err, {}), None),
(3, BaseExceptionGroup, AssertionError, (assert_err, {}), None),
(2, tractor.MultiError, AssertionError, (assert_err, {}), None),
(3, tractor.MultiError, AssertionError, (assert_err, {}), None),
# 1 daemon actor errors out while single task actors sleep forever
(3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}),
@ -251,7 +195,7 @@ async def test_cancel_infinite_streamer(start_method):
(do_nuthin, {}), (assert_err, {'delay': 1}, True)),
# daemon complete quickly delay while single task
# actors error after brief delay
(3, BaseExceptionGroup, AssertionError,
(3, tractor.MultiError, AssertionError,
(assert_err, {'delay': 1}), (do_nuthin, {}, False)),
],
ids=[
@ -279,7 +223,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
for i in range(num_actors):
dactor_portals.append(await n.start_actor(
f'deamon_{i}',
enable_modules=[__name__],
rpc_module_paths=[__name__],
))
func, kwargs = ria_func
@ -287,12 +231,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
for i in range(num_actors):
# start actor(s) that will fail immediately
riactor_portals.append(
await n.run_in_actor(
func,
name=f'actor_{i}',
**kwargs
)
)
await n.run_in_actor(f'actor_{i}', func, **kwargs))
if da_func:
func, kwargs, expect_error = da_func
@ -300,8 +239,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
# if this function fails then we should error here
# and the nursery should teardown all other actors
try:
await portal.run(func, **kwargs)
await portal.run(__name__, func.__name__, **kwargs)
except tractor.RemoteActorError as err:
assert err.type == err_type
# we only expect this first error to propagate
@ -318,7 +256,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
# should error here with a ``RemoteActorError`` or ``MultiError``
except first_err as err:
if isinstance(err, BaseExceptionGroup):
if isinstance(err, tractor.MultiError):
assert len(err.exceptions) == num_actors
for exc in err.exceptions:
if isinstance(exc, tractor.RemoteActorError):
@ -338,35 +276,30 @@ async def spawn_and_error(breadth, depth) -> None:
name = tractor.current_actor().name
async with tractor.open_nursery() as nursery:
for i in range(breadth):
if depth > 0:
args = (
f'spawner_{i}_depth_{depth}',
spawn_and_error,
)
kwargs = {
'name': f'spawner_{i}_depth_{depth}',
'breadth': breadth,
'depth': depth - 1,
}
else:
args = (
f'{name}_errorer_{i}',
assert_err,
)
kwargs = {
'name': f'{name}_errorer_{i}',
}
kwargs = {}
await nursery.run_in_actor(*args, **kwargs)
@tractor_test
async def test_nested_multierrors(loglevel, start_method):
'''
Test that failed actor sets are wrapped in `BaseExceptionGroup`s. This
test goes only 2 nurseries deep but we should eventually have tests
"""Test that failed actor sets are wrapped in `trio.MultiError`s.
This test goes only 2 nurseries deep but we should eventually have tests
for arbitrary n-depth actor trees.
'''
"""
if start_method == 'trio':
depth = 3
subactor_breadth = 2
@ -385,42 +318,30 @@ async def test_nested_multierrors(loglevel, start_method):
async with tractor.open_nursery() as nursery:
for i in range(subactor_breadth):
await nursery.run_in_actor(
f'spawner_{i}',
spawn_and_error,
name=f'spawner_{i}',
breadth=subactor_breadth,
depth=depth,
)
except BaseExceptionGroup as err:
except trio.MultiError as err:
assert len(err.exceptions) == subactor_breadth
for subexc in err.exceptions:
# verify first level actor errors are wrapped as remote
if is_win():
if platform.system() == 'Windows':
# windows is often too slow and cancellation seems
# to happen before an actor is spawned
if isinstance(subexc, trio.Cancelled):
continue
elif isinstance(subexc, tractor.RemoteActorError):
else:
# on windows it seems we can't exactly be sure wtf
# will happen..
assert subexc.type in (
tractor.RemoteActorError,
trio.Cancelled,
BaseExceptionGroup,
trio.MultiError
)
elif isinstance(subexc, BaseExceptionGroup):
for subsub in subexc.exceptions:
if subsub in (tractor.RemoteActorError,):
subsub = subsub.type
assert type(subsub) in (
trio.Cancelled,
BaseExceptionGroup,
)
else:
assert isinstance(subexc, tractor.RemoteActorError)
@ -428,21 +349,14 @@ async def test_nested_multierrors(loglevel, start_method):
# XXX not sure what's up with this..
# on windows sometimes spawning is just too slow and
# we get back the (sent) cancel signal instead
if is_win():
if isinstance(subexc, tractor.RemoteActorError):
assert subexc.type in (
BaseExceptionGroup,
tractor.RemoteActorError
)
else:
assert isinstance(subexc, BaseExceptionGroup)
if platform.system() == 'Windows':
assert (subexc.type is trio.MultiError) or (
subexc.type is tractor.RemoteActorError)
else:
assert subexc.type is ExceptionGroup
assert subexc.type is trio.MultiError
else:
assert subexc.type in (
tractor.RemoteActorError,
trio.Cancelled
)
assert (subexc.type is tractor.RemoteActorError) or (
subexc.type is trio.Cancelled)
@no_windows
@ -460,13 +374,13 @@ def test_cancel_via_SIGINT(
with trio.fail_after(2):
async with tractor.open_nursery() as tn:
await tn.start_actor('sucka')
if 'mp' in spawn_backend:
if spawn_backend == 'mp':
time.sleep(0.1)
os.kill(pid, signal.SIGINT)
await trio.sleep_forever()
with pytest.raises(KeyboardInterrupt):
trio.run(main)
tractor.run(main)
@no_windows
@ -480,31 +394,26 @@ def test_cancel_via_SIGINT_other_task(
from a separate ``trio`` child task.
"""
pid = os.getpid()
timeout: float = 2
if is_win(): # smh
timeout += 1
async def spawn_and_sleep_forever(task_status=trio.TASK_STATUS_IGNORED):
async with tractor.open_nursery() as tn:
for i in range(3):
await tn.run_in_actor(
sleep_forever,
name='namesucka',
)
await tn.run_in_actor('sucka', sleep_forever)
task_status.started()
await trio.sleep_forever()
async def main():
# should never timeout since SIGINT should cancel the current program
with trio.fail_after(timeout):
with trio.fail_after(2):
async with trio.open_nursery() as n:
await n.start(spawn_and_sleep_forever)
if 'mp' in spawn_backend:
if spawn_backend == 'mp':
time.sleep(0.1)
os.kill(pid, signal.SIGINT)
with pytest.raises(KeyboardInterrupt):
trio.run(main)
tractor.run(main)
async def spin_for(period=3):
@ -514,10 +423,7 @@ async def spin_for(period=3):
async def spawn():
async with tractor.open_nursery() as tn:
await tn.run_in_actor(
spin_for,
name='sleeper',
)
portal = await tn.run_in_actor('sleeper', spin_for)
@no_windows
@ -536,66 +442,9 @@ def test_cancel_while_childs_child_in_sync_sleep(
async def main():
with trio.fail_after(2):
async with tractor.open_nursery() as tn:
await tn.run_in_actor(
spawn,
name='spawn',
)
portal = await tn.run_in_actor('spawn', spawn)
await trio.sleep(1)
assert 0
with pytest.raises(AssertionError):
trio.run(main)
def test_fast_graceful_cancel_when_spawn_task_in_soft_proc_wait_for_daemon(
start_method,
):
'''
This is a very subtle test which demonstrates how cancellation
during process collection can result in non-optimal teardown
performance on daemon actors. The fix for this test was to handle
``trio.Cancelled`` specially in the spawn task waiting in
`proc.wait()` such that ``Portal.cancel_actor()`` is called before
executing the "hard reap" sequence (which has an up to 3 second
delay currently).
In other words, if we can cancel the actor using a graceful remote
cancellation, and it's faster, we might as well do it.
'''
kbi_delay = 0.5
timeout: float = 2.9
if is_win(): # smh
timeout += 1
async def main():
start = time.time()
try:
async with trio.open_nursery() as nurse:
async with tractor.open_nursery() as tn:
p = await tn.start_actor(
'fast_boi',
enable_modules=[__name__],
)
async def delayed_kbi():
await trio.sleep(kbi_delay)
print(f'RAISING KBI after {kbi_delay} s')
raise KeyboardInterrupt
# start task which raises a kbi **after**
# the actor nursery ``__aexit__()`` has
# been run.
nurse.start_soon(delayed_kbi)
await p.run(do_nuthin)
finally:
duration = time.time() - start
if duration > timeout:
raise trio.TooSlowError(
'daemon cancel was slower than necessary..'
)
with pytest.raises(KeyboardInterrupt):
trio.run(main)
tractor.run(main)

View File

@ -1,173 +0,0 @@
'''
Test a service style daemon that maintains a nursery for spawning
"remote async tasks" including both spawning other long living
sub-sub-actor daemons.
'''
from typing import Optional
import asyncio
from contextlib import asynccontextmanager as acm
import pytest
import trio
from trio_typing import TaskStatus
import tractor
from tractor import RemoteActorError
from async_generator import aclosing
async def aio_streamer(
from_trio: asyncio.Queue,
to_trio: trio.abc.SendChannel,
) -> trio.abc.ReceiveChannel:
# required first msg to sync caller
to_trio.send_nowait(None)
from itertools import cycle
for i in cycle(range(10)):
to_trio.send_nowait(i)
await asyncio.sleep(0.01)
async def trio_streamer():
from itertools import cycle
for i in cycle(range(10)):
yield i
await trio.sleep(0.01)
async def trio_sleep_and_err(delay: float = 0.5):
await trio.sleep(delay)
# name error
doggy() # noqa
_cached_stream: Optional[
trio.abc.ReceiveChannel
] = None
@acm
async def wrapper_mngr(
):
from tractor.trionics import broadcast_receiver
global _cached_stream
in_aio = tractor.current_actor().is_infected_aio()
if in_aio:
if _cached_stream:
from_aio = _cached_stream
# if we already have a cached feed deliver a rx side clone
# to consumer
async with broadcast_receiver(from_aio, 6) as from_aio:
yield from_aio
return
else:
async with tractor.to_asyncio.open_channel_from(
aio_streamer,
) as (first, from_aio):
assert not first
# cache it so next task uses broadcast receiver
_cached_stream = from_aio
yield from_aio
else:
async with aclosing(trio_streamer()) as stream:
# cache it so next task uses broadcast receiver
_cached_stream = stream
yield stream
_nursery: trio.Nursery = None
@tractor.context
async def trio_main(
ctx: tractor.Context,
):
# sync
await ctx.started()
# stash a "service nursery" as "actor local" (aka a Python global)
global _nursery
n = _nursery
assert n
async def consume_stream():
async with wrapper_mngr() as stream:
async for msg in stream:
print(msg)
# run 2 tasks to ensure broadcaster chan use
n.start_soon(consume_stream)
n.start_soon(consume_stream)
n.start_soon(trio_sleep_and_err)
await trio.sleep_forever()
@tractor.context
async def open_actor_local_nursery(
ctx: tractor.Context,
):
global _nursery
async with trio.open_nursery() as n:
_nursery = n
await ctx.started()
await trio.sleep(10)
# await trio.sleep(1)
# XXX: this causes the hang since
# the caller does not unblock from its own
# ``trio.sleep_forever()``.
# TODO: we need to test a simple ctx task starting remote tasks
# that error and then blocking on a ``Nursery.start()`` which
# never yields back.. aka a scenario where the
# ``tractor.context`` task IS NOT in the service n's cancel
# scope.
n.cancel_scope.cancel()
@pytest.mark.parametrize(
'asyncio_mode',
[True, False],
ids='asyncio_mode={}'.format,
)
def test_actor_managed_trio_nursery_task_error_cancels_aio(
asyncio_mode: bool,
arb_addr
):
'''
Verify that a ``trio`` nursery created managed in a child actor
correctly relays errors to the parent actor when one of its spawned
tasks errors even when running in infected asyncio mode and using
broadcast receivers for multi-task-per-actor subscription.
'''
async def main():
# cancel the nursery shortly after boot
async with tractor.open_nursery() as n:
p = await n.start_actor(
'nursery_mngr',
infect_asyncio=asyncio_mode,
enable_modules=[__name__],
)
async with (
p.open_context(open_actor_local_nursery) as (ctx, first),
p.open_context(trio_main) as (ctx, first),
):
await trio.sleep_forever()
with pytest.raises(RemoteActorError) as excinfo:
trio.run(main)
# verify boxed error
err = excinfo.value
assert isinstance(err.type(), NameError)

View File

@ -1,84 +0,0 @@
import itertools
import pytest
import trio
import tractor
from tractor import open_actor_cluster
from tractor.trionics import gather_contexts
from conftest import tractor_test
MESSAGE = 'tractoring at full speed'
def test_empty_mngrs_input_raises() -> None:
async def main():
with trio.fail_after(1):
async with (
open_actor_cluster(
modules=[__name__],
# NOTE: ensure we can passthrough runtime opts
loglevel='info',
# debug_mode=True,
) as portals,
gather_contexts(
# NOTE: it's the use of inline-generator syntax
# here that causes the empty input.
mngrs=(
p.open_context(worker) for p in portals.values()
),
),
):
assert 0
with pytest.raises(ValueError):
trio.run(main)
@tractor.context
async def worker(
ctx: tractor.Context,
) -> None:
await ctx.started()
async with ctx.open_stream(
backpressure=True,
) as stream:
# TODO: this with the below assert causes a hang bug?
# with trio.move_on_after(1):
async for msg in stream:
# do something with msg
print(msg)
assert msg == MESSAGE
# TODO: does this ever cause a hang
# assert 0
@tractor_test
async def test_streaming_to_actor_cluster() -> None:
async with (
open_actor_cluster(modules=[__name__]) as portals,
gather_contexts(
mngrs=[p.open_context(worker) for p in portals.values()],
) as contexts,
gather_contexts(
mngrs=[ctx[0].open_stream() for ctx in contexts],
) as streams,
):
with trio.move_on_after(1):
for stream in itertools.cycle(streams):
await stream.send(MESSAGE)

View File

@ -1,798 +0,0 @@
'''
``async with ():`` inlined context-stream cancellation testing.
Verify that we raise errors when streams are opened prior to
sync-opening a ``tractor.Context``.
'''
from contextlib import asynccontextmanager as acm
from itertools import count
import platform
from typing import Optional
import pytest
import trio
import tractor
from tractor._exceptions import StreamOverrun
from conftest import tractor_test
# ``Context`` semantics are as follows,
# ------------------------------------
# - standard setup/teardown:
# ``Portal.open_context()`` starts a new
# remote task context in another actor. The target actor's task must
# call ``Context.started()`` to unblock this entry on the caller side.
# the callee task executes until complete and returns a final value
# which is delivered to the caller side and retrieved via
# ``Context.result()``.
# - cancel termination:
# context can be cancelled on either side where either end's task can
# call ``Context.cancel()`` which raises a local ``trio.Cancelled``
# and sends a task cancel request to the remote task which in turn
# raises a ``trio.Cancelled`` in that scope, catches it, and re-raises
# as ``ContextCancelled``. This is then caught by
# ``Portal.open_context()``'s exit and we get a graceful termination
# of the linked tasks.
# - error termination:
# error is caught after all context-cancel-scope tasks are cancelled
# via regular ``trio`` cancel scope semantics, error is sent to other
# side and unpacked as a `RemoteActorError`.
# ``Context.open_stream() as stream: MsgStream:`` msg semantics are:
# -----------------------------------------------------------------
# - either side can ``.send()`` which emits a 'yield' msg and delivers
# a value to a ``MsgStream.receive()`` call.
# - stream closure: one end relays a 'stop' message which terminates an
# ongoing ``MsgStream`` iteration.
# - cancel/error termination: as per the context semantics above but
# with implicit stream closure on the cancelling end.
_state: bool = False
@tractor.context
async def too_many_starteds(
ctx: tractor.Context,
) -> None:
'''
Call ``Context.started()`` more than once (an error).
'''
await ctx.started()
try:
await ctx.started()
except RuntimeError:
raise
@tractor.context
async def not_started_but_stream_opened(
ctx: tractor.Context,
) -> None:
'''
Enter ``Context.open_stream()`` without calling ``.started()``.
'''
try:
async with ctx.open_stream():
assert 0
except RuntimeError:
raise
@pytest.mark.parametrize(
'target',
[too_many_starteds, not_started_but_stream_opened],
ids='misuse_type={}'.format,
)
def test_started_misuse(target):
async def main():
async with tractor.open_nursery() as n:
portal = await n.start_actor(
target.__name__,
enable_modules=[__name__],
)
async with portal.open_context(target) as (ctx, sent):
await trio.sleep(1)
with pytest.raises(tractor.RemoteActorError):
trio.run(main)
@tractor.context
async def simple_setup_teardown(
ctx: tractor.Context,
data: int,
block_forever: bool = False,
) -> None:
# startup phase
global _state
_state = True
# signal to parent that we're up
await ctx.started(data + 1)
try:
if block_forever:
# block until cancelled
await trio.sleep_forever()
else:
return 'yo'
finally:
_state = False
async def assert_state(value: bool):
global _state
assert _state == value
@pytest.mark.parametrize(
'error_parent',
[False, ValueError, KeyboardInterrupt],
)
@pytest.mark.parametrize(
'callee_blocks_forever',
[False, True],
ids=lambda item: f'callee_blocks_forever={item}'
)
@pytest.mark.parametrize(
'pointlessly_open_stream',
[False, True],
ids=lambda item: f'open_stream={item}'
)
def test_simple_context(
error_parent,
callee_blocks_forever,
pointlessly_open_stream,
):
timeout = 1.5 if not platform.system() == 'Windows' else 4
async def main():
with trio.fail_after(timeout):
async with tractor.open_nursery() as nursery:
portal = await nursery.start_actor(
'simple_context',
enable_modules=[__name__],
)
try:
async with portal.open_context(
simple_setup_teardown,
data=10,
block_forever=callee_blocks_forever,
) as (ctx, sent):
assert sent == 11
if callee_blocks_forever:
await portal.run(assert_state, value=True)
else:
assert await ctx.result() == 'yo'
if not error_parent:
await ctx.cancel()
if pointlessly_open_stream:
async with ctx.open_stream():
if error_parent:
raise error_parent
if callee_blocks_forever:
await ctx.cancel()
else:
# in this case the stream will send a
# 'stop' msg to the far end which needs
# to be ignored
pass
else:
if error_parent:
raise error_parent
finally:
# after cancellation
if not error_parent:
await portal.run(assert_state, value=False)
# shut down daemon
await portal.cancel_actor()
if error_parent:
try:
trio.run(main)
except error_parent:
pass
except trio.MultiError as me:
# XXX: on windows it seems we may have to expect the group error
from tractor._exceptions import is_multi_cancelled
assert is_multi_cancelled(me)
else:
trio.run(main)
# basic stream terminations:
# - callee context closes without using stream
# - caller context closes without using stream
# - caller context calls `Context.cancel()` while streaming
# is ongoing resulting in callee being cancelled
# - callee calls `Context.cancel()` while streaming and caller
# sees stream terminated in `RemoteActorError`
# TODO: future possible features
# - restart request: far end raises `ContextRestart`
@tractor.context
async def close_ctx_immediately(
ctx: tractor.Context,
) -> None:
await ctx.started()
global _state
async with ctx.open_stream():
pass
@tractor_test
async def test_callee_closes_ctx_after_stream_open():
'callee context closes without using stream'
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'fast_stream_closer',
enable_modules=[__name__],
)
with trio.fail_after(2):
async with portal.open_context(
close_ctx_immediately,
# flag to avoid waiting the final result
# cancel_on_exit=True,
) as (ctx, sent):
assert sent is None
with trio.fail_after(0.5):
async with ctx.open_stream() as stream:
# should fall through since ``StopAsyncIteration``
# should be raised through translation of
# a ``trio.EndOfChannel`` by
# ``trio.abc.ReceiveChannel.__anext__()``
async for _ in stream:
assert 0
else:
# verify stream is now closed
try:
await stream.receive()
except trio.EndOfChannel:
pass
# TODO: should be just raise the closed resource err
# directly here to enforce not allowing a re-open
# of a stream to the context (at least until a time of
# if/when we decide that's a good idea?)
try:
with trio.fail_after(0.5):
async with ctx.open_stream() as stream:
pass
except trio.ClosedResourceError:
pass
await portal.cancel_actor()
@tractor.context
async def expect_cancelled(
ctx: tractor.Context,
) -> None:
global _state
_state = True
await ctx.started()
try:
async with ctx.open_stream() as stream:
async for msg in stream:
await stream.send(msg) # echo server
except trio.Cancelled:
# expected case
_state = False
raise
else:
assert 0, "Wasn't cancelled!?"
@pytest.mark.parametrize(
'use_ctx_cancel_method',
[False, True],
)
@tractor_test
async def test_caller_closes_ctx_after_callee_opens_stream(
use_ctx_cancel_method: bool,
):
'caller context closes without using stream'
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'ctx_cancelled',
enable_modules=[__name__],
)
async with portal.open_context(
expect_cancelled,
) as (ctx, sent):
await portal.run(assert_state, value=True)
assert sent is None
# call cancel explicitly
if use_ctx_cancel_method:
await ctx.cancel()
try:
async with ctx.open_stream() as stream:
async for msg in stream:
pass
except tractor.ContextCancelled:
raise # XXX: must be propagated to __aexit__
else:
assert 0, "Should have context cancelled?"
# channel should still be up
assert portal.channel.connected()
# ctx is closed here
await portal.run(assert_state, value=False)
else:
try:
with trio.fail_after(0.2):
await ctx.result()
assert 0, "Callee should have blocked!?"
except trio.TooSlowError:
await ctx.cancel()
try:
async with ctx.open_stream() as stream:
async for msg in stream:
pass
except tractor.ContextCancelled:
pass
else:
assert 0, "Should have received closed resource error?"
# ctx is closed here
await portal.run(assert_state, value=False)
# channel should not have been destroyed yet, only the
# inter-actor-task context
assert portal.channel.connected()
# teardown the actor
await portal.cancel_actor()
@tractor_test
async def test_multitask_caller_cancels_from_nonroot_task():
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'ctx_cancelled',
enable_modules=[__name__],
)
async with portal.open_context(
expect_cancelled,
) as (ctx, sent):
await portal.run(assert_state, value=True)
assert sent is None
async with ctx.open_stream() as stream:
async def send_msg_then_cancel():
await stream.send('yo')
await portal.run(assert_state, value=True)
await ctx.cancel()
await portal.run(assert_state, value=False)
async with trio.open_nursery() as n:
n.start_soon(send_msg_then_cancel)
try:
async for msg in stream:
assert msg == 'yo'
except tractor.ContextCancelled:
raise # XXX: must be propagated to __aexit__
# channel should still be up
assert portal.channel.connected()
# ctx is closed here
await portal.run(assert_state, value=False)
# channel should not have been destroyed yet, only the
# inter-actor-task context
assert portal.channel.connected()
# teardown the actor
await portal.cancel_actor()
@tractor.context
async def cancel_self(
ctx: tractor.Context,
) -> None:
global _state
_state = True
await ctx.cancel()
# should inline raise immediately
try:
async with ctx.open_stream():
pass
except tractor.ContextCancelled:
# suppress for now so we can do checkpoint tests below
pass
else:
raise RuntimeError('Context didnt cancel itself?!')
# check a real ``trio.Cancelled`` is raised on a checkpoint
try:
with trio.fail_after(0.1):
await trio.sleep_forever()
except trio.Cancelled:
raise
except trio.TooSlowError:
# should never get here
assert 0
@tractor_test
async def test_callee_cancels_before_started():
'''
Callee calls `Context.cancel()` while streaming and caller
sees stream terminated in `ContextCancelled`.
'''
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'cancels_self',
enable_modules=[__name__],
)
try:
async with portal.open_context(
cancel_self,
) as (ctx, sent):
async with ctx.open_stream():
await trio.sleep_forever()
# raises a special cancel signal
except tractor.ContextCancelled as ce:
ce.type == trio.Cancelled
# the traceback should be informative
assert 'cancelled itself' in ce.msgdata['tb_str']
# teardown the actor
await portal.cancel_actor()
@tractor.context
async def never_open_stream(
ctx: tractor.Context,
) -> None:
'''
Context which never opens a stream and blocks.
'''
await ctx.started()
await trio.sleep_forever()
@tractor.context
async def keep_sending_from_callee(
ctx: tractor.Context,
msg_buffer_size: Optional[int] = None,
) -> None:
'''
Send endlessly on the callee stream.
'''
await ctx.started()
async with ctx.open_stream(
msg_buffer_size=msg_buffer_size,
) as stream:
for msg in count():
print(f'callee sending {msg}')
await stream.send(msg)
await trio.sleep(0.01)
@pytest.mark.parametrize(
'overrun_by',
[
('caller', 1, never_open_stream),
('cancel_caller_during_overrun', 1, never_open_stream),
('callee', 0, keep_sending_from_callee),
],
ids='overrun_condition={}'.format,
)
def test_one_end_stream_not_opened(overrun_by):
'''
This should exemplify the bug from:
https://github.com/goodboy/tractor/issues/265
'''
overrunner, buf_size_increase, entrypoint = overrun_by
from tractor._runtime import Actor
buf_size = buf_size_increase + Actor.msg_buffer_size
async def main():
async with tractor.open_nursery() as n:
portal = await n.start_actor(
entrypoint.__name__,
enable_modules=[__name__],
)
async with portal.open_context(
entrypoint,
) as (ctx, sent):
assert sent is None
if 'caller' in overrunner:
async with ctx.open_stream() as stream:
for i in range(buf_size):
print(f'sending {i}')
await stream.send(i)
if 'cancel' in overrunner:
# without this we block waiting on the child side
await ctx.cancel()
else:
# expect overrun error to be relayed back
# and this sleep interrupted
await trio.sleep_forever()
else:
# callee overruns caller case so we do nothing here
await trio.sleep_forever()
await portal.cancel_actor()
# 2 overrun cases and the no overrun case (which pushes right up to
# the msg limit)
if overrunner == 'caller' or 'cancel' in overrunner:
with pytest.raises(tractor.RemoteActorError) as excinfo:
trio.run(main)
assert excinfo.value.type == StreamOverrun
elif overrunner == 'callee':
with pytest.raises(tractor.RemoteActorError) as excinfo:
trio.run(main)
# TODO: embedded remote errors so that we can verify the source
# error? the callee delivers an error which is an overrun
# wrapped in a remote actor error.
assert excinfo.value.type == tractor.RemoteActorError
else:
trio.run(main)
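# A trio-only sketch of the overrun condition exercised above: with a
# bounded buffer and a receiver that never drains, a non-blocking send
# past the buffer size fails; tractor's ``StreamOverrun`` is assumed to
# be the inter-actor analogue of this local condition.
import trio

def demo_overrun() -> None:
    async def main() -> None:
        send, recv = trio.open_memory_channel(max_buffer_size=1)
        send.send_nowait(0)  # fills the single buffer slot
        try:
            send.send_nowait(1)  # overruns the never-drained buffer
        except trio.WouldBlock:
            print('overrun: receiver side never drained the buffer')

    trio.run(main)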
@tractor.context
async def echo_back_sequence(
ctx: tractor.Context,
seq: list[int],
msg_buffer_size: Optional[int] = None,
) -> None:
'''
Echo back the received sequence on the callee stream.
'''
await ctx.started()
async with ctx.open_stream(
msg_buffer_size=msg_buffer_size,
) as stream:
seq = list(seq) # bleh, `msgpack`...
count = 0
while count < 3:
batch = []
async for msg in stream:
batch.append(msg)
if batch == seq:
break
for msg in batch:
print(f'callee sending {msg}')
await stream.send(msg)
count += 1
return 'yo'
def test_stream_backpressure():
'''
Demonstrate that small overruns by each task, sent back and forth
on a stream, do not raise any errors by default.
'''
async def main():
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'callee_sends_forever',
enable_modules=[__name__],
)
seq = list(range(3))
async with portal.open_context(
echo_back_sequence,
seq=seq,
msg_buffer_size=1,
) as (ctx, sent):
assert sent is None
async with ctx.open_stream(msg_buffer_size=1) as stream:
count = 0
while count < 3:
for msg in seq:
print(f'caller sending {msg}')
await stream.send(msg)
await trio.sleep(0.1)
batch = []
async for msg in stream:
batch.append(msg)
if batch == seq:
break
count += 1
# here the context should return
assert await ctx.result() == 'yo'
# cancel the daemon
await portal.cancel_actor()
trio.run(main)
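# A trio-only sketch of the back-and-forth pattern above: with a buffer
# size of 1 each ``send`` simply blocks until the peer receives, i.e.
# small overruns resolve via backpressure instead of erroring.
import trio

def demo_backpressure() -> None:
    async def echo(recv_chan, send_chan):
        async with send_chan:
            async for msg in recv_chan:
                await send_chan.send(msg)

    async def main() -> None:
        to_echo, echo_in = trio.open_memory_channel(1)
        echo_out, from_echo = trio.open_memory_channel(1)
        async with trio.open_nursery() as n:
            n.start_soon(echo, echo_in, echo_out)
            async with to_echo:
                for i in range(3):
                    await to_echo.send(i)
                    assert await from_echo.receive() == i

    trio.run(main)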
@tractor.context
async def sleep_forever(
ctx: tractor.Context,
) -> None:
await ctx.started()
async with ctx.open_stream():
await trio.sleep_forever()
@acm
async def attach_to_sleep_forever():
'''
Cancel a context **before** any underlying error is raised in order
to trigger a local reception of a ``ContextCancelled`` which **should not**
be re-raised in the local surrounding ``Context`` *iff* the cancel was
requested by **this** side of the context.
'''
async with tractor.wait_for_actor('sleeper') as p2:
async with (
p2.open_context(sleep_forever) as (peer_ctx, first),
peer_ctx.open_stream(),
):
try:
yield
finally:
# XXX: previously this would trigger local
# ``ContextCancelled`` to be received and raised in the
# local context overriding any local error due to
# logic inside ``_invoke()`` which checked for
# an error set on ``Context._error`` and raised it in
# under a cancellation scenario.
# The problem is you can have a remote cancellation
# that is part of a local error and we shouldn't raise
# ``ContextCancelled`` **iff** we weren't the side of
# the context to initiate it, i.e.
# ``Context._cancel_called`` should **NOT** have been
# set. The special logic to handle this case is now
# inside ``Context._may_raise_from_remote_msg()`` XD
await peer_ctx.cancel()
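# The cancel-ownership rule described in the comment block above,
# reduced to a pure function; parameter names are illustrative
# assumptions, not tractor api:
def should_reraise_remote_cancel(
    cancel_requested_locally: bool,
    remote_error: BaseException | None,
) -> bool:
    # only surface a ``ContextCancelled`` if the *peer* initiated it
    return (
        remote_error is not None
        and not cancel_requested_locally
    )

assert not should_reraise_remote_cancel(True, Exception())
assert should_reraise_remote_cancel(False, Exception())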
@tractor.context
async def error_before_started(
ctx: tractor.Context,
) -> None:
'''
This simulates exactly an original bug discovered in:
https://github.com/pikers/piker/issues/244
'''
async with attach_to_sleep_forever():
# send an unserializable type which should raise a type error
# here and **NOT BE SWALLOWED** by the surrounding acm!!?!
await ctx.started(object())
def test_do_not_swallow_error_before_started_by_remote_contextcancelled():
'''
Verify that an error raised in a remote context which itself opens another
remote context, which it cancels, does not override the original error that
caused the cancellation of the secondary context.
'''
async def main():
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'errorer',
enable_modules=[__name__],
)
await n.start_actor(
'sleeper',
enable_modules=[__name__],
)
async with (
portal.open_context(
error_before_started
) as (ctx, sent),
):
await trio.sleep_forever()
with pytest.raises(tractor.RemoteActorError) as excinfo:
trio.run(main)
assert excinfo.value.type == TypeError
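# The failure mode simulated above, in isolation: the wire format
# (assumed to be ``msgpack`` here) can't serialize an arbitrary
# ``object()`` and raises a ``TypeError``, which is what comes back
# boxed in the ``RemoteActorError``.
import msgpack

def demo_unserializable() -> None:
    try:
        msgpack.packb(object())
    except TypeError as err:
        print(f'as expected: {err}')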


@ -1,34 +1,18 @@
"""
That "native" debug mode better work!
That native debug better work!
All these tests can be understood (somewhat) by running the equivalent
`examples/debugging/` scripts manually.
TODO:
- none of these tests have been run successfully on windows yet but
there's been manual testing that verified it works.
- wonder if any of it'll work on OS X?
TODO: None of these tests have been run successfully on windows yet.
"""
import itertools
from os import path
from typing import Optional
import platform
import pathlib
import sys
import time
import pytest
import pexpect
from pexpect.exceptions import (
TIMEOUT,
EOF,
)
from conftest import (
examples_dir,
_ci_env,
)
from conftest import repodir
# TODO: The next great debugger audit could be done by you!
# - recurrent entry to breakpoint() from single actor *after* and an
@ -40,52 +24,27 @@ from conftest import (
# - recurrent root errors
if platform.system() == 'Windows':
pytest.skip(
'Debugger tests have no windows support (yet)',
allow_module_level=True,
)
def examples_dir():
"""Return the abspath to the examples directory.
"""
return path.join(repodir(), 'examples', 'debugging/')
def mk_cmd(ex_name: str) -> str:
'''
Generate a command suitable to pass to ``pexpect.spawn()``.
'''
script_path: pathlib.Path = examples_dir() / 'debugging' / f'{ex_name}.py'
return ' '.join(['python', str(script_path)])
# TODO: was trying to this xfail style but some weird bug i see in CI
# that's happening at collect time.. pretty soon gonna dump actions i'm
# thinkin...
# in CI we skip tests which spawn >= depth-1 actor trees due to there
# still being an outstanding issue with relaying the debug-mode-state
# through intermediary parents.
has_nested_actors = pytest.mark.has_nested_actors
# .xfail(
# os.environ.get('CI', False),
# reason=(
# 'This test uses nested actors and fails in CI\n'
# 'The test seems to run fine locally but until we solve the '
# 'following issue this CI test will be xfail:\n'
# 'https://github.com/goodboy/tractor/issues/320'
# )
# )
"""Generate a command suitable to pass to ``pexpect.spawn()``.
"""
return ' '.join(
['python',
path.join(examples_dir(), f'{ex_name}.py')]
)
@pytest.fixture
def spawn(
start_method,
testdir,
arb_addr,
) -> 'pexpect.spawn':
if start_method != 'trio':
pytest.skip(
"Debugger tests are only supported on the trio backend"
)
def _spawn(cmd):
return testdir.spawn(
cmd=mk_cmd(cmd),
@ -95,83 +54,6 @@ def spawn(
return _spawn
PROMPT = r"\(Pdb\+\)"
def expect(
child,
# prompt by default
patt: str = PROMPT,
**kwargs,
) -> None:
'''
Expect wrapper that prints last seen console
data before failing.
'''
try:
child.expect(
patt,
**kwargs,
)
except TIMEOUT:
before = str(child.before.decode())
print(before)
raise
def assert_before(
child,
patts: list[str],
) -> None:
before = str(child.before.decode())
for patt in patts:
try:
assert patt in before
except AssertionError:
print(before)
raise
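# A self-contained ``pexpect`` sketch of the expect-then-inspect
# pattern the two helpers above wrap; the child program is a
# hypothetical stand-in for the example scripts:
import pexpect

def demo_expect_pattern() -> None:
    child = pexpect.spawn(
        'python -c "print(\'hello\'); input()"'
    )
    child.expect('hello')  # block until the pattern appears
    child.sendline('')     # unblock the child's ``input()``
    child.expect(pexpect.EOF)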
@pytest.fixture(
params=[False, True],
ids='ctl-c={}'.format,
)
def ctlc(
request,
ci_env: bool,
) -> bool:
use_ctlc = request.param
node = request.node
markers = node.own_markers
for mark in markers:
if mark.name == 'has_nested_actors':
pytest.skip(
f'Test {node} has nested actors and fails with Ctrl-C.\n'
f'The test can sometimes run fine locally but until'
' we solve this issue this CI test will be xfail:\n'
'https://github.com/goodboy/tractor/issues/320'
)
if use_ctlc:
# XXX: disable pygments highlighting for auto-tests
# since some envs (like actions CI) will struggle
# with the added color-char encoding..
from tractor._debug import TractorConfig
TractorConfig.use_pygements = False
yield use_ctlc
@pytest.mark.parametrize(
'user_in_out',
[
@ -181,28 +63,26 @@ def ctlc(
ids=lambda item: f'{item[0]} -> {item[1]}',
)
def test_root_actor_error(spawn, user_in_out):
'''
Demonstrate crash handler entering pdb from basic error in root actor.
'''
"""Demonstrate crash handler entering pdbpp from basic error in root actor.
"""
user_input, expect_err_str = user_in_out
child = spawn('root_actor_error')
# scan for the prompt
expect(child, PROMPT)
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
# make sure expected logging and error arrives
assert "Attaching to pdb in crashed actor: ('root'" in before
assert "Attaching to pdb in crashed actor: ('arbiter'" in before
assert 'AssertionError' in before
# send user command
child.sendline(user_input)
# process should exit
expect(child, EOF)
child.expect(pexpect.EOF)
assert expect_err_str in str(child.before)
@ -220,8 +100,8 @@ def test_root_actor_bp(spawn, user_in_out):
user_input, expect_err_str = user_in_out
child = spawn('root_actor_breakpoint')
# scan for the prompt
child.expect(PROMPT)
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
assert 'Error' not in str(child.before)
@ -238,129 +118,56 @@ def test_root_actor_bp(spawn, user_in_out):
assert expect_err_str in str(child.before)
def do_ctlc(
child,
count: int = 3,
delay: float = 0.1,
patt: Optional[str] = None,
# expect repl UX to reprint the prompt after every
# ctrl-c send.
# XXX: no idea why, but in CI this never seems to work even on 3.10 so
# needs some further investigation potentially...
expect_prompt: bool = not _ci_env,
) -> None:
# make sure ctl-c sends don't do anything but repeat output
for _ in range(count):
time.sleep(delay)
child.sendcontrol('c')
# TODO: figure out why this makes CI fail..
# if you run this test manually it works just fine..
if expect_prompt:
before = str(child.before.decode())
time.sleep(delay)
child.expect(PROMPT)
time.sleep(delay)
if patt:
# should see the last line on console
assert patt in before
def test_root_actor_bp_forever(
spawn,
ctlc: bool,
):
def test_root_actor_bp_forever(spawn):
"Re-enter a breakpoint from the root actor-task."
child = spawn('root_actor_breakpoint_forever')
# do some "next" commands to demonstrate recurrent breakpoint
# entries
for _ in range(10):
child.expect(PROMPT)
if ctlc:
do_ctlc(child)
child.sendline('next')
child.expect(r"\(Pdb\+\+\)")
# do one continue which should trigger a
# new task to lock the tty
# do one continue which should trigger a new task to lock the tty
child.sendline('continue')
child.expect(PROMPT)
# seems that if we hit ctrl-c too fast the
# sigint guard machinery might not kick in..
time.sleep(0.001)
if ctlc:
do_ctlc(child)
child.expect(r"\(Pdb\+\+\)")
# XXX: this previously caused a bug!
child.sendline('n')
child.expect(PROMPT)
child.expect(r"\(Pdb\+\+\)")
child.sendline('n')
child.expect(PROMPT)
# quit out of the loop
child.sendline('q')
child.expect(pexpect.EOF)
child.expect(r"\(Pdb\+\+\)")
@pytest.mark.parametrize(
'do_next',
(True, False),
ids='do_next={}'.format,
)
def test_subactor_error(
spawn,
ctlc: bool,
do_next: bool,
):
'''
Single subactor raising an error
'''
def test_subactor_error(spawn):
"Single subactor raising an error"
child = spawn('subactor_error')
# scan for the prompt
child.expect(PROMPT)
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('name_error'" in before
if do_next:
child.sendline('n')
# send user command
# (in this case it's the same for 'continue' vs. 'quit')
child.sendline('continue')
else:
# make sure ctl-c sends don't do anything but repeat output
if ctlc:
do_ctlc(
child,
)
# the debugger should enter a second time in the nursery
# creating actor
# send user command and (in this case it's the same for 'continue'
# vs. 'quit') the debugger should enter a second time in the nursery
# creating actor
child.sendline('continue')
child.expect(r"\(Pdb\+\+\)")
child.expect(PROMPT)
before = str(child.before.decode())
# root actor gets debugger engaged
assert "Attaching to pdb in crashed actor: ('root'" in before
assert "Attaching to pdb in crashed actor: ('arbiter'" in before
# error is a remote error propagated from the subactor
assert "RemoteActorError: ('name_error'" in before
# another round
if ctlc:
do_ctlc(child)
child.sendline('c')
child.expect('\r\n')
@ -368,16 +175,13 @@ def test_subactor_error(
child.expect(pexpect.EOF)
def test_subactor_breakpoint(
spawn,
ctlc: bool,
):
def test_subactor_breakpoint(spawn):
"Single subactor with an infinite breakpoint loop"
child = spawn('subactor_breakpoint')
# scan for the prompt
child.expect(PROMPT)
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
@ -386,34 +190,25 @@ def test_subactor_breakpoint(
# entries
for _ in range(10):
child.sendline('next')
child.expect(PROMPT)
if ctlc:
do_ctlc(child)
child.expect(r"\(Pdb\+\+\)")
# now run some "continues" to show re-entries
for _ in range(5):
child.sendline('continue')
child.expect(PROMPT)
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
if ctlc:
do_ctlc(child)
# finally quit the loop
child.sendline('q')
# child process should exit but parent will capture pdb.BdbQuit
child.expect(PROMPT)
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "RemoteActorError: ('breakpoint_forever'" in before
assert 'bdb.BdbQuit' in before
if ctlc:
do_ctlc(child)
# quit the parent
child.sendline('c')
@ -425,344 +220,106 @@ def test_subactor_breakpoint(
assert 'bdb.BdbQuit' in before
@has_nested_actors
def test_multi_subactors(
spawn,
ctlc: bool,
):
'''
Multiple subactors, both erroring and
breakpointing as well as a nested subactor erroring.
'''
def test_multi_subactors(spawn):
"""Multiple subactors, both erroring and breakpointing as well as
a nested subactor erroring.
"""
child = spawn(r'multi_subactors')
# scan for the prompt
child.expect(PROMPT)
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
if ctlc:
do_ctlc(child)
assert "Attaching pdb to actor: ('bp_forever'" in before
# do some "next" commands to demonstrate recurrent breakpoint
# entries
for _ in range(10):
child.sendline('next')
child.expect(PROMPT)
if ctlc:
do_ctlc(child)
child.expect(r"\(Pdb\+\+\)")
# continue to next error
child.sendline('c')
# first name_error failure
child.expect(PROMPT)
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('name_error'" in before
assert "NameError" in before
if ctlc:
do_ctlc(child)
# continue again
child.sendline('c')
# 2nd name_error failure
child.expect(PROMPT)
# TODO: will we ever get the race where this crash will show up?
# blocklist strat now prevents this crash
# assert_before(child, [
# "Attaching to pdb in crashed actor: ('name_error_1'",
# "NameError",
# ])
if ctlc:
do_ctlc(child)
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "NameError" in before
# breakpoint loop should re-engage
child.sendline('c')
child.expect(PROMPT)
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
if ctlc:
do_ctlc(child)
# wait for spawn error to show up
spawn_err = "Attaching to pdb in crashed actor: ('spawn_error'"
start = time.time()
while (
spawn_err not in before
and (time.time() - start) < 3 # timeout eventually
):
child.sendline('c')
time.sleep(0.1)
child.expect(PROMPT)
before = str(child.before.decode())
if ctlc:
do_ctlc(child)
# 2nd depth nursery should trigger
# (XXX: this below if guard is technically a hack that makes the
# nested case seem to work locally on linux but ideally in the long
# run this can be dropped.)
if not ctlc:
assert_before(child, [
spawn_err,
"RemoteActorError: ('name_error_1'",
])
assert "Attaching pdb to actor: ('bp_forever'" in before
# now run some "continues" to show re-entries
for _ in range(5):
child.sendline('c')
child.expect(PROMPT)
child.expect(r"\(Pdb\+\+\)")
# quit the loop and expect parent to attach
child.sendline('q')
child.expect(PROMPT)
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert_before(child, [
# debugger attaches to root
"Attaching to pdb in crashed actor: ('root'",
# expect a multierror with exceptions for each sub-actor
"RemoteActorError: ('breakpoint_forever'",
"RemoteActorError: ('name_error'",
"RemoteActorError: ('spawn_error'",
"RemoteActorError: ('name_error_1'",
'bdb.BdbQuit',
])
if ctlc:
do_ctlc(child)
assert "Attaching to pdb in crashed actor: ('arbiter'" in before
assert "RemoteActorError: ('bp_forever'" in before
assert 'bdb.BdbQuit' in before
# process should exit
child.sendline('c')
child.expect(pexpect.EOF)
# repeat of previous multierror for final output
assert_before(child, [
"RemoteActorError: ('breakpoint_forever'",
"RemoteActorError: ('name_error'",
"RemoteActorError: ('spawn_error'",
"RemoteActorError: ('name_error_1'",
'bdb.BdbQuit',
])
def test_multi_daemon_subactors(
spawn,
loglevel: str,
ctlc: bool
):
'''
Multiple daemon subactors, both erroring and breakpointing within a
stream.
'''
child = spawn('multi_daemon_subactors')
child.expect(PROMPT)
# there can be a race for which subactor will acquire
# the root's tty lock first so anticipate either crash
# message on the first entry.
bp_forever_msg = "Attaching pdb to actor: ('bp_forever'"
name_error_msg = "NameError: name 'doggypants' is not defined"
before = str(child.before.decode())
if bp_forever_msg in before:
next_msg = name_error_msg
elif name_error_msg in before:
next_msg = bp_forever_msg
else:
raise ValueError("Neither log msg was found !?")
if ctlc:
do_ctlc(child)
# NOTE: previously since we did not have clobber prevention
# in the root actor this final resume could result in the debugger
# tearing down since both child actors would be cancelled and it was
# unlikely that `bp_forever` would re-acquire the tty lock again.
# Now, we should have a final resumption in the root plus a possible
# second entry by `bp_forever`.
child.sendline('c')
child.expect(PROMPT)
assert_before(child, [next_msg])
# XXX: hooray the root clobbering the child here was fixed!
# IMO, this demonstrates the true power of SC system design.
# now the root actor won't clobber the bp_forever child
# during its first access to the debug lock, but will instead
# wait for the lock to release, by the edge triggered
# ``_debug.Lock.no_remote_has_tty`` event before sending cancel messages
# (via portals) to its underlings B)
# at some point here there should have been some warning msg from
# the root announcing it avoided a clobber of the child's lock, but
# it seems unreliable in testing here to grab it:
# assert "in use by child ('bp_forever'," in before
if ctlc:
do_ctlc(child)
# expect another breakpoint actor entry
child.sendline('c')
child.expect(PROMPT)
try:
assert_before(child, [bp_forever_msg])
except AssertionError:
assert_before(child, [name_error_msg])
else:
if ctlc:
do_ctlc(child)
# should crash with the 2nd name error (simulates
# a retry) and then the root eventually (boxed) errors
# after 1 or more further bp actor entries.
child.sendline('c')
child.expect(PROMPT)
assert_before(child, [name_error_msg])
# wait for final error in root
# where it crashes with boxed error
while True:
try:
child.sendline('c')
child.expect(PROMPT)
assert_before(
child,
[bp_forever_msg]
)
except AssertionError:
break
assert_before(
child,
[
# boxed error raised in root task
"Attaching to pdb in crashed actor: ('root'",
"_exceptions.RemoteActorError: ('name_error'",
]
)
child.sendline('c')
child.expect(pexpect.EOF)
assert "RemoteActorError: ('bp_forever'" in before
assert 'bdb.BdbQuit' in before
@has_nested_actors
def test_multi_subactors_root_errors(
spawn,
ctlc: bool
):
'''
Multiple subactors, both erroring and breakpointing as well as
a nested subactor erroring.
'''
def test_multi_subactors_root_errors(spawn):
"""Multiple subactors, both erroring and breakpointing as well as
a nested subactor erroring.
"""
child = spawn('multi_subactor_root_errors')
# scan for the prompt
child.expect(PROMPT)
# scan for the pdbpp prompt
child.expect(r"\(Pdb\+\+\)")
# at most one subactor should attach before the root is cancelled
before = str(child.before.decode())
assert "NameError: name 'doggypants' is not defined" in before
if ctlc:
do_ctlc(child)
# continue again to catch 2nd name error from
# actor 'name_error_1' (which is 2nd depth).
# continue again
child.sendline('c')
child.expect(r"\(Pdb\+\+\)")
# due to block list strat from #337, this will no longer
# propagate before the root errors and cancels the spawner sub-tree.
child.expect(PROMPT)
# only if the blocking condition doesn't kick in fast enough
# should now get attached in root with assert error
before = str(child.before.decode())
if "Debug lock blocked for ['name_error_1'" not in before:
assert_before(child, [
"Attaching to pdb in crashed actor: ('name_error_1'",
"NameError",
])
if ctlc:
do_ctlc(child)
child.sendline('c')
child.expect(PROMPT)
# check if the spawner crashed or was blocked from debug
# and if this intermediary attached check the boxed error
before = str(child.before.decode())
if "Attaching to pdb in crashed actor: ('spawn_error'" in before:
assert_before(child, [
# boxed error from spawner's child
"RemoteActorError: ('name_error_1'",
"NameError",
])
if ctlc:
do_ctlc(child)
child.sendline('c')
child.expect(PROMPT)
# expect a root actor crash
assert_before(child, [
"RemoteActorError: ('name_error'",
"NameError",
# error from root actor and root task that created top level nursery
"Attaching to pdb in crashed actor: ('root'",
"AssertionError",
])
# should have come just after the prior prompt
assert "Cancelling nursery in ('spawn_error'," in before
assert "Attaching to pdb in crashed actor: ('arbiter'" in before
assert "AssertionError" in before
# continue again
child.sendline('c')
child.expect(pexpect.EOF)
assert_before(child, [
# "Attaching to pdb in crashed actor: ('root'",
# boxed error from previous step
"RemoteActorError: ('name_error'",
"NameError",
"AssertionError",
'assert 0',
])
before = str(child.before.decode())
assert "AssertionError" in before
@has_nested_actors
def test_multi_nested_subactors_error_through_nurseries(
spawn,
# TODO: address debugger issue for nested tree:
# https://github.com/goodboy/tractor/issues/320
# ctlc: bool,
):
def test_multi_nested_subactors_error_through_nurseries(spawn):
"""Verify deeply nested actors that error trigger debugger entries
at each actor nursery (level) all the way up the tree.
"""
# NOTE: previously, inside this script was a bug where if the
# NOTE: previously, inside this script was a a bug where if the
# parent errors before a 2-levels-lower actor has released the lock,
# the parent tries to cancel it but it's stuck in the debugger?
# A test (below) has now been added to explicitly verify this is
@ -770,164 +327,40 @@ def test_multi_nested_subactors_error_through_nurseries(
child = spawn('multi_nested_subactors_error_up_through_nurseries')
timed_out_early: bool = False
for _ in range(12):
child.expect(r"\(Pdb\+\+\)")
child.sendline('c')
for send_char in itertools.cycle(['c', 'q']):
try:
child.expect(PROMPT)
child.sendline(send_char)
time.sleep(0.01)
child.expect(pexpect.EOF)
except EOF:
break
assert_before(child, [
# boxed source errors
"NameError: name 'doggypants' is not defined",
"tractor._exceptions.RemoteActorError: ('name_error'",
"bdb.BdbQuit",
# first level subtrees
"tractor._exceptions.RemoteActorError: ('spawner0'",
# "tractor._exceptions.RemoteActorError: ('spawner1'",
# propagation of errors up through nested subtrees
"tractor._exceptions.RemoteActorError: ('spawn_until_0'",
"tractor._exceptions.RemoteActorError: ('spawn_until_1'",
"tractor._exceptions.RemoteActorError: ('spawn_until_2'",
])
before = str(child.before.decode())
assert "NameError" in before
@pytest.mark.timeout(15)
@has_nested_actors
def test_root_nursery_cancels_before_child_releases_tty_lock(
spawn,
start_method,
ctlc: bool,
):
'''
Test that when the root sends a cancel message before a nested child
has unblocked (which can happen when it has the tty lock and is
engaged in pdb) it is indeed cancelled after exiting the debugger.
'''
timed_out_early = False
def test_root_nursery_cancels_before_child_releases_tty_lock(spawn):
"""Test that when the root sends a cancel message before a nested
child has unblocked (which can happen when it has the tty lock and
is engaged in pdb) it is indeed cancelled after exiting the debugger.
"""
child = spawn('root_cancelled_but_child_is_in_tty_lock')
child.expect(PROMPT)
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "NameError: name 'doggypants' is not defined" in before
assert "tractor._exceptions.RemoteActorError: ('name_error'" not in before
time.sleep(0.5)
if ctlc:
do_ctlc(child)
child.sendline('c')
for i in range(4):
time.sleep(0.5)
try:
child.expect(PROMPT)
except (
EOF,
TIMEOUT,
):
# races all over..
print(f"Failed early on {i}?")
before = str(child.before.decode())
timed_out_early = True
# race conditions on how fast the continue is sent?
break
for _ in range(4):
child.expect(r"\(Pdb\+\+\)")
before = str(child.before.decode())
assert "NameError: name 'doggypants' is not defined" in before
if ctlc:
do_ctlc(child)
child.sendline('c')
time.sleep(0.1)
for i in range(3):
try:
child.expect(pexpect.EOF, timeout=0.5)
break
except TIMEOUT:
child.sendline('c')
time.sleep(0.1)
print('child was able to grab tty lock again?')
else:
print('giving up on child releasing, sending `quit` cmd')
child.sendline('q')
expect(child, EOF)
if not timed_out_early:
before = str(child.before.decode())
assert_before(child, [
"tractor._exceptions.RemoteActorError: ('spawner0'",
"tractor._exceptions.RemoteActorError: ('name_error'",
"NameError: name 'doggypants' is not defined",
])
def test_root_cancels_child_context_during_startup(
spawn,
ctlc: bool,
):
'''Verify a fast fail in the root doesn't lock up the child reaping
and all while using the new context api.
'''
child = spawn('fast_error_in_root_after_spawn')
child.expect(PROMPT)
before = str(child.before.decode())
assert "AssertionError" in before
if ctlc:
do_ctlc(child)
child.sendline('c')
child.expect(pexpect.EOF)
def test_different_debug_mode_per_actor(
spawn,
ctlc: bool,
):
child = spawn('per_actor_debug')
child.expect(PROMPT)
# only one actor should enter the debugger
before = str(child.before.decode())
assert "Attaching to pdb in crashed actor: ('debugged_boi'" in before
assert "RuntimeError" in before
if ctlc:
do_ctlc(child)
child.sendline('c')
child.expect(pexpect.EOF)
before = str(child.before.decode())
# NOTE: this debugged actor error currently WON'T show up since the
# root will actually cancel and terminate the nursery before the error
# msg reported back from the debug mode actor is processed.
# assert "tractor._exceptions.RemoteActorError: ('debugged_boi'" in before
assert "tractor._exceptions.RemoteActorError: ('crash_boi'" in before
# the crash boi should not have made a debugger request but
# instead crashed completely
assert "tractor._exceptions.RemoteActorError: ('crash_boi'" in before
assert "RuntimeError" in before
assert "tractor._exceptions.RemoteActorError: ('spawner0'" in before
assert "tractor._exceptions.RemoteActorError: ('name_error'" in before
assert "NameError: name 'doggypants' is not defined" in before


@ -20,11 +20,8 @@ async def test_reg_then_unreg(arb_addr):
assert actor.is_arbiter
assert len(actor._registry) == 1 # only self is registered
async with tractor.open_nursery(
arbiter_addr=arb_addr,
) as n:
portal = await n.start_actor('actor', enable_modules=[__name__])
async with tractor.open_nursery() as n:
portal = await n.start_actor('actor', rpc_module_paths=[__name__])
uid = portal.channel.uid
async with tractor.get_arbiter(*arb_addr) as aportal:
@ -42,7 +39,7 @@ async def test_reg_then_unreg(arb_addr):
await trio.sleep(0.1)
assert uid not in aportal.actor._registry
sockaddrs = actor._registry.get(uid)
sockaddrs = actor._registry[uid]
assert not sockaddrs
@ -69,7 +66,7 @@ async def say_hello_use_wait(other_actor):
@tractor_test
@pytest.mark.parametrize('func', [say_hello, say_hello_use_wait])
async def test_trynamic_trio(func, start_method, arb_addr):
async def test_trynamic_trio(func, start_method):
"""Main tractor entry point, the "master" process (for now
acts as the "director").
"""
@ -77,14 +74,14 @@ async def test_trynamic_trio(func, start_method, arb_addr):
print("Alright... Action!")
donny = await n.run_in_actor(
'donny',
func,
other_actor='gretchen',
name='donny',
)
gretchen = await n.run_in_actor(
'gretchen',
func,
other_actor='donny',
name='gretchen',
)
print(await gretchen.result())
print(await donny.result())
@ -111,23 +108,8 @@ async def cancel(use_signal, delay=0):
async def stream_from(portal):
async with portal.open_stream_from(stream_forever) as stream:
async for value in stream:
print(value)
async def unpack_reg(actor_or_portal):
'''
Send a "registry" RPC request to the "arbiter" registry system and
unpack the response.
'''
if getattr(actor_or_portal, 'get_registry', None):
msg = await actor_or_portal.get_registry()
else:
msg = await actor_or_portal.run_from_ns('self', 'get_registry')
return {tuple(key.split('.')): val for key, val in msg.items()}
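# The key transform above in isolation: registry keys are assumed to
# arrive as '.'-joined uid pairs which get split back into tuples.
def demo_unpack_reg() -> None:
    msg = {'actor.1234abcd': ('127.0.0.1', 6000)}
    unpacked = {tuple(k.split('.')): v for k, v in msg.items()}
    assert unpacked == {('actor', '1234abcd'): ('127.0.0.1', 6000)}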
async for value in await portal.result():
print(value)
async def spawn_and_check_registry(
@ -135,77 +117,70 @@ async def spawn_and_check_registry(
use_signal: bool,
remote_arbiter: bool = False,
with_streaming: bool = False,
) -> None:
actor = tractor.current_actor()
async with tractor.open_root_actor(
arbiter_addr=arb_addr,
):
async with tractor.get_arbiter(*arb_addr) as portal:
# runtime needs to be up to call this
actor = tractor.current_actor()
if remote_arbiter:
assert not actor.is_arbiter
if remote_arbiter:
assert not actor.is_arbiter
async with tractor.get_arbiter(*arb_addr) as portal:
if actor.is_arbiter:
async def get_reg():
return actor._registry
extra = 1 # arbiter is local root actor
else:
get_reg = partial(portal.run, 'self', 'get_registry')
extra = 2 # local root actor + remote arbiter
if actor.is_arbiter:
extra = 1 # arbiter is local root actor
get_reg = partial(unpack_reg, actor)
# ensure current actor is registered
registry = await get_reg()
assert actor.uid in registry
else:
get_reg = partial(unpack_reg, portal)
extra = 2 # local root actor + remote arbiter
# ensure current actor is registered
registry = await get_reg()
assert actor.uid in registry
if with_streaming:
to_run = stream_forever
else:
to_run = trio.sleep_forever
async with trio.open_nursery() as trion:
try:
async with tractor.open_nursery() as n:
async with trio.open_nursery() as trion:
portals = {}
for i in range(3):
name = f'a{i}'
portals[name] = await n.run_in_actor(name, to_run)
portals = {}
for i in range(3):
name = f'a{i}'
if with_streaming:
portals[name] = await n.start_actor(
name=name, enable_modules=[__name__])
# wait on last actor to come up
async with tractor.wait_for_actor(name):
registry = await get_reg()
for uid in n._children:
assert uid in registry
else: # no streaming
portals[name] = await n.run_in_actor(
trio.sleep_forever, name=name)
assert len(portals) + extra == len(registry)
# wait on last actor to come up
async with tractor.wait_for_actor(name):
registry = await get_reg()
for uid in n._children:
assert uid in registry
if with_streaming:
await trio.sleep(0.1)
assert len(portals) + extra == len(registry)
pts = list(portals.values())
for p in pts[:-1]:
trion.start_soon(stream_from, p)
if with_streaming:
await trio.sleep(0.1)
# stream for 1 sec
trion.start_soon(cancel, use_signal, 1)
pts = list(portals.values())
for p in pts[:-1]:
trion.start_soon(stream_from, p)
# stream for 1 sec
trion.start_soon(cancel, use_signal, 1)
last_p = pts[-1]
await stream_from(last_p)
else:
await cancel(use_signal)
last_p = pts[-1]
async for value in await last_p.result():
print(value)
else:
await cancel(use_signal)
finally:
await trio.sleep(0.5)
with trio.CancelScope(shield=True):
await trio.sleep(0.5)
# all subactors should have de-registered
registry = await get_reg()
assert len(registry) == extra
assert actor.uid in registry
# all subactors should have de-registered
registry = await get_reg()
assert len(registry) == extra
assert actor.uid in registry
@pytest.mark.parametrize('use_signal', [False, True])
@ -220,7 +195,7 @@ def test_subactors_unregister_on_cancel(
deregistering themselves with the arbiter.
"""
with pytest.raises(KeyboardInterrupt):
trio.run(
tractor.run(
partial(
spawn_and_check_registry,
arb_addr,
@ -228,6 +203,7 @@ def test_subactors_unregister_on_cancel(
remote_arbiter=False,
with_streaming=with_streaming,
),
arbiter_addr=arb_addr
)
@ -245,7 +221,7 @@ def test_subactors_unregister_on_cancel_remote_daemon(
tree) arbiter.
"""
with pytest.raises(KeyboardInterrupt):
trio.run(
tractor.run(
partial(
spawn_and_check_registry,
arb_addr,
@ -253,6 +229,8 @@ def test_subactors_unregister_on_cancel_remote_daemon(
remote_arbiter=True,
with_streaming=with_streaming,
),
# XXX: required to use remote daemon!
arbiter_addr=arb_addr
)
@ -274,55 +252,44 @@ async def close_chans_before_nursery(
else:
entries_at_end = 1
async with tractor.open_root_actor(
arbiter_addr=arb_addr,
):
async with tractor.get_arbiter(*arb_addr) as aportal:
try:
get_reg = partial(unpack_reg, aportal)
async with tractor.get_arbiter(*arb_addr) as aportal:
try:
get_reg = partial(aportal.run, 'self', 'get_registry')
async with tractor.open_nursery() as tn:
portal1 = await tn.start_actor(
name='consumer1', enable_modules=[__name__])
portal2 = await tn.start_actor(
'consumer2', enable_modules=[__name__])
async with tractor.open_nursery() as tn:
portal1 = await tn.run_in_actor('consumer1', stream_forever)
agen1 = await portal1.result()
# TODO: compact this back as was in last commit once
# 3.9+, see https://github.com/goodboy/tractor/issues/207
async with portal1.open_stream_from(
stream_forever
) as agen1:
async with portal2.open_stream_from(
stream_forever
) as agen2:
async with trio.open_nursery() as n:
n.start_soon(streamer, agen1)
n.start_soon(cancel, use_signal, .5)
try:
await streamer(agen2)
finally:
# Kill the root nursery thus resulting in
# normal arbiter channel ops to fail during
# teardown. It doesn't seem like this is
# reliably triggered by an external SIGINT.
# tractor.current_actor()._root_nursery.cancel_scope.cancel()
portal2 = await tn.start_actor('consumer2', rpc_module_paths=[__name__])
agen2 = await portal2.run(__name__, 'stream_forever')
# XXX: THIS IS THE KEY THING that
# happens **before** exiting the
# actor nursery block
async with trio.open_nursery() as n:
n.start_soon(streamer, agen1)
n.start_soon(cancel, use_signal, .5)
try:
await streamer(agen2)
finally:
# Kill the root nursery thus resulting in
# normal arbiter channel ops to fail during
# teardown. It doesn't seem like this is
# reliably triggered by an external SIGINT.
# tractor.current_actor()._root_nursery.cancel_scope.cancel()
# also kill off channels cuz why not
await agen1.aclose()
await agen2.aclose()
finally:
with trio.CancelScope(shield=True):
await trio.sleep(1)
# XXX: THIS IS THE KEY THING that happens
# **before** exiting the actor nursery block
# all subactors should have de-registered
registry = await get_reg()
assert portal1.channel.uid not in registry
assert portal2.channel.uid not in registry
assert len(registry) == entries_at_end
# also kill off channels cuz why not
await agen1.aclose()
await agen2.aclose()
finally:
with trio.CancelScope(shield=True):
await trio.sleep(.5)
# all subactors should have de-registered
registry = await get_reg()
assert portal1.channel.uid not in registry
assert portal2.channel.uid not in registry
assert len(registry) == entries_at_end
@pytest.mark.parametrize('use_signal', [False, True])
@ -336,13 +303,15 @@ def test_close_channel_explicit(
results in subactor(s) deregistering from the arbiter.
"""
with pytest.raises(KeyboardInterrupt):
trio.run(
tractor.run(
partial(
close_chans_before_nursery,
arb_addr,
use_signal,
remote_arbiter=False,
),
# XXX: required to use remote daemon!
arbiter_addr=arb_addr
)
@ -358,11 +327,13 @@ def test_close_channel_explicit_remote_arbiter(
results in subactor(s) deregistering from the arbiter.
"""
with pytest.raises(KeyboardInterrupt):
trio.run(
tractor.run(
partial(
close_chans_before_nursery,
arb_addr,
use_signal,
remote_arbiter=True,
),
# XXX: required to use remote daemon!
arbiter_addr=arb_addr
)


@ -1,7 +1,6 @@
'''
"""
Let's make sure them docs work yah?
'''
"""
from contextlib import contextmanager
import itertools
import os
@ -12,17 +11,17 @@ import shutil
import pytest
from conftest import (
examples_dir,
)
from conftest import repodir
def examples_dir():
"""Return the abspath to the examples directory.
"""
return os.path.join(repodir(), 'examples')
@pytest.fixture
def run_example_in_subproc(
loglevel: str,
testdir,
arb_addr: tuple[str, int],
):
def run_example_in_subproc(loglevel, testdir, arb_addr):
@contextmanager
def run(script_code):
@ -32,8 +31,8 @@ def run_example_in_subproc(
# on windows we need to create a special __main__.py which will
# be executed with ``python -m <modulename>`` on windows..
shutil.copyfile(
examples_dir() / '__main__.py',
str(testdir / '__main__.py'),
os.path.join(examples_dir(), '__main__.py'),
os.path.join(str(testdir), '__main__.py')
)
# drop the ``if __name__ == '__main__'`` guard onwards from
@ -62,11 +61,10 @@ def run_example_in_subproc(
str(script_file),
]
# XXX: BE FOREVER WARNED: if you enable lots of tractor logging
# in the subprocess it may cause infinite blocking on the pipes
# due to backpressure!!!
proc = testdir.popen(
cmdargs,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs,
)
assert not proc.returncode
@ -79,19 +77,13 @@ def run_example_in_subproc(
@pytest.mark.parametrize(
'example_script',
# walk yields: (dirpath, dirnames, filenames)
[
(p[0], f) for p in os.walk(examples_dir()) for f in p[2]
if '__' not in f
and f[0] != '_'
and 'debugging' not in p[0]
and 'integration' not in p[0]
and 'advanced_faults' not in p[0]
f for f in os.listdir(examples_dir())
if (
('__' not in f) and
('debugging' not in f)
)
],
ids=lambda t: t[1],
)
def test_example(run_example_in_subproc, example_script):
"""Load and run scripts from this repo's ``examples/`` dir as a user
@ -102,34 +94,18 @@ def test_example(run_example_in_subproc, example_script):
test directory and invoke the script as a module with ``python -m
test_example``.
"""
ex_file = os.path.join(*example_script)
if 'rpc_bidir_streaming' in ex_file and sys.version_info < (3, 9):
pytest.skip("2-way streaming example requires py3.9 async with syntax")
ex_file = os.path.join(examples_dir(), example_script)
with open(ex_file, 'r') as ex:
code = ex.read()
with run_example_in_subproc(code) as proc:
proc.wait()
err, _ = proc.stderr.read(), proc.stdout.read()
# print(f'STDERR: {err}')
# print(f'STDOUT: {out}')
# if we get some gnarly output let's aggregate and raise
if err:
errmsg = err.decode()
errlines = errmsg.splitlines()
last_error = errlines[-1]
if (
'Error' in last_error
# XXX: currently we print this to console, but maybe
# shouldn't eventually once we figure out what's
# a better way to be explicit about aio side
# cancels?
and 'asyncio.exceptions.CancelledError' not in last_error
):
raise Exception(errmsg)
errmsg = err.decode()
errlines = errmsg.splitlines()
if err and 'Error' in errlines[-1]:
raise Exception(errmsg)
assert proc.returncode == 0
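# The stderr-triage rule above, reduced to a pure function: only raise
# when the last line looks like an error and isn't the (currently
# printed) expected aio-side cancellation.
def should_raise_from_stderr(errmsg: str) -> bool:
    last_error = errmsg.splitlines()[-1]
    return (
        'Error' in last_error
        and 'asyncio.exceptions.CancelledError' not in last_error
    )

assert should_raise_from_stderr('Traceback ...\nValueError: boom')
assert not should_raise_from_stderr('asyncio.exceptions.CancelledError')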


@ -1,564 +0,0 @@
'''
The hipster way to force SC onto the stdlib's "async": 'infection mode'.
'''
from typing import Optional, Iterable, Union
import asyncio
import builtins
import itertools
import importlib
from exceptiongroup import BaseExceptionGroup
import pytest
import trio
import tractor
from tractor import (
to_asyncio,
RemoteActorError,
)
from tractor.trionics import BroadcastReceiver
async def sleep_and_err(
sleep_for: float = 0.1,
# just signature placeholders for compat with
# ``to_asyncio.open_channel_from()``
to_trio: Optional[trio.MemorySendChannel] = None,
from_trio: Optional[asyncio.Queue] = None,
):
if to_trio:
to_trio.send_nowait('start')
await asyncio.sleep(sleep_for)
assert 0
async def sleep_forever():
await asyncio.sleep(float('inf'))
async def trio_cancels_single_aio_task():
# spawn an ``asyncio`` task to run a func and return result
with trio.move_on_after(.2):
await tractor.to_asyncio.run_task(sleep_forever)
def test_trio_cancels_aio_on_actor_side(arb_addr):
'''
Spawn an infected actor that is cancelled by the ``trio`` side
task using std cancel scope apis.
'''
async def main():
async with tractor.open_nursery(
arbiter_addr=arb_addr
) as n:
await n.run_in_actor(
trio_cancels_single_aio_task,
infect_asyncio=True,
)
trio.run(main)
async def asyncio_actor(
target: str,
expect_err: Optional[Exception] = None
) -> None:
assert tractor.current_actor().is_infected_aio()
target = globals()[target]
if expect_err and '.' in expect_err:
modpath, _, name = expect_err.rpartition('.')
mod = importlib.import_module(modpath)
error_type = getattr(mod, name)
else: # toplevel builtin error type
error_type = builtins.__dict__.get(expect_err)
try:
# spawn an ``asyncio`` task to run a func and return result
await tractor.to_asyncio.run_task(target)
except BaseException as err:
if expect_err:
assert isinstance(err, error_type)
raise
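# The dotted-path error-type lookup above, in isolation (same logic,
# standalone names only):
import builtins
import importlib

def resolve_error_type(expect_err: str) -> type:
    if '.' in expect_err:
        modpath, _, name = expect_err.rpartition('.')
        return getattr(importlib.import_module(modpath), name)
    return builtins.__dict__[expect_err]

assert resolve_error_type('builtins.ValueError') is ValueError
assert resolve_error_type('AssertionError') is AssertionError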
def test_aio_simple_error(arb_addr):
'''
Verify a simple remote asyncio error propagates back through trio
to the parent actor.
'''
async def main():
async with tractor.open_nursery(
arbiter_addr=arb_addr
) as n:
await n.run_in_actor(
asyncio_actor,
target='sleep_and_err',
expect_err='AssertionError',
infect_asyncio=True,
)
with pytest.raises(RemoteActorError) as excinfo:
trio.run(main)
err = excinfo.value
assert isinstance(err, RemoteActorError)
assert err.type == AssertionError
def test_tractor_cancels_aio(arb_addr):
'''
Verify we can cancel a spawned asyncio task gracefully.
'''
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
asyncio_actor,
target='sleep_forever',
expect_err='trio.Cancelled',
infect_asyncio=True,
)
# cancel the entire remote runtime
await portal.cancel_actor()
trio.run(main)
def test_trio_cancels_aio(arb_addr):
'''
Much like the above test with ``tractor.Portal.cancel_actor()``
except we just use a standard ``trio`` cancellation api.
'''
async def main():
with trio.move_on_after(1):
# cancel the nursery shortly after boot
async with tractor.open_nursery() as n:
await n.run_in_actor(
asyncio_actor,
target='sleep_forever',
expect_err='trio.Cancelled',
infect_asyncio=True,
)
trio.run(main)
@tractor.context
async def trio_ctx(
ctx: tractor.Context,
):
await ctx.started('start')
# this will block until the ``asyncio`` task sends a "first"
# message.
with trio.fail_after(2):
async with (
trio.open_nursery() as n,
tractor.to_asyncio.open_channel_from(
sleep_and_err,
) as (first, chan),
):
assert first == 'start'
# spawn another asyncio task for the heck of it.
n.start_soon(
tractor.to_asyncio.run_task,
sleep_forever,
)
await trio.sleep_forever()
@pytest.mark.parametrize(
'parent_cancels', [False, True],
ids='parent_actor_cancels_child={}'.format
)
def test_context_spawns_aio_task_that_errors(
arb_addr,
parent_cancels: bool,
):
'''
Verify that spawning a task via an intertask channel ctx mngr that
errors correctly propagates the error back from the `asyncio`-side
task.
'''
async def main():
with trio.fail_after(2):
async with tractor.open_nursery() as n:
p = await n.start_actor(
'aio_daemon',
enable_modules=[__name__],
infect_asyncio=True,
# debug_mode=True,
loglevel='cancel',
)
async with p.open_context(
trio_ctx,
) as (ctx, first):
assert first == 'start'
if parent_cancels:
await p.cancel_actor()
await trio.sleep_forever()
with pytest.raises(RemoteActorError) as excinfo:
trio.run(main)
err = excinfo.value
assert isinstance(err, RemoteActorError)
if parent_cancels:
assert err.type == trio.Cancelled
else:
assert err.type == AssertionError
async def aio_cancel():
'''
Cancel urself boi.
'''
await asyncio.sleep(0.5)
task = asyncio.current_task()
# cancel and enter sleep
task.cancel()
await sleep_forever()
def test_aio_cancelled_from_aio_causes_trio_cancelled(arb_addr):
async def main():
async with tractor.open_nursery() as n:
await n.run_in_actor(
asyncio_actor,
target='aio_cancel',
expect_err='tractor.to_asyncio.AsyncioCancelled',
infect_asyncio=True,
)
with pytest.raises(RemoteActorError) as excinfo:
trio.run(main)
# ensure boxed error is correct
assert excinfo.value.type == to_asyncio.AsyncioCancelled
# TODO: verify open_channel_from will fail on this..
async def no_to_trio_in_args():
pass
async def push_from_aio_task(
sequence: Iterable,
to_trio: trio.abc.SendChannel,
expect_cancel: bool,
fail_early: bool,
) -> None:
try:
# sync caller ctx manager
to_trio.send_nowait(True)
for i in sequence:
print(f'asyncio sending {i}')
to_trio.send_nowait(i)
await asyncio.sleep(0.001)
if i == 50 and fail_early:
raise Exception
print('asyncio streamer complete!')
except asyncio.CancelledError:
if not expect_cancel:
pytest.fail("aio task was cancelled unexpectedly")
raise
else:
if expect_cancel:
pytest.fail("aio task wasn't cancelled as expected!?")
async def stream_from_aio(
exit_early: bool = False,
raise_err: bool = False,
aio_raise_err: bool = False,
fan_out: bool = False,
) -> None:
seq = range(100)
expect = list(seq)
try:
pulled = []
async with to_asyncio.open_channel_from(
push_from_aio_task,
sequence=seq,
expect_cancel=raise_err or exit_early,
fail_early=aio_raise_err,
) as (first, chan):
assert first is True
async def consume(
chan: Union[
to_asyncio.LinkedTaskChannel,
BroadcastReceiver,
],
):
async for value in chan:
print(f'trio received {value}')
pulled.append(value)
if value == 50:
if raise_err:
raise Exception
elif exit_early:
break
if fan_out:
# start a second task that gets the same stream value set.
async with (
# NOTE: this has to come first to avoid
# the channel being closed before the nursery
# tasks are joined..
chan.subscribe() as br,
trio.open_nursery() as n,
):
n.start_soon(consume, br)
await consume(chan)
else:
await consume(chan)
finally:
if (
not raise_err and
not exit_early and
not aio_raise_err
):
if fan_out:
# we get double the pulled values in the
# ``.subscribe()`` fan out case.
doubled = list(itertools.chain(*zip(expect, expect)))
expect = doubled[:len(pulled)]
assert list(sorted(pulled)) == expect
else:
assert pulled == expect
else:
assert not fan_out
assert pulled == expect[:51]
print('trio guest mode task completed!')
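# The fan-out arithmetic used above, in isolation: two consumers of
# one broadcast stream each see every value, so the combined pull is
# the original sequence doubled element-wise.
import itertools

def demo_doubled() -> None:
    expect = [0, 1, 2]
    doubled = list(itertools.chain(*zip(expect, expect)))
    assert doubled == [0, 0, 1, 1, 2, 2]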
@pytest.mark.parametrize(
'fan_out', [False, True],
ids='fan_out_w_chan_subscribe={}'.format
)
def test_basic_interloop_channel_stream(arb_addr, fan_out):
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
stream_from_aio,
infect_asyncio=True,
fan_out=fan_out,
)
await portal.result()
trio.run(main)
# TODO: parametrize the above test and avoid the duplication here?
def test_trio_error_cancels_intertask_chan(arb_addr):
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
stream_from_aio,
raise_err=True,
infect_asyncio=True,
)
# should trigger remote actor error
await portal.result()
with pytest.raises(BaseExceptionGroup) as excinfo:
trio.run(main)
# ensure boxed errors
for exc in excinfo.value.exceptions:
assert exc.type == Exception
def test_trio_closes_early_and_channel_exits(arb_addr):
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
stream_from_aio,
exit_early=True,
infect_asyncio=True,
)
# should trigger remote actor error
await portal.result()
# should be a quiet exit on a simple channel exit
trio.run(main)
def test_aio_errors_and_channel_propagates_and_closes(arb_addr):
async def main():
async with tractor.open_nursery() as n:
portal = await n.run_in_actor(
stream_from_aio,
aio_raise_err=True,
infect_asyncio=True,
)
# should trigger remote actor error
await portal.result()
with pytest.raises(BaseExceptionGroup) as excinfo:
trio.run(main)
# ensure boxed errors
for exc in excinfo.value.exceptions:
assert exc.type == Exception
@tractor.context
async def trio_to_aio_echo_server(
ctx: tractor.Context,
):
async def aio_echo_server(
to_trio: trio.MemorySendChannel,
from_trio: asyncio.Queue,
) -> None:
to_trio.send_nowait('start')
while True:
msg = await from_trio.get()
# echo the msg back
to_trio.send_nowait(msg)
# if we get the terminate sentinel
# break the echo loop
if msg is None:
print('breaking aio echo loop')
break
print('exiting asyncio task')
async with to_asyncio.open_channel_from(
aio_echo_server,
) as (first, chan):
assert first == 'start'
await ctx.started(first)
async with ctx.open_stream() as stream:
async for msg in stream:
print(f'asyncio echoing {msg}')
await chan.send(msg)
out = await chan.receive()
# echo back to parent actor-task
await stream.send(out)
if out is None:
try:
out = await chan.receive()
except trio.EndOfChannel:
break
else:
raise RuntimeError('aio channel never stopped?')
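# A pure-asyncio sketch of the echo loop above, using the same
# ``None``-as-terminate-sentinel convention:
import asyncio

def demo_echo_loop() -> None:
    async def main() -> None:
        q: asyncio.Queue = asyncio.Queue()
        for msg in (1, 2, None):
            await q.put(msg)
        while (msg := await q.get()) is not None:
            print(f'echoing {msg}')
        print('breaking aio echo loop')

    asyncio.run(main())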
@pytest.mark.parametrize(
'raise_error_mid_stream',
[False, Exception, KeyboardInterrupt],
ids='raise_error={}'.format,
)
def test_echoserver_detailed_mechanics(
arb_addr,
raise_error_mid_stream,
):
async def main():
async with tractor.open_nursery() as n:
p = await n.start_actor(
'aio_server',
enable_modules=[__name__],
infect_asyncio=True,
)
async with p.open_context(
trio_to_aio_echo_server,
) as (ctx, first):
assert first == 'start'
async with ctx.open_stream() as stream:
for i in range(100):
await stream.send(i)
out = await stream.receive()
assert i == out
if raise_error_mid_stream and i == 50:
raise raise_error_mid_stream
# send terminate msg
await stream.send(None)
out = await stream.receive()
assert out is None
if out is None:
# ensure the stream is stopped
# with trio.fail_after(0.1):
try:
await stream.receive()
except trio.EndOfChannel:
pass
else:
pytest.fail(
"stream wasn't stopped after sentinel?!")
# TODO: the case where this blocks and
# is cancelled by kbi or out of task cancellation
await p.cancel_actor()
if raise_error_mid_stream:
with pytest.raises(raise_error_mid_stream):
trio.run(main)
else:
trio.run(main)


@ -11,26 +11,32 @@ from conftest import tractor_test
@pytest.mark.trio
async def test_no_runtime():
async def test_no_arbitter():
"""An arbitter must be established before any nurseries
can be created.
(In other words ``tractor.open_root_actor()`` must be engaged at
some point?)
(In other words ``tractor.run`` must be used instead of ``trio.run`` as is
done by the ``pytest-trio`` plugin.)
"""
with pytest.raises(RuntimeError) :
async with tractor.find_actor('doggy'):
with pytest.raises(RuntimeError):
with tractor.open_nursery():
pass
def test_no_main():
"""An async function **must** be passed to ``tractor.run()``.
"""
with pytest.raises(TypeError):
tractor.run(None)
@tractor_test
async def test_self_is_registered(arb_addr):
"Verify waiting on the arbiter to register itself using the standard api."
actor = tractor.current_actor()
assert actor.is_arbiter
with trio.fail_after(0.2):
async with tractor.wait_for_actor('root') as portal:
assert portal.channel.uid[0] == 'root'
async with tractor.wait_for_actor('arbiter') as portal:
assert portal.channel.uid[0] == 'arbiter'
@tractor_test
@ -40,11 +46,8 @@ async def test_self_is_registered_localportal(arb_addr):
assert actor.is_arbiter
async with tractor.get_arbiter(*arb_addr) as portal:
assert isinstance(portal, tractor._portal.LocalPortal)
with trio.fail_after(0.2):
sockaddr = await portal.run_from_ns(
'self', 'wait_for_actor', name='root')
assert sockaddr[0] == arb_addr
sockaddr = await portal.run('self', 'wait_for_actor', name='arbiter')
assert sockaddr[0] == arb_addr
def test_local_actor_async_func(arb_addr):
@ -53,19 +56,15 @@ def test_local_actor_async_func(arb_addr):
nums = []
async def print_loop():
# arbiter is started in-proc if dne
assert tractor.current_actor().is_arbiter
async with tractor.open_root_actor(
arbiter_addr=arb_addr,
):
# arbiter is started in-proc if dne
assert tractor.current_actor().is_arbiter
for i in range(10):
nums.append(i)
await trio.sleep(0.1)
for i in range(10):
nums.append(i)
await trio.sleep(0.1)
start = time.time()
trio.run(print_loop)
tractor.run(print_loop, arbiter_addr=arb_addr)
# ensure the sleeps were actually awaited
assert time.time() - start >= 1


@ -1,11 +1,10 @@
"""
Multiple python programs invoking the runtime.
Multiple python programs invoking ``tractor.run()``
"""
import platform
import time
import pytest
import trio
import tractor
from conftest import (
tractor_test,
@ -46,13 +45,8 @@ async def test_cancel_remote_arbiter(daemon, arb_addr):
def test_register_duplicate_name(daemon, arb_addr):
async def main():
async with tractor.open_nursery(
arbiter_addr=arb_addr,
) as n:
assert not tractor.current_actor().is_arbiter
assert not tractor.current_actor().is_arbiter
async with tractor.open_nursery() as n:
p1 = await n.start_actor('doggy')
p2 = await n.start_actor('doggy')
@ -63,4 +57,4 @@ def test_register_duplicate_name(daemon, arb_addr):
# run it manually since we want to start **after**
# the other "daemon" program
trio.run(main)
tractor.run(main, arbiter_addr=arb_addr)


@ -4,22 +4,20 @@ from itertools import cycle
import pytest
import trio
import tractor
from tractor.experimental import msgpub
from conftest import tractor_test
from tractor.testing import tractor_test
def test_type_checks():
with pytest.raises(TypeError) as err:
@msgpub
@tractor.msg.pub
async def no_get_topics(yo):
yield
assert "must define a `get_topics`" in str(err.value)
with pytest.raises(TypeError) as err:
@msgpub
@tractor.msg.pub
def not_async_gen(yo):
pass
@ -30,27 +28,22 @@ def is_even(i):
return i % 2 == 0
# placeholder for topics getter
_get_topics = None
@msgpub
@tractor.msg.pub
async def pubber(get_topics, seed=10):
# ensure topic subscriptions are as expected
global _get_topics
_get_topics = get_topics
ss = tractor.current_actor().statespace
for i in cycle(range(seed)):
# ensure topic subscriptions are as expected
ss['get_topics'] = get_topics
yield {'even' if is_even(i) else 'odd': i}
await trio.sleep(0.1)
async def subs(
which,
pub_actor_name,
seed=10,
which, pub_actor_name, seed=10,
portal=None,
task_status=trio.TASK_STATUS_IGNORED,
):
if len(which) == 1:
@ -63,49 +56,47 @@ async def subs(
def pred(i):
return isinstance(i, int)
# TODO: https://github.com/goodboy/tractor/issues/207
async with tractor.wait_for_actor(pub_actor_name) as portal:
assert portal
async with portal.open_stream_from(
pubber,
async with tractor.find_actor(pub_actor_name) as portal:
stream = await portal.run(
__name__, 'pubber',
topics=which,
seed=seed,
) as stream:
task_status.started(stream)
times = 10
count = 0
await stream.__anext__()
)
task_status.started(stream)
times = 10
count = 0
await stream.__anext__()
async for pkt in stream:
for topic, value in pkt.items():
assert pred(value)
count += 1
if count >= times:
break
await stream.aclose()
stream = await portal.run(
__name__, 'pubber',
topics=['odd'],
seed=seed,
)
await stream.__anext__()
count = 0
# async with aclosing(stream) as stream:
try:
async for pkt in stream:
for topic, value in pkt.items():
assert pred(value)
pass
# assert pred(value)
count += 1
if count >= times:
break
finally:
await stream.aclose()
async with portal.open_stream_from(
pubber,
topics=['odd'],
seed=seed,
) as stream:
await stream.__anext__()
count = 0
# async with aclosing(stream) as stream:
try:
async for pkt in stream:
for topic, value in pkt.items():
pass
# assert pred(value)
count += 1
if count >= times:
break
finally:
await stream.aclose()
@msgpub(tasks=['one', 'two'])
@tractor.msg.pub(tasks=['one', 'two'])
async def multilock_pubber(get_topics):
yield {'doggy': 10}
@ -133,25 +124,17 @@ async def test_required_args(callwith_expecterror):
await func(**kwargs)
else:
async with tractor.open_nursery() as n:
portal = await n.start_actor(
name='pubber',
enable_modules=[__name__],
)
# await func(**kwargs)
portal = await n.run_in_actor(
'pubber', multilock_pubber, **kwargs)
async with tractor.wait_for_actor('pubber'):
pass
await trio.sleep(0.5)
async with portal.open_stream_from(
multilock_pubber,
**kwargs
) as stream:
async for val in stream:
assert val == {'doggy': 10}
await portal.cancel_actor()
async for val in await portal.result():
assert val == {'doggy': 10}
@pytest.mark.parametrize(
@ -165,49 +148,35 @@ def test_multi_actor_subs_arbiter_pub(
):
"""Try out the neato @pub decorator system.
"""
global _get_topics
async def main():
ss = tractor.current_actor().statespace
async with tractor.open_nursery(
arbiter_addr=arb_addr,
enable_modules=[__name__],
) as n:
async with tractor.open_nursery() as n:
name = 'root'
name = 'arbiter'
if pub_actor == 'streamer':
# start the publisher as a daemon
master_portal = await n.start_actor(
'streamer',
enable_modules=[__name__],
rpc_module_paths=[__name__],
)
name = 'streamer'
even_portal = await n.run_in_actor(
subs,
which=['even'],
name='evens',
pub_actor_name=name
)
'evens', subs, which=['even'], pub_actor_name=name)
odd_portal = await n.run_in_actor(
subs,
which=['odd'],
name='odds',
pub_actor_name=name
)
'odds', subs, which=['odd'], pub_actor_name=name)
async with tractor.wait_for_actor('evens'):
# block until 2nd actor is initialized
pass
if pub_actor == 'arbiter':
# wait for publisher task to be spawned in a local RPC task
while _get_topics is None:
while not ss.get('get_topics'):
await trio.sleep(0.1)
get_topics = _get_topics
get_topics = ss.get('get_topics')
assert 'even' in get_topics()
@ -241,6 +210,7 @@ def test_multi_actor_subs_arbiter_pub(
assert 'even' not in get_topics()
await odd_portal.cancel_actor()
await trio.sleep(2)
if pub_actor == 'arbiter':
while get_topics():
@ -250,7 +220,11 @@ def test_multi_actor_subs_arbiter_pub(
else:
await master_portal.cancel_actor()
trio.run(main)
tractor.run(
main,
arbiter_addr=arb_addr,
rpc_module_paths=[__name__],
)
def test_single_subactor_pub_multitask_subs(
@ -259,14 +233,11 @@ def test_single_subactor_pub_multitask_subs(
):
async def main():
async with tractor.open_nursery(
arbiter_addr=arb_addr,
enable_modules=[__name__],
) as n:
async with tractor.open_nursery() as n:
portal = await n.start_actor(
'streamer',
enable_modules=[__name__],
rpc_module_paths=[__name__],
)
async with tractor.wait_for_actor('streamer'):
# block until 2nd actor is initialized
@ -290,4 +261,8 @@ def test_single_subactor_pub_multitask_subs(
await portal.cancel_actor()
trio.run(main)
tractor.run(
main,
arbiter_addr=arb_addr,
rpc_module_paths=[__name__],
)
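
For orientation, a minimal sketch of the `drop_warn`-side publisher API exercised above, assuming `tractor.msg.pub` still requires a `get_topics` parameter as the type-check test asserts (the function, actor, and topic names are illustrative):

import trio
import tractor

@tractor.msg.pub
async def ticker(get_topics, seed=10):
    # ``get_topics`` is injected by the decorator so the publisher
    # can see which topics currently have subscribers
    for i in range(seed):
        yield {topic: i for topic in get_topics()}
        await trio.sleep(0.1)

# a subscriber elsewhere would consume it via the old streaming API:
#
#   stream = await portal.run(__name__, 'ticker', topics=['even'])
#   async for pkt in stream:
#       ...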

View File

@ -1,182 +0,0 @@
'''
Async context manager cache API testing: ``trionics.maybe_open_context()``.
'''
from contextlib import asynccontextmanager as acm
import platform
from typing import Awaitable
import pytest
import trio
import tractor
_resource: int = 0
@acm
async def maybe_increment_counter(task_name: str):
global _resource
_resource += 1
await trio.lowlevel.checkpoint()
yield _resource
await trio.lowlevel.checkpoint()
_resource -= 1
@pytest.mark.parametrize(
'key_on',
['key_value', 'kwargs'],
ids="key_on={}".format,
)
def test_resource_only_entered_once(key_on):
global _resource
_resource = 0
kwargs = {}
key = None
if key_on == 'key_value':
key = 'some_common_key'
async def main():
cache_active: bool = False
async def enter_cached_mngr(name: str):
nonlocal cache_active
if key_on == 'kwargs':
# make a common kwargs input to key on it
kwargs = {'task_name': 'same_task_name'}
assert key is None
else:
# different task names per task will be used
kwargs = {'task_name': name}
async with tractor.trionics.maybe_open_context(
maybe_increment_counter,
kwargs=kwargs,
key=key,
) as (cache_hit, resource):
if cache_hit:
try:
cache_active = True
assert resource == 1
await trio.sleep_forever()
finally:
cache_active = False
else:
assert resource == 1
await trio.sleep_forever()
with trio.move_on_after(0.5):
async with (
tractor.open_root_actor(),
trio.open_nursery() as n,
):
for i in range(10):
n.start_soon(enter_cached_mngr, f'task_{i}')
await trio.sleep(0.001)
trio.run(main)
@tractor.context
async def streamer(
ctx: tractor.Context,
seq: list[int] = list(range(1000)),
) -> None:
await ctx.started()
async with ctx.open_stream() as stream:
for val in seq:
await stream.send(val)
await trio.sleep(0.001)
print('producer finished')
@acm
async def open_stream() -> Awaitable[tractor.MsgStream]:
async with tractor.open_nursery() as tn:
portal = await tn.start_actor('streamer', enable_modules=[__name__])
async with (
portal.open_context(streamer) as (ctx, first),
ctx.open_stream() as stream,
):
yield stream
await portal.cancel_actor()
print('CANCELLED STREAMER')
@acm
async def maybe_open_stream(taskname: str):
async with tractor.trionics.maybe_open_context(
# NOTE: all secondary tasks should cache hit on the same key
acm_func=open_stream,
) as (cache_hit, stream):
if cache_hit:
print(f'{taskname} loaded from cache')
# add a new broadcast subscription for the quote stream
# if this feed is already allocated by the first
# task that entered
async with stream.subscribe() as bstream:
yield bstream
else:
# yield the actual stream
yield stream
def test_open_local_sub_to_stream():
'''
Verify a single inter-actor stream can be fanned out and shared to
N local tasks using ``trionics.maybe_open_context()``.
'''
timeout = 3 if platform.system() != "Windows" else 10
async def main():
full = list(range(1000))
async def get_sub_and_pull(taskname: str):
async with (
maybe_open_stream(taskname) as stream,
):
if '0' in taskname:
assert isinstance(stream, tractor.MsgStream)
else:
assert isinstance(
stream,
tractor.trionics.BroadcastReceiver
)
first = await stream.receive()
print(f'{taskname} started with value {first}')
seq = []
async for msg in stream:
seq.append(msg)
assert set(seq).issubset(set(full))
print(f'{taskname} finished')
with trio.fail_after(timeout):
# TODO: turns out this isn't multi-task entrant XD
# We probably need an idempotent entry semantic?
async with tractor.open_root_actor():
async with (
trio.open_nursery() as nurse,
):
for i in range(10):
nurse.start_soon(get_sub_and_pull, f'task_{i}')
await trio.sleep(0.001)
print('all consumer tasks finished')
trio.run(main)
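
A condensed sketch of the caching behavior under test, assuming the master-side `tractor.trionics.maybe_open_context()` API shown above (the resource and task names are illustrative):

from contextlib import asynccontextmanager as acm

import trio
import tractor

@acm
async def open_resource(tag: str):
    # stand-in for an expensive setup/teardown pair
    yield f'resource-{tag}'

async def worker(name: str):
    async with tractor.trionics.maybe_open_context(
        acm_func=open_resource,
        kwargs={'tag': 'shared'},  # identical kwargs -> same cache key
    ) as (cache_hit, value):
        # while the first entrant holds the context open, later
        # entrants should cache-hit on the same underlying value
        print(name, cache_hit, value)
        await trio.sleep(0.1)  # overlap so the cache stays warm

async def main():
    async with tractor.open_root_actor():
        async with trio.open_nursery() as n:
            for i in range(3):
                n.start_soon(worker, f'task_{i}')

trio.run(main)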

View File

@ -53,7 +53,7 @@ def test_rpc_errors(arb_addr, to_call, testdir):
exposed_mods, funcname, inside_err = to_call
subactor_exposed_mods = []
func_defined = globals().get(funcname, False)
subactor_requests_to = 'root'
subactor_requests_to = 'arbiter'
remote_err = tractor.RemoteActorError
# remote module that fails at import time
@ -74,31 +74,29 @@ def test_rpc_errors(arb_addr, to_call, testdir):
remote_err = inside_err
async def main():
actor = tractor.current_actor()
assert actor.is_arbiter
# spawn a subactor which calls us back
async with tractor.open_nursery(
arbiter_addr=arb_addr,
enable_modules=exposed_mods.copy(),
) as n:
actor = tractor.current_actor()
assert actor.is_arbiter
async with tractor.open_nursery() as n:
await n.run_in_actor(
'subactor',
sleep_back_actor,
actor_name=subactor_requests_to,
name='subactor',
# function from the local exposed module space
# the subactor will invoke when it RPCs back to this actor
func_name=funcname,
exposed_mods=exposed_mods,
func_defined=True if func_defined else False,
enable_modules=subactor_exposed_mods,
rpc_module_paths=subactor_exposed_mods,
)
def run():
trio.run(main)
tractor.run(
main,
arbiter_addr=arb_addr,
rpc_module_paths=exposed_mods.copy(),
)
# handle both parameterized cases
if exposed_mods and func_defined:

View File

@ -1,73 +0,0 @@
"""
Verifying internal runtime state and undocumented extras.
"""
import os
import pytest
import trio
import tractor
from conftest import tractor_test
_file_path: str = ''
def unlink_file():
print('Removing tmp file!')
os.remove(_file_path)
async def crash_and_clean_tmpdir(
tmp_file_path: str,
error: bool = True,
):
global _file_path
_file_path = tmp_file_path
actor = tractor.current_actor()
actor.lifetime_stack.callback(unlink_file)
assert os.path.isfile(tmp_file_path)
await trio.sleep(0.1)
if error:
assert 0
else:
actor.cancel_soon()
@pytest.mark.parametrize(
'error_in_child',
[True, False],
)
@tractor_test
async def test_lifetime_stack_wipes_tmpfile(
tmp_path,
error_in_child: bool,
):
child_tmp_file = tmp_path / "child.txt"
child_tmp_file.touch()
assert child_tmp_file.exists()
path = str(child_tmp_file)
try:
with trio.move_on_after(0.5):
async with tractor.open_nursery() as n:
await ( # inlined portal
await n.run_in_actor(
crash_and_clean_tmpdir,
tmp_file_path=path,
error=error_in_child,
)
).result()
except (
tractor.RemoteActorError,
tractor.BaseExceptionGroup,
):
pass
# tmp file should have been wiped by
# teardown stack.
assert not child_tmp_file.exists()
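
The hook being verified, in isolation; a sketch assuming `Actor.lifetime_stack` is the `ExitStack`-like registry the test implies (the callback body is illustrative):

import tractor

async def register_cleanup(path: str):
    actor = tractor.current_actor()
    # callbacks on the lifetime stack run during actor teardown,
    # whether the actor exits cleanly or crashes
    actor.lifetime_stack.callback(lambda: print(f'cleaning up {path}'))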

View File

@ -1,71 +1,56 @@
"""
Spawning basics
"""
from typing import Optional
import pytest
import trio
import tractor
from conftest import tractor_test
data_to_pass_down = {'doggy': 10, 'kitty': 4}
statespace = {'doggy': 10, 'kitty': 4}
async def spawn(
is_arbiter: bool,
data: dict,
arb_addr: tuple[str, int],
):
async def spawn(is_arbiter):
namespaces = [__name__]
await trio.sleep(0.1)
actor = tractor.current_actor()
assert actor.is_arbiter == is_arbiter
assert actor.statespace == statespace
async with tractor.open_root_actor(
arbiter_addr=arb_addr,
):
if actor.is_arbiter:
async with tractor.open_nursery() as nursery:
# forks here
portal = await nursery.run_in_actor(
'sub-actor',
spawn,
is_arbiter=False,
statespace=statespace,
rpc_module_paths=namespaces,
)
actor = tractor.current_actor()
assert actor.is_arbiter == is_arbiter
data = data_to_pass_down
if actor.is_arbiter:
async with tractor.open_nursery(
) as nursery:
# forks here
portal = await nursery.run_in_actor(
spawn,
is_arbiter=False,
name='sub-actor',
data=data,
arb_addr=arb_addr,
enable_modules=namespaces,
)
assert len(nursery._children) == 1
assert portal.channel.uid in tractor.current_actor()._peers
# be sure we can still get the result
result = await portal.result()
assert result == 10
return result
else:
return 10
assert len(nursery._children) == 1
assert portal.channel.uid in tractor.current_actor()._peers
# be sure we can still get the result
result = await portal.result()
assert result == 10
return result
else:
return 10
def test_local_arbiter_subactor_global_state(arb_addr):
result = trio.run(
result = tractor.run(
spawn,
True,
data_to_pass_down,
arb_addr,
name='arbiter',
statespace=statespace,
arbiter_addr=arb_addr,
)
assert result == 10
async def movie_theatre_question():
def movie_theatre_question():
"""A question asked in a dark theatre, in a tangent
(errr, I mean different) process.
"""
@ -81,12 +66,12 @@ async def test_movie_theatre_convo(start_method):
portal = await n.start_actor(
'frank',
# enable the actor to run funcs from this current module
enable_modules=[__name__],
rpc_module_paths=[__name__],
)
print(await portal.run(movie_theatre_question))
print(await portal.run(__name__, 'movie_theatre_question'))
# call the subactor a 2nd time
print(await portal.run(movie_theatre_question))
print(await portal.run(__name__, 'movie_theatre_question'))
# the async with will block here indefinitely waiting
# for our actor "frank" to complete, we cancel 'frank'
@ -94,38 +79,21 @@ async def test_movie_theatre_convo(start_method):
await portal.cancel_actor()
async def cellar_door(return_value: Optional[str]):
return return_value
def cellar_door():
return "Dang that's beautiful"
@pytest.mark.parametrize(
'return_value', ["Dang that's beautiful", None],
ids=['return_str', 'return_None'],
)
@tractor_test
async def test_most_beautiful_word(
start_method,
return_value
):
'''
The main ``tractor`` routine.
async def test_most_beautiful_word(start_method):
"""The main ``tractor`` routine.
"""
async with tractor.open_nursery() as n:
'''
with trio.fail_after(1):
async with tractor.open_nursery() as n:
portal = await n.run_in_actor('some_linguist', cellar_door)
portal = await n.run_in_actor(
cellar_door,
return_value=return_value,
name='some_linguist',
)
print(await portal.result())
# The ``async with`` will unblock here since the 'some_linguist'
# actor has completed its main task ``cellar_door``.
# this should pull the cached final result already captured during
# the nursery block exit.
print(await portal.result())
@ -142,27 +110,27 @@ def test_loglevel_propagated_to_subactor(
capfd,
arb_addr,
):
if start_method == 'mp_forkserver':
if start_method == 'forkserver':
pytest.skip(
"a bug with `capfd` seems to make forkserver capture not work?")
level = 'critical'
async def main():
async with tractor.open_nursery(
name='arbiter',
start_method=start_method,
arbiter_addr=arb_addr,
) as tn:
async with tractor.open_nursery() as tn:
await tn.run_in_actor(
'log_checker',
check_loglevel,
loglevel=level,
level=level,
)
trio.run(main)
tractor.run(
main,
name='arbiter',
loglevel=level,
start_method=start_method,
arbiter_addr=arb_addr,
)
# ensure subactor spits log message on stderr
captured = capfd.readouterr()
assert 'yoyoyo' in captured.err

View File

@ -9,8 +9,6 @@ import trio
import tractor
import pytest
from conftest import tractor_test
def test_must_define_ctx():
@ -33,16 +31,13 @@ async def async_gen_stream(sequence):
# block indefinitely waiting to be cancelled by ``aclose()`` call
with trio.CancelScope() as cs:
await trio.sleep_forever()
await trio.sleep(float('inf'))
assert 0
assert cs.cancelled_caught
@tractor.stream
async def context_stream(
ctx: tractor.Context,
sequence
):
async def context_stream(ctx, sequence):
for i in sequence:
await ctx.send_yield(i)
await trio.sleep(0.1)
@ -54,89 +49,70 @@ async def context_stream(
assert cs.cancelled_caught
async def stream_from_single_subactor(
arb_addr,
start_method,
stream_func,
):
async def stream_from_single_subactor(stream_func_name):
"""Verify we can spawn a daemon actor and retrieve streamed data.
"""
# only one per host address, spawns an actor if None
async with tractor.open_nursery(
arbiter_addr=arb_addr,
start_method=start_method,
) as nursery:
async with tractor.find_actor('streamerd') as portals:
if not portals:
async with tractor.find_actor('streamerd') as portals:
if not portals:
# only one per host address, spawns an actor if None
async with tractor.open_nursery() as nursery:
# no brokerd actor found
portal = await nursery.start_actor(
'streamerd',
enable_modules=[__name__],
rpc_module_paths=[__name__],
statespace={'global_dict': {}},
)
seq = range(10)
with trio.fail_after(5):
async with portal.open_stream_from(
stream_func,
sequence=list(seq), # has to be msgpack serializable
) as stream:
# it'd sure be nice to have an asyncitertools here...
iseq = iter(seq)
stream = await portal.run(
__name__,
stream_func_name, # one of the funcs above
sequence=list(seq), # has to be msgpack serializable
)
# it'd sure be nice to have an asyncitertools here...
iseq = iter(seq)
ival = next(iseq)
async for val in stream:
assert val == ival
try:
ival = next(iseq)
except StopIteration:
# should cancel far end task which will be
# caught and no error is raised
await stream.aclose()
async for val in stream:
assert val == ival
try:
ival = next(iseq)
except StopIteration:
# should cancel far end task which will be
# caught and no error is raised
await stream.aclose()
await trio.sleep(0.3)
# ensure EOC signalled-state translates
# XXX: not really sure this is correct,
# shouldn't it be a `ClosedResourceError`?
try:
await stream.__anext__()
except StopAsyncIteration:
# stop all spawned subactors
await portal.cancel_actor()
await trio.sleep(0.3)
try:
await stream.__anext__()
except StopAsyncIteration:
# stop all spawned subactors
await portal.cancel_actor()
# await nursery.cancel()
@pytest.mark.parametrize(
'stream_func', [async_gen_stream, context_stream]
'stream_func', ['async_gen_stream', 'context_stream']
)
def test_stream_from_single_subactor(arb_addr, start_method, stream_func):
"""Verify streaming from a spawned async generator.
"""
trio.run(
tractor.run(
partial(
stream_from_single_subactor,
arb_addr,
start_method,
stream_func=stream_func,
stream_func_name=stream_func,
),
arbiter_addr=arb_addr,
start_method=start_method,
)
# this is the first 2 actors, streamer_1 and streamer_2
async def stream_data(seed):
for i in range(seed):
yield i
# trigger scheduler to simulate practical usage
await trio.sleep(0.0001)
await trio.sleep(0)
# this is the third actor; the aggregator
@ -150,7 +126,7 @@ async def aggregate(seed):
# fork point
portal = await nursery.start_actor(
name=f'streamer_{i}',
enable_modules=[__name__],
rpc_module_paths=[__name__],
)
portals.append(portal)
@ -159,14 +135,11 @@ async def aggregate(seed):
async def push_to_chan(portal, send_chan):
async with send_chan:
async with portal.open_stream_from(
stream_data, seed=seed,
) as stream:
async for value in stream:
# leverage trio's built-in backpressure
await send_chan.send(value)
async for value in await portal.run(
__name__, 'stream_data', seed=seed
):
# leverage trio's built-in backpressure
await send_chan.send(value)
print(f"FINISHED ITERATING {portal.channel.uid}")
@ -204,31 +177,28 @@ async def a_quadruple_example():
seed = int(1e3)
pre_start = time.time()
portal = await nursery.start_actor(
name='aggregator',
enable_modules=[__name__],
portal = await nursery.run_in_actor(
'aggregator',
aggregate,
seed=seed,
)
start = time.time()
# the portal call returns exactly what you'd expect
# as if the remote "aggregate" function was called locally
result_stream = []
async with portal.open_stream_from(aggregate, seed=seed) as stream:
async for value in stream:
result_stream.append(value)
async for value in await portal.result():
result_stream.append(value)
print(f"STREAM TIME = {time.time() - start}")
print(f"STREAM + SPAWN TIME = {time.time() - pre_start}")
assert result_stream == list(range(seed))
await portal.cancel_actor()
return result_stream
async def cancel_after(wait, arb_addr):
async with tractor.open_root_actor(arbiter_addr=arb_addr):
with trio.move_on_after(wait):
return await a_quadruple_example()
async def cancel_after(wait):
with trio.move_on_after(wait):
return await a_quadruple_example()
@pytest.fixture(scope='module')
@ -240,7 +210,7 @@ def time_quad_ex(arb_addr, ci_env, spawn_backend):
timeout = 7 if platform.system() in ('Windows', 'Darwin') else 4
start = time.time()
results = trio.run(cancel_after, timeout, arb_addr)
results = tractor.run(cancel_after, timeout, arbiter_addr=arb_addr)
diff = time.time() - start
assert results
return results, diff
@ -251,7 +221,7 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend):
results, diff = time_quad_ex
assert results
this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 3
this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 2.5
assert diff < this_fast
@ -267,7 +237,7 @@ def test_not_fast_enough_quad(
"""
results, diff = time_quad_ex
delay = max(diff - cancel_delay, 0)
results = trio.run(cancel_after, delay, arb_addr)
results = tractor.run(cancel_after, delay, arbiter_addr=arb_addr)
system = platform.system()
if system in ('Windows', 'Darwin') and results is not None:
# In CI environments it seems later runs are quicker than the first
@ -276,77 +246,3 @@ def test_not_fast_enough_quad(
else:
# should be cancelled mid-streaming
assert results is None
@tractor_test
async def test_respawn_consumer_task(
arb_addr,
spawn_backend,
loglevel,
):
"""Verify that ``._portal.ReceiveStream.shield()``
successfully protects the underlying IPC channel from being closed
when cancelling and respawning a consumer task.
This also serves to verify that all values from the stream can be
received despite the respawns.
"""
stream = None
async with tractor.open_nursery() as n:
portal = await n.start_actor(
name='streamer',
enable_modules=[__name__]
)
async with portal.open_stream_from(
stream_data,
seed=11,
) as stream:
expect = set(range(11))
received = []
# this is the re-spawn task routine
async def consume(task_status=trio.TASK_STATUS_IGNORED):
print('starting consume task..')
nonlocal stream
with trio.CancelScope() as cs:
task_status.started(cs)
# shield stream's underlying channel from cancellation
# with stream.shield():
async for v in stream:
print(f'from stream: {v}')
expect.remove(v)
received.append(v)
print('exited consume')
async with trio.open_nursery() as ln:
cs = await ln.start(consume)
while True:
await trio.sleep(0.1)
if received[-1] % 2 == 0:
print('cancelling consume task..')
cs.cancel()
# respawn
cs = await ln.start(consume)
if not expect:
print("all values streamed, BREAKING")
break
cs.cancel()
# TODO: this is justification for a
# ``ActorNursery.stream_from_actor()`` helper?
await portal.cancel_actor()
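
Distilled from the master-side hunks above, the context-managed streaming pattern in its simplest form (a sketch; the actor name is illustrative):

import trio
import tractor

async def stream_data(seed):
    for i in range(seed):
        yield i

async def main():
    async with tractor.open_nursery() as n:
        portal = await n.start_actor(
            'streamer',
            enable_modules=[__name__],
        )
        # ``open_stream_from()`` wraps the remote async-gen in a
        # stream which is closed for you on block exit
        async with portal.open_stream_from(
            stream_data,
            seed=10,
        ) as stream:
            async for value in stream:
                print(value)
        await portal.cancel_actor()

trio.run(main)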

View File

@ -1,514 +0,0 @@
"""
Broadcast channels for fan-out to local tasks.
"""
from contextlib import asynccontextmanager
from functools import partial
from itertools import cycle
import time
from typing import Optional
import pytest
import trio
from trio.lowlevel import current_task
import tractor
from tractor.trionics import (
broadcast_receiver,
Lagged,
)
@tractor.context
async def echo_sequences(
ctx: tractor.Context,
) -> None:
'''Bidir streaming endpoint which will stream
back any sequence it is sent item-wise.
'''
await ctx.started()
async with ctx.open_stream() as stream:
async for sequence in stream:
seq = list(sequence)
for value in seq:
await stream.send(value)
print(f'producer sent {value}')
async def ensure_sequence(
stream: tractor.MsgStream,
sequence: list,
delay: Optional[float] = None,
) -> None:
name = current_task().name
async with stream.subscribe() as bcaster:
assert not isinstance(bcaster, type(stream))
async for value in bcaster:
print(f'{name} rx: {value}')
assert value == sequence[0]
sequence.remove(value)
if delay:
await trio.sleep(delay)
if not sequence:
# fully consumed
break
@asynccontextmanager
async def open_sequence_streamer(
sequence: list[int],
arb_addr: tuple[str, int],
start_method: str,
) -> tractor.MsgStream:
async with tractor.open_nursery(
arbiter_addr=arb_addr,
start_method=start_method,
) as tn:
portal = await tn.start_actor(
'sequence_echoer',
enable_modules=[__name__],
)
async with portal.open_context(
echo_sequences,
) as (ctx, first):
assert first is None
async with ctx.open_stream(backpressure=True) as stream:
yield stream
await portal.cancel_actor()
def test_stream_fan_out_to_local_subscriptions(
arb_addr,
start_method,
):
sequence = list(range(1000))
async def main():
async with open_sequence_streamer(
sequence,
arb_addr,
start_method,
) as stream:
async with trio.open_nursery() as n:
for i in range(10):
n.start_soon(
ensure_sequence,
stream,
sequence.copy(),
name=f'consumer_{i}',
)
await stream.send(tuple(sequence))
async for value in stream:
print(f'source stream rx: {value}')
assert value == sequence[0]
sequence.remove(value)
if not sequence:
# fully consumed
break
trio.run(main)
@pytest.mark.parametrize(
'task_delays',
[
(0.01, 0.001),
(0.001, 0.01),
]
)
def test_consumer_and_parent_maybe_lag(
arb_addr,
start_method,
task_delays,
):
async def main():
sequence = list(range(300))
parent_delay, sub_delay = task_delays
async with open_sequence_streamer(
sequence,
arb_addr,
start_method,
) as stream:
try:
async with trio.open_nursery() as n:
n.start_soon(
ensure_sequence,
stream,
sequence.copy(),
sub_delay,
name='consumer_task',
)
await stream.send(tuple(sequence))
# async for value in stream:
lagged = False
lag_count = 0
while True:
try:
value = await stream.receive()
print(f'source stream rx: {value}')
if lagged:
# reset the sequence starting at our last
# value
sequence = sequence[sequence.index(value) + 1:]
else:
assert value == sequence[0]
sequence.remove(value)
lagged = False
except Lagged:
lagged = True
print(f'source stream lagged after {value}')
lag_count += 1
continue
# lag the parent
await trio.sleep(parent_delay)
if not sequence:
# fully consumed
break
print(f'parent + source stream lagged: {lag_count}')
if parent_delay > sub_delay:
assert lag_count > 0
except Lagged:
# child was lagged
assert parent_delay < sub_delay
trio.run(main)
def test_faster_task_to_recv_is_cancelled_by_slower(
arb_addr,
start_method,
):
'''
Ensure that if a faster task consuming from a stream is cancelled
the slower task can continue to receive all expected values.
'''
async def main():
sequence = list(range(1000))
async with open_sequence_streamer(
sequence,
arb_addr,
start_method,
) as stream:
async with trio.open_nursery() as n:
n.start_soon(
ensure_sequence,
stream,
sequence.copy(),
0,
name='consumer_task',
)
await stream.send(tuple(sequence))
# pull 3 values, cancel the subtask, then
# expect to be able to pull all values still
for i in range(20):
try:
value = await stream.receive()
print(f'source stream rx: {value}')
await trio.sleep(0.01)
except Lagged:
print(f'parent overrun after {value}')
continue
print('cancelling faster subtask')
n.cancel_scope.cancel()
try:
value = await stream.receive()
print(f'source stream after cancel: {value}')
except Lagged:
print(f'parent overrun after {value}')
# expect to see all remaining values
with trio.fail_after(0.5):
async for value in stream:
assert stream._broadcaster._state.recv_ready is None
print(f'source stream rx: {value}')
if value == 999:
# fully consumed and we missed no values once
# the faster subtask was cancelled
break
# await tractor.breakpoint()
# await stream.receive()
print(f'final value: {value}')
trio.run(main)
def test_subscribe_errors_after_close():
async def main():
size = 1
tx, rx = trio.open_memory_channel(size)
async with broadcast_receiver(rx, size) as brx:
pass
try:
# open and close
async with brx.subscribe():
pass
except trio.ClosedResourceError:
assert brx.key not in brx._state.subs
else:
assert 0
trio.run(main)
def test_ensure_slow_consumers_lag_out(
arb_addr,
start_method,
):
'''This is a pure local task test; no tractor
machinery is really required.
'''
async def main():
# make sure it all works within the runtime
async with tractor.open_root_actor():
num_laggers = 4
laggers: dict[str, int] = {}
retries = 3
size = 100
tx, rx = trio.open_memory_channel(size)
brx = broadcast_receiver(rx, size)
async def sub_and_print(
delay: float,
) -> None:
task = current_task()
start = time.time()
async with brx.subscribe() as lbrx:
while True:
print(f'{task.name}: starting consume loop')
try:
async for value in lbrx:
print(f'{task.name}: {value}')
await trio.sleep(delay)
if task.name == 'sub_1':
# trigger checkpoint to clean out other subs
await trio.sleep(0.01)
# the non-lagger got
# a ``trio.EndOfChannel``
# because the ``tx`` below was closed
assert len(lbrx._state.subs) == 1
await lbrx.aclose()
assert len(lbrx._state.subs) == 0
except trio.ClosedResourceError:
# only the fast sub will try to re-enter
# iteration on the now closed bcaster
assert task.name == 'sub_1'
return
except Lagged:
lag_time = time.time() - start
lags = laggers[task.name]
print(
f'restarting slow task {task.name} '
f'that bailed out on {lags}:{value} '
f'after {lag_time:.3f}')
if lags <= retries:
laggers[task.name] += 1
continue
else:
print(
f'{task.name} was too slow and terminated '
f'on {lags}:{value}')
return
async with trio.open_nursery() as nursery:
for i in range(1, num_laggers):
task_name = f'sub_{i}'
laggers[task_name] = 0
nursery.start_soon(
partial(
sub_and_print,
delay=i*0.001,
),
name=task_name,
)
# allow subs to sched
await trio.sleep(0.1)
async with tx:
for i in cycle(range(size)):
await tx.send(i)
if len(brx._state.subs) == 2:
# only one, the non lagger, sub is left
break
# the non-lagger
assert laggers.pop('sub_1') == 0
for n, v in laggers.items():
assert v == 4
assert tx._closed
assert not tx._state.open_send_channels
# check that "first" bcaster that we created
# above, never was iterated and is thus overrun
try:
await brx.receive()
except Lagged:
# expect tokio style index truncation
seq = brx._state.subs[brx.key]
assert seq == len(brx._state.queue) - 1
# all backpressured entries in the underlying
# channel should have been copied into the caster
# queue trailing-window
async for i in rx:
print(f'bped: {i}')
assert i in brx._state.queue
# should be noop
await brx.aclose()
trio.run(main)
def test_first_recver_is_cancelled():
async def main():
# make sure it all works within the runtime
async with tractor.open_root_actor():
tx, rx = trio.open_memory_channel(1)
brx = broadcast_receiver(rx, 1)
cs = trio.CancelScope()
async def sub_and_recv():
with cs:
async with brx.subscribe() as bc:
async for value in bc:
print(value)
async def cancel_and_send():
await trio.sleep(0.2)
cs.cancel()
await tx.send(1)
async with trio.open_nursery() as n:
n.start_soon(sub_and_recv)
await trio.sleep(0.1)
assert brx._state.recv_ready
n.start_soon(cancel_and_send)
# ensure that we don't hang because no-task is now
# waiting on the underlying receive..
with trio.fail_after(0.5):
value = await brx.receive()
print(f'parent: {value}')
assert value == 1
trio.run(main)
def test_no_raise_on_lag():
'''
Run a simple 2-task broadcast where one task is slow but configured
so that it does not raise `Lagged` on overruns using
`raise_on_lag=False` and verify that the task does not raise.
'''
size = 100
tx, rx = trio.open_memory_channel(size)
brx = broadcast_receiver(rx, size)
async def slow():
async with brx.subscribe(
raise_on_lag=False,
) as br:
async for msg in br:
print(f'slow task got: {msg}')
await trio.sleep(0.1)
async def fast():
async with brx.subscribe() as br:
async for msg in br:
print(f'fast task got: {msg}')
async def main():
async with (
tractor.open_root_actor(
# NOTE: so we see the warning msg emitted by the bcaster
# internals when the no raise flag is set.
loglevel='warning',
),
trio.open_nursery() as n,
):
n.start_soon(slow)
n.start_soon(fast)
for i in range(1000):
await tx.send(i)
# simulate user nailing ctl-c after realizing
# there's a lag in the slow task.
await trio.sleep(1)
raise KeyboardInterrupt
with pytest.raises(KeyboardInterrupt):
trio.run(main)
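
The local fan-out primitive these tests exercise, reduced to its core; a sketch using only the `broadcast_receiver()` API shown above (task names are illustrative):

import trio
from tractor.trionics import broadcast_receiver

async def main():
    tx, rx = trio.open_memory_channel(16)
    # wrap the lone receiver so N tasks can each get every value
    async with broadcast_receiver(rx, 16) as brx:

        async def consumer(name: str):
            async with brx.subscribe() as sub:
                async for value in sub:
                    print(name, 'got', value)

        async with trio.open_nursery() as n:
            n.start_soon(consumer, 'sub_a')
            n.start_soon(consumer, 'sub_b')
            await trio.sleep(0.1)  # let subscribers come up first
            async with tx:
                for i in range(5):
                    await tx.send(i)
            # closing ``tx`` ends iteration in both subscribers

trio.run(main)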

View File

@ -1,82 +0,0 @@
'''
Reminders for oddities in `trio` that we need to stay aware of and/or
want to see changed.
'''
import pytest
import trio
from trio_typing import TaskStatus
@pytest.mark.parametrize(
'use_start_soon', [
pytest.param(
True,
marks=pytest.mark.xfail(reason="see python-trio/trio#2258")
),
False,
]
)
def test_stashed_child_nursery(use_start_soon):
_child_nursery = None
async def waits_on_signal(
ev: trio.Event,
task_status: TaskStatus[trio.Nursery] = trio.TASK_STATUS_IGNORED,
):
'''
Do some stuff, then signal other tasks, then yield back to "starter".
'''
await ev.wait()
task_status.started()
async def mk_child_nursery(
task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
):
'''
Allocate a child sub-nursery and stash it as a global.
'''
nonlocal _child_nursery
async with trio.open_nursery() as cn:
_child_nursery = cn
task_status.started(cn)
# block until cancelled by parent.
await trio.sleep_forever()
async def sleep_and_err(
ev: trio.Event,
task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
):
await trio.sleep(0.5)
doggy() # noqa
ev.set()
task_status.started()
async def main():
async with (
trio.open_nursery() as pn,
):
cn = await pn.start(mk_child_nursery)
assert cn
ev = trio.Event()
if use_start_soon:
# this causes inf hang
cn.start_soon(sleep_and_err, ev)
else:
# this does not.
await cn.start(sleep_and_err, ev)
with trio.fail_after(1):
await cn.start(waits_on_signal, ev)
with pytest.raises(NameError):
trio.run(main)
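
The `trio` semantics this test leans on, in isolation: `await nursery.start()` blocks until the child task calls `task_status.started()`, while `start_soon()` merely schedules it. A minimal standalone sketch:

import trio

async def child(task_status=trio.TASK_STATUS_IGNORED):
    await trio.sleep(0.1)  # setup work happens first
    task_status.started('ready')  # unblocks the ``start()`` caller
    await trio.sleep_forever()

async def main():
    async with trio.open_nursery() as n:
        value = await n.start(child)  # waits for ``started()``
        assert value == 'ready'
        n.cancel_scope.cancel()

trio.run(main)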

View File

@ -1,86 +1,174 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
tractor: structured concurrent "actors".
tractor: An actor model micro-framework built on
``trio`` and ``multiprocessing``.
"""
from exceptiongroup import BaseExceptionGroup
import importlib
from functools import partial
from typing import Tuple, Any, Optional, List
import typing
from ._clustering import open_actor_cluster
from ._ipc import Channel
from ._streaming import (
Context,
MsgStream,
stream,
context,
)
from ._discovery import (
get_arbiter,
find_actor,
wait_for_actor,
query_actor,
)
from ._supervise import open_nursery
from ._state import (
current_actor,
is_root_process,
)
from ._exceptions import (
RemoteActorError,
ModuleNotExposed,
ContextCancelled,
)
from ._debug import (
breakpoint,
post_mortem,
)
import trio # type: ignore
from trio import MultiError
from . import log
from ._ipc import _connect_chan, Channel
from ._streaming import Context, stream
from ._discovery import get_arbiter, find_actor, wait_for_actor
from ._actor import Actor, _start_actor, Arbiter
from ._trionics import open_nursery
from ._state import current_actor
from . import _state
from ._exceptions import RemoteActorError, ModuleNotExposed
from ._debug import breakpoint, post_mortem
from . import msg
from ._root import (
run_daemon,
open_root_actor,
)
from ._portal import Portal
from ._runtime import Actor
from . import _spawn
__all__ = [
'Actor',
'Channel',
'Context',
'ContextCancelled',
'ModuleNotExposed',
'MsgStream',
'BaseExceptionGroup',
'Portal',
'RemoteActorError',
'breakpoint',
'context',
'post_mortem',
'current_actor',
'find_actor',
'get_arbiter',
'is_root_process',
'msg',
'open_actor_cluster',
'open_nursery',
'open_root_actor',
'post_mortem',
'query_actor',
'run_daemon',
'stream',
'to_asyncio',
'wait_for_actor',
'Channel',
'Context',
'stream',
'MultiError',
'RemoteActorError',
'ModuleNotExposed',
'msg'
]
# set at startup and after forks
_default_arbiter_host = '127.0.0.1'
_default_arbiter_port = 1616
async def _main(
async_fn: typing.Callable[..., typing.Awaitable],
args: Tuple,
arbiter_addr: Tuple[str, int],
name: Optional[str] = None,
start_method: Optional[str] = None,
debug_mode: bool = False,
**kwargs,
) -> typing.Any:
"""Async entry point for ``tractor``.
"""
logger = log.get_logger('tractor')
if start_method is not None:
_spawn.try_set_start_method(start_method)
if debug_mode and _spawn._spawn_method == 'trio':
_state._runtime_vars['_debug_mode'] = True
# expose internal debug module to every actor allowing
# for use of ``await tractor.breakpoint()``
kwargs.setdefault('rpc_module_paths', []).append('tractor._debug')
elif debug_mode:
raise RuntimeError("Debug mode is only supported for the `trio` backend!")
main = partial(async_fn, *args)
arbiter_addr = (host, port) = arbiter_addr or (
_default_arbiter_host,
_default_arbiter_port
)
loglevel = kwargs.get('loglevel', log.get_loglevel())
if loglevel is not None:
log._default_loglevel = loglevel
log.get_console_log(loglevel)
# make a temporary connection to see if an arbiter exists
arbiter_found = False
try:
async with _connect_chan(host, port):
arbiter_found = True
except OSError:
logger.warning(f"No actor could be found @ {host}:{port}")
# create a local actor and start up its main routine/task
if arbiter_found: # we were able to connect to an arbiter
logger.info(f"Arbiter seems to exist @ {host}:{port}")
actor = Actor(
name or 'anonymous',
arbiter_addr=arbiter_addr,
**kwargs
)
host, port = (host, 0)
else:
# start this local actor as the arbiter
actor = Arbiter(
name or 'arbiter', arbiter_addr=arbiter_addr, **kwargs)
# ``Actor._async_main()`` creates an internal nursery if one is not
# provided and thus blocks here until its main task completes.
# Note that if the current actor is the arbiter it is desirable
# for it to stay up indefinitely until a re-election process has
# taken place (which is not implemented yet, FYI).
return await _start_actor(
actor, main, host, port, arbiter_addr=arbiter_addr
)
def run(
async_fn: typing.Callable[..., typing.Awaitable],
*args,
name: Optional[str] = None,
arbiter_addr: Tuple[str, int] = (
_default_arbiter_host,
_default_arbiter_port,
),
# either the `multiprocessing` start method:
# https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
# OR `trio` (the new default).
start_method: Optional[str] = None,
debug_mode: bool = False,
**kwargs,
) -> Any:
"""Run a trio-actor async function in process.
This is tractor's main entry and the start point for any async actor.
"""
# mark top most level process as root actor
_state._runtime_vars['_is_root'] = True
return trio.run(
partial(
# our entry
_main,
# user entry point
async_fn,
args,
# global kwargs
arbiter_addr=arbiter_addr,
name=name,
start_method=start_method,
debug_mode=debug_mode,
**kwargs,
)
)
def run_daemon(
rpc_module_paths: List[str],
**kwargs
) -> None:
"""Spawn daemon actor which will respond to RPC.
This is a convenience wrapper around
``tractor.run(trio.sleep(float('inf')))`` such that the first actor spawned
is meant to run forever responding to RPC requests.
"""
kwargs['rpc_module_paths'] = list(rpc_module_paths)
for path in rpc_module_paths:
importlib.import_module(path)
return run(partial(trio.sleep, float('inf')), **kwargs)
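
The two entrypoint styles this file swaps between, side by side; a sketch based only on the signatures above (`main` and the address are illustrative):

import trio
import tractor

async def main():
    async with tractor.open_nursery() as n:
        ...  # spawn subactors here

# drop_warn side: the framework owns the event loop
# tractor.run(main, arbiter_addr=('127.0.0.1', 1616))

# master side: plain ``trio.run`` plus an explicit root actor
async def wrapped():
    async with tractor.open_root_actor(
        arbiter_addr=('127.0.0.1', 1616),
    ):
        await main()

trio.run(wrapped)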

tractor/_actor.py 100644 (1097 changed lines)

File diff suppressed because it is too large

View File

@ -1,22 +1,4 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This is the "bootloader" for actors started using the native trio backend.
"""This is the "bootloader" for actors started using the native trio backend.
"""
import sys
import trio
@ -24,7 +6,7 @@ import argparse
from ast import literal_eval
from ._runtime import Actor
from ._actor import Actor
from ._entry import _trio_main
@ -37,15 +19,12 @@ def parse_ipaddr(arg):
return (str(host), int(port))
from ._entry import _trio_main
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--uid", type=parse_uid)
parser.add_argument("--loglevel", type=str)
parser.add_argument("--parent_addr", type=parse_ipaddr)
parser.add_argument("--asyncio", action='store_true')
args = parser.parse_args()
subactor = Actor(
@ -57,6 +36,5 @@ if __name__ == "__main__":
_trio_main(
subactor,
parent_addr=args.parent_addr,
infect_asyncio=args.asyncio,
)
parent_addr=args.parent_addr
)

View File

@ -1,74 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Actor cluster helpers.
'''
from __future__ import annotations
from contextlib import asynccontextmanager as acm
from multiprocessing import cpu_count
from typing import AsyncGenerator, Optional
import trio
import tractor
@acm
async def open_actor_cluster(
modules: list[str],
count: int = cpu_count(),
names: list[str] | None = None,
hard_kill: bool = False,
# passed through verbatim to ``open_root_actor()``
**runtime_kwargs,
) -> AsyncGenerator[
dict[str, tractor.Portal],
None,
]:
portals: dict[str, tractor.Portal] = {}
if not names:
names = [f'worker_{i}' for i in range(count)]
if not len(names) == count:
raise ValueError(
f'Number of names is {len(names)} but count is {count}')
async with tractor.open_nursery(
**runtime_kwargs,
) as an:
async with trio.open_nursery() as n:
uid = tractor.current_actor().uid
async def _start(name: str) -> None:
name = f'{uid[0]}.{name}'
portals[name] = await an.start_actor(
enable_modules=modules,
name=name,
)
for name in names:
n.start_soon(_start, name)
assert len(portals) == count
yield portals
await an.cancel(hard_kill=hard_kill)
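
A usage sketch for the helper above; it yields one portal per spawned worker and tears the cluster down on exit (the module path is illustrative):

import trio
import tractor

async def main():
    async with tractor.open_actor_cluster(
        modules=['__main__'],
        count=4,
    ) as portals:
        for name, portal in portals.items():
            print('spawned', name, portal.channel.uid)

trio.run(main)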

File diff suppressed because it is too large

View File

@ -1,29 +1,9 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Actor discovery API.
"""
from typing import (
Optional,
Union,
AsyncGenerator,
)
from contextlib import asynccontextmanager as acm
import typing
from typing import Tuple, Optional, Union
from async_generator import asynccontextmanager
from ._ipc import _connect_chan, Channel
from ._portal import (
@ -34,16 +14,14 @@ from ._portal import (
from ._state import current_actor, _runtime_vars
@acm
@asynccontextmanager
async def get_arbiter(
host: str,
port: int,
) -> AsyncGenerator[Union[Portal, LocalPortal], None]:
'''Return a portal instance connected to a local or remote
) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]:
"""Return a portal instance connected to a local or remote
arbiter.
'''
"""
actor = current_actor()
if not actor:
@ -55,75 +33,38 @@ async def get_arbiter(
yield LocalPortal(actor, Channel((host, port)))
else:
async with _connect_chan(host, port) as chan:
async with open_portal(chan) as arb_portal:
yield arb_portal
@acm
@asynccontextmanager
async def get_root(
**kwargs,
) -> AsyncGenerator[Portal, None]:
**kwargs,
) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]:
host, port = _runtime_vars['_root_mailbox']
assert host is not None
async with _connect_chan(host, port) as chan:
async with open_portal(chan, **kwargs) as portal:
yield portal
@acm
async def query_actor(
@asynccontextmanager
async def find_actor(
name: str,
arbiter_sockaddr: Optional[tuple[str, int]] = None,
arbiter_sockaddr: Tuple[str, int] = None
) -> typing.AsyncGenerator[Optional[Portal], None]:
"""Ask the arbiter to find actor(s) by name.
) -> AsyncGenerator[tuple[str, int], None]:
'''
Simple address lookup for a given actor name.
Returns the (socket) address or ``None``.
'''
Returns a connected portal to the last registered matching actor
known to the arbiter.
"""
actor = current_actor()
async with get_arbiter(
*arbiter_sockaddr or actor._arb_addr
) as arb_portal:
sockaddr = await arb_portal.run_from_ns(
'self',
'find_actor',
name=name,
)
async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal:
sockaddr = await arb_portal.run('self', 'find_actor', name=name)
# TODO: return portals to all available actors - for now just
# the last one that registered
if name == 'arbiter' and actor.is_arbiter:
raise RuntimeError("The current actor is the arbiter")
yield sockaddr if sockaddr else None
@acm
async def find_actor(
name: str,
arbiter_sockaddr: tuple[str, int] | None = None
) -> AsyncGenerator[Optional[Portal], None]:
'''
Ask the arbiter to find actor(s) by name.
Returns a connected portal to the last registered matching actor
known to the arbiter.
'''
async with query_actor(
name=name,
arbiter_sockaddr=arbiter_sockaddr,
) as sockaddr:
if sockaddr:
elif sockaddr:
async with _connect_chan(*sockaddr) as chan:
async with open_portal(chan) as portal:
yield portal
@ -131,27 +72,19 @@ async def find_actor(
yield None
@acm
@asynccontextmanager
async def wait_for_actor(
name: str,
arbiter_sockaddr: tuple[str, int] | None = None
) -> AsyncGenerator[Portal, None]:
arbiter_sockaddr: Tuple[str, int] = None
) -> typing.AsyncGenerator[Portal, None]:
"""Wait on an actor to register with the arbiter.
A portal to the first registered actor is returned.
"""
actor = current_actor()
async with get_arbiter(
*arbiter_sockaddr or actor._arb_addr,
) as arb_portal:
sockaddrs = await arb_portal.run_from_ns(
'self',
'wait_for_actor',
name=name,
)
async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal:
sockaddrs = await arb_portal.run('self', 'wait_for_actor', name=name)
sockaddr = sockaddrs[-1]
async with _connect_chan(*sockaddr) as chan:
async with open_portal(chan) as portal:
yield portal
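
Discovery in practice, per the master-side API above; a sketch where the actor name is illustrative:

import tractor

async def lookup():
    # block until an actor named 'streamer' registers, then
    # yield a connected portal to it
    async with tractor.wait_for_actor('streamer') as portal:
        print('found at', portal.channel.uid)

    # non-blocking variant: the portal is ``None`` when no
    # matching actor is currently registered
    async with tractor.find_actor('streamer') as maybe_portal:
        if maybe_portal is None:
            print('no such actor registered')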

View File

@ -1,64 +1,29 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Sub-process entry points.
Process entry points.
"""
from __future__ import annotations
from functools import partial
from typing import (
Any,
TYPE_CHECKING,
)
from typing import Tuple, Any
import signal
import trio # type: ignore
from .log import (
get_console_log,
get_logger,
)
from ._actor import Actor
from .log import get_console_log, get_logger
from . import _state
from .to_asyncio import run_as_asyncio_guest
from ._runtime import (
async_main,
Actor,
)
if TYPE_CHECKING:
from ._spawn import SpawnMethodKey
log = get_logger(__name__)
def _mp_main(
actor: Actor, # type: ignore
accept_addr: tuple[str, int],
forkserver_info: tuple[Any, Any, Any, Any, Any],
start_method: SpawnMethodKey,
parent_addr: tuple[str, int] | None = None,
infect_asyncio: bool = False,
actor: 'Actor',
accept_addr: Tuple[str, int],
forkserver_info: Tuple[Any, Any, Any, Any, Any],
start_method: str,
parent_addr: Tuple[str, int] = None,
) -> None:
'''
The routine called *after fork* which invokes a fresh ``trio.run``
'''
"""The routine called *after fork* which invokes a fresh ``trio.run``
"""
actor._forkserver_info = forkserver_info
from ._spawn import try_set_start_method
spawn_ctx = try_set_start_method(start_method)
@ -76,37 +41,29 @@ def _mp_main(
log.debug(f"parent_addr is {parent_addr}")
trio_main = partial(
async_main,
actor,
actor._async_main,
accept_addr,
parent_addr=parent_addr
)
try:
if infect_asyncio:
actor._infected_aio = True
run_as_asyncio_guest(trio_main)
else:
trio.run(trio_main)
trio.run(trio_main)
except KeyboardInterrupt:
pass # handle it the same way trio does?
finally:
log.info(f"Actor {actor.uid} terminated")
log.info(f"Actor {actor.uid} terminated")
def _trio_main(
actor: Actor, # type: ignore
*,
parent_addr: tuple[str, int] | None = None,
infect_asyncio: bool = False,
actor: 'Actor',
parent_addr: Tuple[str, int] = None
) -> None:
'''
Entry point for a `trio_run_in_process` subactor.
"""Entry point for a `trio_run_in_process` subactor.
"""
# Disable sigint handling in children;
# we don't need it thanks to our cancellation machinery.
signal.signal(signal.SIGINT, signal.SIG_IGN)
'''
log.info(f"Started new trio process for {actor.uid}")
# TODO: make a global func to set this or is it too hacky?
# os.environ['PYTHONBREAKPOINT'] = 'tractor._debug.breakpoint'
if actor.loglevel is not None:
log.info(
@ -120,19 +77,13 @@ def _trio_main(
log.debug(f"parent_addr is {parent_addr}")
trio_main = partial(
async_main,
actor,
actor._async_main,
parent_addr=parent_addr
)
try:
if infect_asyncio:
actor._infected_aio = True
run_as_asyncio_guest(trio_main)
else:
trio.run(trio_main)
trio.run(trio_main)
except KeyboardInterrupt:
log.warning(f"Actor {actor.uid} received KBI")
finally:
log.info(f"Actor {actor.uid} terminated")
log.info(f"Actor {actor.uid} terminated")

View File

@ -1,58 +1,35 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Our classy exception set.
"""
from typing import (
Any,
Optional,
Type,
)
import importlib
import builtins
import traceback
import exceptiongroup as eg
import trio
_this_mod = importlib.import_module(__name__)
class ActorFailure(Exception):
"General actor failure"
class RemoteActorError(Exception):
# TODO: local recontruction of remote exception deats
"Remote actor exception bundled locally"
def __init__(
self,
message: str,
suberror_type: Optional[Type[BaseException]] = None,
**msgdata
) -> None:
def __init__(self, message, type_str, **msgdata):
super().__init__(message)
for ns in [builtins, _this_mod, trio]:
try:
self.type = getattr(ns, type_str)
break
except AttributeError:
continue
else:
self.type = Exception
self.type = suberror_type
self.msgdata = msgdata
# TODO: a trio.MultiError.catch like context manager
# for catching underlying remote errors of a particular type
class InternalActorError(RemoteActorError):
"""Remote internal ``tractor`` error indicating
@ -60,14 +37,6 @@ class InternalActorError(RemoteActorError):
"""
class TransportClosed(trio.ClosedResourceError):
"Underlying channel transport was closed prior to use"
class ContextCancelled(RemoteActorError):
"Inter-actor task context cancelled itself on the callee side."
class NoResult(RuntimeError):
"No final result is expected for this actor"
@ -76,102 +45,24 @@ class ModuleNotExposed(ModuleNotFoundError):
"The requested module is not exposed for RPC"
class NoRuntime(RuntimeError):
"The root actor has not been initialized yet"
class StreamOverrun(trio.TooSlowError):
"This stream was overrun by sender"
class AsyncioCancelled(Exception):
'''
Asyncio cancelled translation (non-base) error
for use with the ``to_asyncio`` module
to be raised in the ``trio`` side task
'''
def pack_error(
exc: BaseException,
tb=None,
) -> dict[str, Any]:
def pack_error(exc):
"""Create an "error message" for tranmission over
a channel (aka the wire).
"""
if tb:
tb_str = ''.join(traceback.format_tb(tb))
else:
tb_str = traceback.format_exc()
return {
'error': {
'tb_str': tb_str,
'tb_str': traceback.format_exc(),
'type_str': type(exc).__name__,
}
}
def unpack_error(
msg: dict[str, Any],
chan=None,
err_type=RemoteActorError
) -> Exception:
'''
Unpack an 'error' message from the wire
def unpack_error(msg, chan=None, err_type=RemoteActorError):
"""Unpack an 'error' message from the wire
into a local ``RemoteActorError``.
'''
__tracebackhide__ = True
error = msg['error']
tb_str = error.get('tb_str', '')
message = f"{chan.uid}\n" + tb_str
type_name = error['type_str']
suberror_type: Type[BaseException] = Exception
if type_name == 'ContextCancelled':
err_type = ContextCancelled
suberror_type = trio.Cancelled
else: # try to lookup a suitable local error type
for ns in [
builtins,
_this_mod,
eg,
trio,
]:
try:
suberror_type = getattr(ns, type_name)
break
except AttributeError:
continue
exc = err_type(
message,
suberror_type=suberror_type,
# unpack other fields into error type init
"""
tb_str = msg['error'].get('tb_str', '')
return err_type(
f"{chan.uid}\n" + tb_str,
**msg['error'],
)
return exc
def is_multi_cancelled(exc: BaseException) -> bool:
'''
Predicate to determine if a possible ``eg.BaseExceptionGroup`` contains
only ``trio.Cancelled`` sub-exceptions (and is likely the result of
cancelling a collection of subtasks.
'''
if isinstance(exc, eg.BaseExceptionGroup):
return exc.subgroup(
lambda exc: isinstance(exc, trio.Cancelled)
) is not None
return False
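
The round-trip these helpers implement, sketched against the master-side code above; the private-module import and the stand-in channel are assumptions for illustration only:

import tractor
from tractor._exceptions import pack_error, unpack_error

class FakeChan:
    # stand-in for an IPC channel; only ``.uid`` is read here
    uid = ('illustrative-actor', 'some-uuid')

try:
    raise ValueError('boom')
except ValueError as err:
    # serializes the traceback text and the exception type *name*
    msg = pack_error(err)

exc = unpack_error(msg, chan=FakeChan())
assert isinstance(exc, tractor.RemoteActorError)
assert exc.type is ValueError  # resolved by name from ``builtins``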

View File

@ -1,19 +1,3 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This is a near-copy of the 3.8 stdlib's ``multiprocessing.forkserver.py``
with some hackery to prevent any more than a single forkserver and
@ -22,8 +6,6 @@ semaphore tracker per ``MainProcess``.
.. note:: There is no type hinting in this code base (yet) to remain as
close as possible to upstream.
"""
# type: ignore
import os
import socket
import signal
@ -138,8 +120,7 @@ class PatchedForkServer(ForkServer):
with socket.socket(socket.AF_UNIX) as listener:
address = connection.arbitrary_address('AF_UNIX')
listener.bind(address)
if not util.is_abstract_socket_namespace(address):
os.chmod(address, 0o600)
os.chmod(address, 0o600)
listener.listen()
# all client processes own the write end of the "alive" pipe;

View File

@ -1,240 +1,84 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Inter-process comms abstractions
"""
from __future__ import annotations
import platform
import struct
import typing
from collections.abc import (
AsyncGenerator,
AsyncIterator,
)
from typing import (
Any,
runtime_checkable,
Optional,
Protocol,
Type,
TypeVar,
)
from typing import Any, Tuple, Optional
from functools import partial
import inspect
from tricycle import BufferedReceiveStream
import msgspec
import msgpack
import trio
from async_generator import asynccontextmanager
from .log import get_logger
from ._exceptions import TransportClosed
log = get_logger(__name__)
log = get_logger('ipc')
# :eyeroll:
try:
import msgpack_numpy
Unpacker = msgpack_numpy.Unpacker
except ImportError:
# just plain ``msgpack`` requires tweaking key settings
Unpacker = partial(msgpack.Unpacker, strict_map_key=False)
_is_windows = platform.system() == 'Windows'
log = get_logger(__name__)
def get_stream_addrs(stream: trio.SocketStream) -> tuple:
# should both be IP sockets
lsockname = stream.socket.getsockname()
rsockname = stream.socket.getpeername()
return (
tuple(lsockname[:2]),
tuple(rsockname[:2]),
)
MsgType = TypeVar("MsgType")
# TODO: consider using a generic def and indexing with our eventual
# msg definition/types?
# - https://docs.python.org/3/library/typing.html#typing.Protocol
# - https://jcristharif.com/msgspec/usage.html#structs
@runtime_checkable
class MsgTransport(Protocol[MsgType]):
stream: trio.SocketStream
drained: list[MsgType]
class MsgpackStream:
"""A ``trio.SocketStream`` delivering ``msgpack`` formatted data.
"""
def __init__(self, stream: trio.SocketStream) -> None:
...
# XXX: should this instead be called `.sendall()`?
async def send(self, msg: MsgType) -> None:
...
async def recv(self) -> MsgType:
...
def __aiter__(self) -> MsgType:
...
def connected(self) -> bool:
...
# defining this sync otherwise it causes a mypy error because it
# can't figure out it's a generator, I guess?
def drain(self) -> AsyncIterator[dict]:
...
@property
def laddr(self) -> tuple[str, int]:
...
@property
def raddr(self) -> tuple[str, int]:
...
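A quick illustration of the ``runtime_checkable`` protocol semantics used above; ``Example`` and ``Dummy`` are made-up stand-ins:

from typing import Protocol, TypeVar, runtime_checkable

T = TypeVar("T")

@runtime_checkable
class Example(Protocol[T]):
    async def send(self, msg: T) -> None: ...
    async def recv(self) -> T: ...

class Dummy:
    async def send(self, msg) -> None: ...
    async def recv(self): ...

# isinstance() on a runtime-checkable protocol only verifies that the
# members exist; signatures are not checked at runtime
assert isinstance(Dummy(), Example)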
# TODO: not sure why we have to inherit here, but it seems to be an
# issue with ``get_msg_transport()`` returning a ``Type[Protocol]``;
# probably should make a `mypy` issue?
class MsgpackTCPStream(MsgTransport):
'''
A ``trio.SocketStream`` delivering ``msgpack`` formatted data
using the ``msgspec`` codec lib.
'''
def __init__(
self,
stream: trio.SocketStream,
prefix_size: int = 4,
) -> None:
self.stream = stream
assert self.stream.socket
# should both be IP sockets
self._laddr, self._raddr = get_stream_addrs(stream)
lsockname = stream.socket.getsockname()
assert isinstance(lsockname, tuple)
self._laddr = lsockname[:2]
rsockname = stream.socket.getpeername()
assert isinstance(rsockname, tuple)
self._raddr = rsockname[:2]
# create read loop instance
self._agen = self._iter_packets()
self._send_lock = trio.StrictFIFOLock()
# public, I guess?
self.drained: list[dict] = []
self.recv_stream = BufferedReceiveStream(transport_stream=stream)
self.prefix_size = prefix_size
# TODO: struct aware messaging coders
self.encode = msgspec.msgpack.Encoder().encode
self.decode = msgspec.msgpack.Decoder().decode # dict[str, Any])
async def _iter_packets(self) -> AsyncGenerator[dict, None]:
'''Yield packets from the underlying stream.
'''
import msgspec # noqa
decodes_failed: int = 0
async def _iter_packets(self) -> typing.AsyncGenerator[dict, None]:
"""Yield packets from the underlying stream.
"""
unpacker = Unpacker(
raw=False,
use_list=False,
)
while True:
try:
header = await self.recv_stream.receive_exactly(4)
data = await self.stream.receive_some(2**10)
log.trace(f"received {data}") # type: ignore
except trio.BrokenResourceError:
log.error(f"Stream connection {self.raddr} broke")
return
except (
ValueError,
ConnectionResetError,
if data == b'':
log.debug(f"Stream connection {self.raddr} was closed")
return
# not entirely sure why we need this but without it we
# seem to be getting racy failures here on
# arbiter/registry name subs..
trio.BrokenResourceError,
):
raise TransportClosed(
f'transport {self} was already closed prior to read'
)
if header == b'':
raise TransportClosed(
f'transport {self} was already closed prior to read'
)
size, = struct.unpack("<I", header)
log.transport(f'received header {size}') # type: ignore
msg_bytes = await self.recv_stream.receive_exactly(size)
log.transport(f"received {msg_bytes}") # type: ignore
try:
yield self.decode(msg_bytes)
except (
msgspec.DecodeError,
UnicodeDecodeError,
):
if decodes_failed < 4:
# ignore decoding errors for now and assume they have to
# do with a channel drop - hope that receiving from the
# channel will raise an expected error and bubble up.
try:
msg_str: str | bytes = msg_bytes.decode()
except UnicodeDecodeError:
msg_str = msg_bytes
log.error(
'`msgspec` failed to decode!?\n'
'dumping bytes:\n'
f'{msg_str!r}'
)
decodes_failed += 1
else:
raise
async def send(self, msg: Any) -> None:
async with self._send_lock:
bytes_data: bytes = self.encode(msg)
# supposedly the fastest approach, see:
# https://stackoverflow.com/a/54027962
size: bytes = struct.pack("<I", len(bytes_data))
return await self.stream.send_all(size + bytes_data)
unpacker.feed(data)
for packet in unpacker:
yield packet
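The wire format implemented above is just a 4-byte little-endian length header followed by a msgpack payload; a self-contained sketch (assuming ``msgspec`` is installed):

import struct
import msgspec

encode = msgspec.msgpack.Encoder().encode
decode = msgspec.msgpack.Decoder().decode

def frame(msg) -> bytes:
    # "<I" == little-endian unsigned 32-bit length prefix
    payload = encode(msg)
    return struct.pack("<I", len(payload)) + payload

def unframe(wire: bytes):
    size, = struct.unpack("<I", wire[:4])
    return decode(wire[4:4 + size])

assert unframe(frame({'cid': '1', 'yield': 42})) == {'cid': '1', 'yield': 42}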
@property
def laddr(self) -> tuple[str, int]:
def laddr(self) -> Tuple[Any, ...]:
return self._laddr
@property
def raddr(self) -> tuple[str, int]:
def raddr(self) -> Tuple[Any, ...]:
return self._raddr
# XXX: should this instead be called `.sendall()`?
async def send(self, data: Any) -> None:
async with self._send_lock:
return await self.stream.send_all(
msgpack.dumps(data, use_bin_type=True))
async def recv(self) -> Any:
return await self._agen.asend(None)
async def drain(self) -> AsyncIterator[dict]:
'''
Drain the stream's remaining messages sent from
the far end until the connection is closed by
the peer.
'''
try:
async for msg in self._iter_packets():
self.drained.append(msg)
except TransportClosed:
for msg in self.drained:
yield msg
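Since ``drain()`` is an async generator it can be consumed like any async iterator during teardown; a hedged usage sketch (``collect_leftovers`` is hypothetical):

async def collect_leftovers(msgstream) -> list:
    # gather whatever the peer managed to send before the
    # transport closed
    return [msg async for msg in msgstream.drain()]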
def __aiter__(self):
return self._agen
@ -242,87 +86,32 @@ class MsgpackTCPStream(MsgTransport):
return self.stream.socket.fileno() != -1
def get_msg_transport(
key: tuple[str, str],
) -> Type[MsgTransport]:
return {
('msgpack', 'tcp'): MsgpackTCPStream,
}[key]
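The lookup key scheme is simply a ``(codec, transport)`` pair, e.g.:

transport_cls = get_msg_transport(('msgpack', 'tcp'))
assert transport_cls is MsgpackTCPStream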
class Channel:
'''
An inter-process channel for communication between (remote) actors.
"""An inter-process channel for communication between (remote) actors.
Wraps a ``MsgStream``: transport + encoding IPC connection.
Currently we only support ``trio.SocketStream`` for transport
(aka TCP) and the ``msgpack`` interchange format via the ``msgspec``
codec library.
'''
Currently the only supported transport is a ``trio.SocketStream``.
"""
def __init__(
self,
destaddr: Optional[tuple[str, int]],
msg_transport_type_key: tuple[str, str] = ('msgpack', 'tcp'),
# TODO: optional reconnection support?
# auto_reconnect: bool = False,
# on_reconnect: typing.Callable[..., typing.Awaitable] = None,
destaddr: Optional[Tuple[str, int]] = None,
on_reconnect: typing.Callable[..., typing.Awaitable] = None,
auto_reconnect: bool = False,
stream: trio.SocketStream = None, # expected to be active
) -> None:
# self._recon_seq = on_reconnect
# self._autorecon = auto_reconnect
self._destaddr = destaddr
self._transport_key = msg_transport_type_key
# Either created in ``.connect()`` or passed in by
# user in ``.from_stream()``.
self._stream: Optional[trio.SocketStream] = None
self.msgstream: Optional[MsgTransport] = None
self._recon_seq = on_reconnect
self._autorecon = auto_reconnect
self.msgstream: Optional[MsgpackStream] = MsgpackStream(
stream) if stream else None
if self.msgstream and destaddr:
raise ValueError(
f"A stream was provided with local addr {self.laddr}"
)
self._destaddr = self.msgstream.raddr if self.msgstream else destaddr
# set after handshake - always uid of far end
self.uid: Optional[tuple[str, str]] = None
self.uid: Optional[Tuple[str, str]] = None
# set if far end actor errors internally
self._exc: Optional[Exception] = None
self._agen = self._aiter_recv()
self._exc: Optional[Exception] = None # set if far end actor errors
self._closed: bool = False
# flag set on ``Portal.cancel_actor()`` indicating
# remote (peer) cancellation of the far end actor runtime.
self._cancel_called: bool = False # set on ``Portal.cancel_actor()``
@classmethod
def from_stream(
cls,
stream: trio.SocketStream,
**kwargs,
) -> Channel:
src, dst = get_stream_addrs(stream)
chan = Channel(destaddr=dst, **kwargs)
# set immediately here from provided instance
chan._stream = stream
chan.set_msg_transport(stream)
return chan
def set_msg_transport(
self,
stream: trio.SocketStream,
type_key: Optional[tuple[str, str]] = None,
) -> MsgTransport:
type_key = type_key or self._transport_key
self.msgstream = get_msg_transport(type_key)(stream)
return self.msgstream
def __repr__(self) -> str:
if self.msgstream:
@ -332,65 +121,43 @@ class Channel:
return object.__repr__(self)
@property
def laddr(self) -> Optional[tuple[str, int]]:
def laddr(self) -> Optional[Tuple[Any, ...]]:
return self.msgstream.laddr if self.msgstream else None
@property
def raddr(self) -> Optional[tuple[str, int]]:
def raddr(self) -> Optional[Tuple[Any, ...]]:
return self.msgstream.raddr if self.msgstream else None
async def connect(
self,
destaddr: tuple[Any, ...] | None = None,
self, destaddr: Tuple[Any, ...] = None,
**kwargs
) -> MsgTransport:
) -> trio.SocketStream:
if self.connected():
raise RuntimeError("channel is already connected?")
destaddr = destaddr or self._destaddr
assert isinstance(destaddr, tuple)
stream = await trio.open_tcp_stream(
*destaddr,
**kwargs
)
msgstream = self.set_msg_transport(stream)
log.transport(
f'Opened channel[{type(msgstream)}]: {self.laddr} -> {self.raddr}'
)
return msgstream
stream = await trio.open_tcp_stream(*destaddr, **kwargs)
self.msgstream = MsgpackStream(stream)
return stream
async def send(self, item: Any) -> None:
log.transport(f"send `{item}`") # type: ignore
log.trace(f"send `{item}`") # type: ignore
assert self.msgstream
await self.msgstream.send(item)
async def recv(self) -> Any:
assert self.msgstream
return await self.msgstream.recv()
# try:
# return await self.msgstream.recv()
# except trio.BrokenResourceError:
# if self._autorecon:
# await self._reconnect()
# return await self.recv()
# raise
try:
return await self.msgstream.recv()
except trio.BrokenResourceError:
if self._autorecon:
await self._reconnect()
return await self.recv()
async def aclose(self) -> None:
log.transport(
f'Closing channel to {self.uid} '
f'{self.laddr} -> {self.raddr}'
)
log.debug(f"Closing {self}")
assert self.msgstream
await self.msgstream.stream.aclose()
self._closed = True
async def __aenter__(self):
await self.connect()
@ -402,44 +169,40 @@ class Channel:
def __aiter__(self):
return self._agen
# async def _reconnect(self) -> None:
# """Handle connection failures by polling until a reconnect can be
# established.
# """
# down = False
# while True:
# try:
# with trio.move_on_after(3) as cancel_scope:
# await self.connect()
# cancelled = cancel_scope.cancelled_caught
# if cancelled:
# log.transport(
# "Reconnect timed out after 3 seconds, retrying...")
# continue
# else:
# log.transport("Stream connection re-established!")
# # TODO: run any reconnection sequence
# # on_recon = self._recon_seq
# # if on_recon:
# # await on_recon(self)
# break
# except (OSError, ConnectionRefusedError):
# if not down:
# down = True
# log.transport(
# f"Connection to {self.raddr} went down, waiting"
# " for re-establishment")
# await trio.sleep(1)
async def _reconnect(self) -> None:
"""Handle connection failures by polling until a reconnect can be
established.
"""
down = False
while True:
try:
with trio.move_on_after(3) as cancel_scope:
await self.connect()
cancelled = cancel_scope.cancelled_caught
if cancelled:
log.warning(
"Reconnect timed out after 3 seconds, retrying...")
continue
else:
log.warning("Stream connection re-established!")
# run any reconnection sequence
on_recon = self._recon_seq
if on_recon:
await on_recon(self)
break
except (OSError, ConnectionRefusedError):
if not down:
down = True
log.warning(
f"Connection to {self.raddr} went down, waiting"
" for re-establishment")
await trio.sleep(1)
async def _aiter_recv(
self
) -> AsyncGenerator[Any, None]:
'''
Async iterate items from underlying stream.
'''
) -> typing.AsyncGenerator[Any, None]:
"""Async iterate items from underlying stream.
"""
assert self.msgstream
while True:
try:
@ -451,15 +214,14 @@ class Channel:
# # time is pointless
# await self.msgstream.send(sent)
except trio.BrokenResourceError:
# if not self._autorecon:
raise
if not self._autorecon:
raise
await self.aclose()
# if self._autorecon: # attempt reconnect
# await self._reconnect()
# continue
if self._autorecon: # attempt reconnect
await self._reconnect()
continue
else:
return
def connected(self) -> bool:
return self.msgstream.connected() if self.msgstream else False
@ -469,11 +231,9 @@ class Channel:
async def _connect_chan(
host: str, port: int
) -> typing.AsyncGenerator[Channel, None]:
'''
Create and connect a channel with disconnect on context manager
"""Create and connect a channel with disconnect on context manager
teardown.
'''
"""
chan = Channel((host, port))
await chan.connect()
yield chan
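A hedged usage sketch (assumes a peer actor is already listening on the hypothetical address below):

import trio

async def main() -> None:
    async with _connect_chan('127.0.0.1', 1616) as chan:
        await chan.send({'cmd': 'ping'})
        print(await chan.recv())

trio.run(main)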

View File

@ -1,39 +1,23 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Helpers pulled mostly verbatim from ``multiprocessing.spawn``
to aid with "fixing up" the ``__main__`` module in subprocesses.
These helpers are needed for any spawning backend that doesn't already
handle this. For example, when using ``trio_run_in_process`` it is needed,
but obviously not when we're already using ``multiprocessing``.
These helpers are needed for any spawning backend that doesn't already handle this.
For example, when using ``trio_run_in_process`` it is needed, but obviously not when
we're already using ``multiprocessing``.
"""
import os
import sys
import platform
import types
import runpy
from typing import Dict
ORIGINAL_DIR = os.path.abspath(os.getcwd())
def _mp_figure_out_main() -> dict[str, str]:
def _mp_figure_out_main() -> Dict[str, str]:
"""Taken from ``multiprocessing.spawn.get_preparation_data()``.
Retrieve parent actor `__main__` module data.
@ -83,7 +67,7 @@ def _fixup_main_from_name(mod_name: str) -> None:
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_module(mod_name,
run_name="__mp_main__",
alter_sys=True) # type: ignore
alter_sys=True)
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
@ -111,6 +95,6 @@ def _fixup_main_from_path(main_path: str) -> None:
# old_main_modules.append(current_main)
main_module = types.ModuleType("__mp_main__")
main_content = runpy.run_path(main_path,
run_name="__mp_main__") # type: ignore
run_name="__mp_main__")
main_module.__dict__.update(main_content)
sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
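The effect of the ``run_name='__mp_main__'`` trick can be seen in isolation; a sketch using a throwaway script written on the fly:

import runpy

with open('script.py', 'w') as f:
    f.write(
        "def work():\n"
        "    return 42\n"
        "\n"
        "if __name__ == '__main__':\n"
        "    raise RuntimeError('guard fired!')\n"
    )

# run under an alternate name: definitions load, the guard stays cold
contents = runpy.run_path('script.py', run_name='__mp_main__')
assert contents['work']() == 42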

View File

@ -1,145 +1,222 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Memory boundary "Portals": an API for structured
concurrency linked tasks running in disparate memory domains.
'''
from __future__ import annotations
"""
Portal api
"""
import importlib
import inspect
from typing import (
Any, Optional,
Callable, AsyncGenerator,
Type,
)
import typing
from typing import Tuple, Any, Dict, Optional, Set
from functools import partial
from dataclasses import dataclass
from pprint import pformat
import warnings
import trio
from async_generator import asynccontextmanager
from .trionics import maybe_open_nursery
from ._state import current_actor
from ._ipc import Channel
from .log import get_logger
from .msg import NamespacePath
from ._exceptions import (
unpack_error,
NoResult,
ContextCancelled,
)
from ._streaming import (
Context,
MsgStream,
)
from ._exceptions import unpack_error, NoResult, RemoteActorError
log = get_logger(__name__)
log = get_logger('tractor')
def _unwrap_msg(
msg: dict[str, Any],
channel: Channel
@asynccontextmanager
async def maybe_open_nursery(
nursery: trio.Nursery = None,
shield: bool = False,
) -> typing.AsyncGenerator[trio.Nursery, Any]:
"""Create a new nursery if None provided.
) -> Any:
__tracebackhide__ = True
try:
return msg['return']
except KeyError:
# internal error should never get here
assert msg.get('cid'), "Received internal error at portal?"
raise unpack_error(msg, channel) from None
Blocks on exit as expected if no input nursery is provided.
"""
if nursery is not None:
yield nursery
else:
async with trio.open_nursery() as nursery:
nursery.cancel_scope.shield = shield
yield nursery
class MessagingError(Exception):
'Some kind of unexpected SC messaging dialog issue'
class StreamReceiveChannel(trio.abc.ReceiveChannel):
"""A wrapper around a ``trio._channel.MemoryReceiveChannel`` with
special behaviour for signalling stream termination across an
inter-actor ``Channel``. This is the type returned to a local task
which invoked a remote streaming function using `Portal.run()`.
Termination rules:
- if the local task signals stop iteration a cancel signal is
relayed to the remote task indicating to stop streaming
- if the remote task signals the end of a stream, raise a
``StopAsyncIteration`` to terminate the local ``async for``
"""
def __init__(
self,
cid: str,
rx_chan: trio.abc.ReceiveChannel,
portal: 'Portal',
) -> None:
self._cid = cid
self._rx_chan = rx_chan
self._portal = portal
# delegate directly to underlying mem channel
def receive_nowait(self):
return self._rx_chan.receive_nowait()
async def receive(self):
try:
msg = await self._rx_chan.receive()
return msg['yield']
except trio.ClosedResourceError:
# when the send is closed we assume the stream has
# terminated and signal this local iterator to stop
await self.aclose()
raise StopAsyncIteration
except trio.Cancelled:
# relay cancels to the remote task
await self.aclose()
raise
except KeyError:
# internal error should never get here
assert msg.get('cid'), (
"Received internal error at portal?")
raise unpack_error(msg, self._portal.channel)
async def aclose(self):
"""Cancel associated remote actor task and local memory channel
on close.
"""
if self._rx_chan._closed:
log.warning(f"{self} is already closed")
return
cid = self._cid
with trio.move_on_after(0.5) as cs:
cs.shield = True
log.warning(
f"Cancelling stream {cid} to "
f"{self._portal.channel.uid}")
# NOTE: we're telling the far end actor to cancel a task
# corresponding to *this actor*. The far end local channel
# instance is passed to `Actor._cancel_task()` implicitly.
await self._portal.run('self', '_cancel_task', cid=cid)
if cs.cancelled_caught:
# XXX: there's no way to know if the remote task was indeed
# cancelled in the case where the connection is broken or
# some other network error occurred.
if not self._portal.channel.connected():
log.warning(
"May have failed to cancel remote task "
f"{cid} for {self._portal.channel.uid}")
with trio.CancelScope(shield=True):
await self._rx_chan.aclose()
def clone(self):
return self
class Portal:
'''
A 'portal' to a(n) (remote) ``Actor``.
"""A 'portal' to a(n) (remote) ``Actor``.
A portal is "opened" (and eventually closed) by one side of an
inter-actor communication context. The side which opens the portal
is equivalent to a "caller" in function parlance and usually is
either the called actor's parent (in process tree hierarchy terms)
or a client interested in scheduling work to be done remotely in a
far process.
The portal api allows the "caller" actor to invoke remote routines
and receive results through an underlying ``tractor.Channel`` as
though the remote (async) function / generator was called locally.
It may be thought of loosely as an RPC api where native Python
function calling semantics are supported transparently; hence it is
like having a "portal" between the separate actor memory spaces.
'''
# the timeout for a remote cancel request sent to
# a(n) (peer) actor.
cancel_timeout = 0.5
Allows for invoking remote routines and receiving results through an
underlying ``tractor.Channel`` as though the remote (async)
function / generator was invoked locally.
Think of this like a native async IPC API.
"""
def __init__(self, channel: Channel) -> None:
self.channel = channel
# when this is set to a tuple returned from ``_submit()`` then
# it is expected that ``result()`` will be awaited at some point
# during the portal's lifetime
self._result_msg: Optional[dict] = None
# When set to a ``Context`` (when _submit_for_result is called)
# it is expected that ``result()`` will be awaited at some
# point.
self._expect_result: Optional[Context] = None
self._streams: set[MsgStream] = set()
self._result: Optional[Any] = None
# set when _submit_for_result is called
self._expect_result: Optional[
Tuple[str, Any, str, Dict[str, Any]]
] = None
self._streams: Set[StreamReceiveChannel] = set()
self.actor = current_actor()
async def _submit_for_result(
async def _submit(
self,
ns: str,
func: str,
**kwargs
) -> None:
kwargs,
) -> Tuple[str, trio.abc.ReceiveChannel, str, Dict[str, Any]]:
"""Submit a function to be scheduled and run by actor, return the
associated caller id, response queue, response type str,
first message packet as a tuple.
This is an async call.
"""
# ship a function call request to the remote actor
cid, recv_chan = await self.actor.send_cmd(
self.channel, ns, func, kwargs)
# wait on first response msg and handle (this should be
# in an immediate response)
first_msg = await recv_chan.receive()
functype = first_msg.get('functype')
if functype == 'function' or functype == 'asyncfunction':
resp_type = 'return'
elif functype == 'asyncgen':
resp_type = 'yield'
elif 'error' in first_msg:
raise unpack_error(first_msg, self.channel)
else:
raise ValueError(f"{first_msg} is an invalid response packet?")
return cid, recv_chan, resp_type, first_msg
async def _submit_for_result(self, ns: str, func: str, **kwargs) -> None:
assert self._expect_result is None, \
"A pending main result has already been submitted"
self._expect_result = await self._submit(ns, func, kwargs)
self._expect_result = await self.actor.start_remote_task(
self.channel,
ns,
func,
kwargs
async def run(self, ns: str, func: str, **kwargs) -> Any:
"""Submit a remote function to be scheduled and run by actor,
wrap and return its (stream of) result(s).
This is a blocking call and returns either a value from the
remote rpc task or a local async generator instance.
"""
return await self._return_from_resptype(
*(await self._submit(ns, func, kwargs))
)
async def _return_once(
async def _return_from_resptype(
self,
ctx: Context,
cid: str,
recv_chan: trio.abc.ReceiveChannel,
resptype: str,
first_msg: dict
) -> Any:
# TODO: note, this needs some serious work and thinking about how
# to make async-generators the fundamental IPC API over channels!
# (think `yield from`, `gen.send()`, and functional reactive stuff)
if resptype == 'yield': # stream response
rchan = StreamReceiveChannel(cid, recv_chan, self)
self._streams.add(rchan)
return rchan
) -> dict[str, Any]:
assert ctx._remote_func_type == 'asyncfunc' # single response
msg = await ctx._recv_chan.receive()
return msg
elif resptype == 'return': # single response
msg = await recv_chan.receive()
try:
return msg['return']
except KeyError:
# internal error should never get here
assert msg.get('cid'), "Received internal error at portal?"
raise unpack_error(msg, self.channel)
else:
raise ValueError(f"Unknown msg response type: {first_msg}")
async def result(self) -> Any:
'''
Return the result(s) from the remote actor's "main" task.
'''
# __tracebackhide__ = True
"""Return the result(s) from the remote actor's "main" task.
"""
# Check for non-rpc errors slapped on the
# channel for which we always raise
exc = self.channel._exc
@ -156,29 +233,28 @@ class Portal:
# expecting a "main" result
assert self._expect_result
if self._result is None:
try:
self._result = await self._return_from_resptype(
*self._expect_result
)
except RemoteActorError as err:
self._result = err
if self._result_msg is None:
self._result_msg = await self._return_once(
self._expect_result
)
# re-raise error on every call
if isinstance(self._result, RemoteActorError):
raise self._result
return _unwrap_msg(self._result_msg, self.channel)
return self._result
async def _cancel_streams(self):
# terminate all locally running async generator
# IPC calls
if self._streams:
log.cancel(
log.warning(
f"Cancelling all streams with {self.channel.uid}")
for stream in self._streams.copy():
try:
await stream.aclose()
except trio.ClosedResourceError:
# don't error the stream having already been closed
# (unless of course at some point down the road we
# won't expect this to always be the case or need to
# detect it for respawning purposes?)
log.debug(f"{stream} was already closed.")
await stream.aclose()
async def aclose(self):
log.debug(f"Closing {self}")
@ -187,407 +263,75 @@ class Portal:
# we'll need to .aclose all those channels here
await self._cancel_streams()
async def cancel_actor(
self,
timeout: float | None = None,
) -> bool:
'''
Cancel the actor on the other end of this portal.
'''
async def cancel_actor(self):
"""Cancel the actor on the other end of this portal.
"""
if not self.channel.connected():
log.cancel("This channel is already closed can't cancel")
log.warning("This portal is already closed can't cancel")
return False
log.cancel(
await self._cancel_streams()
log.warning(
f"Sending actor cancel request to {self.channel.uid} on "
f"{self.channel}")
self.channel._cancel_called = True
try:
# send cancel cmd - might not get response
# XXX: sure would be nice to make this work with a proper shield
with trio.move_on_after(timeout or self.cancel_timeout) as cs:
cs.shield = True
await self.run_from_ns('self', 'cancel')
# with trio.CancelScope() as cancel_scope:
# with trio.CancelScope(shield=True) as cancel_scope:
with trio.move_on_after(0.5) as cancel_scope:
cancel_scope.shield = True
await self.run('self', 'cancel')
return True
if cs.cancelled_caught:
log.cancel(f"May have failed to cancel {self.channel.uid}")
if cancel_scope.cancelled_caught:
log.warning(f"May have failed to cancel {self.channel.uid}")
# if we get here some weird cancellation case happened
return False
except (
trio.ClosedResourceError,
trio.BrokenResourceError,
):
log.cancel(
f"{self.channel} for {self.channel.uid} was already "
"closed or broken?")
except trio.ClosedResourceError:
log.warning(
f"{self.channel} for {self.channel.uid} was already closed?")
return False
async def run_from_ns(
self,
namespace_path: str,
function_name: str,
**kwargs,
) -> Any:
'''
Run a function from a (remote) namespace in a new task on the
far-end actor.
This is a more explicit way to run tasks in a remote-process
actor using explicit object-path syntax. Hint: this is how
`.run()` works underneath.
Note::
A special namespace `self` can be used to invoke `Actor`
instance methods in the remote runtime. Currently this
should be used solely for ``tractor`` runtime
internals.
'''
ctx = await self.actor.start_remote_task(
self.channel,
namespace_path,
function_name,
kwargs,
)
ctx._portal = self
msg = await self._return_once(ctx)
return _unwrap_msg(msg, self.channel)
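For example (module path and function name here are hypothetical; only the ``'self'`` namespace call is taken from the runtime internals above):

async def call_remote(portal) -> None:
    result = await portal.run_from_ns('mypkg.mymod', 'compute', x=1)
    # runtime-internal methods are reached via the special 'self'
    # namespace, e.g. the cancel request used by ``cancel_actor()``:
    await portal.run_from_ns('self', 'cancel')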
async def run(
self,
func: str,
fn_name: Optional[str] = None,
**kwargs
) -> Any:
'''
Submit a remote function to be scheduled and run by actor, in
a new task, wrap and return its (stream of) result(s).
This is a blocking call and returns either a value from the
remote rpc task or a local async generator instance.
'''
if isinstance(func, str):
warnings.warn(
"`Portal.run(namespace: str, funcname: str)` is now"
"deprecated, pass a function reference directly instead\n"
"If you still want to run a remote function by name use"
"`Portal.run_from_ns()`",
DeprecationWarning,
stacklevel=2,
)
fn_mod_path = func
assert isinstance(fn_name, str)
else: # function reference was passed directly
if (
not inspect.iscoroutinefunction(func) or
(
inspect.iscoroutinefunction(func) and
getattr(func, '_tractor_stream_function', False)
)
):
raise TypeError(
f'{func} must be a non-streaming async function!')
fn_mod_path, fn_name = NamespacePath.from_ref(func).to_tuple()
ctx = await self.actor.start_remote_task(
self.channel,
fn_mod_path,
fn_name,
kwargs,
)
ctx._portal = self
return _unwrap_msg(
await self._return_once(ctx),
self.channel,
)
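A hedged sketch of the two calling conventions; the ``open_nursery()``/``start_actor()`` spawning kwargs are assumed from the contemporary API:

import trio
import tractor

async def add(x: int, y: int) -> int:
    return x + y

async def main() -> None:
    async with tractor.open_nursery() as an:
        portal = await an.start_actor('adder', enable_modules=[__name__])
        # preferred: pass the function reference directly
        assert await portal.run(add, x=1, y=2) == 3
        # deprecated string-path form (emits a DeprecationWarning):
        # assert await portal.run(__name__, 'add', x=1, y=2) == 3
        await portal.cancel_actor()

trio.run(main)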
@asynccontextmanager
async def open_stream_from(
self,
async_gen_func: Callable, # typing: ignore
**kwargs,
) -> AsyncGenerator[MsgStream, None]:
if not inspect.isasyncgenfunction(async_gen_func):
if not (
inspect.iscoroutinefunction(async_gen_func) and
getattr(async_gen_func, '_tractor_stream_function', False)
):
raise TypeError(
f'{async_gen_func} must be an async generator function!')
fn_mod_path, fn_name = NamespacePath.from_ref(
async_gen_func).to_tuple()
ctx = await self.actor.start_remote_task(
self.channel,
fn_mod_path,
fn_name,
kwargs
)
ctx._portal = self
# ensure receive-only stream entrypoint
assert ctx._remote_func_type == 'asyncgen'
try:
# deliver receive only stream
async with MsgStream(
ctx, ctx._recv_chan,
) as rchan:
self._streams.add(rchan)
yield rchan
finally:
# cancel the far end task on consumer close
# NOTE: this is a special case since we assume that if using
# this ``.open_stream_from()`` api, the stream is a one-time
# use and we couple the far end task's lifetime to
# the consumer's scope; we don't ever send a `'stop'`
# message right now since there shouldn't be a reason to
# stop and restart the stream, right?
try:
with trio.CancelScope(shield=True):
await ctx.cancel()
except trio.ClosedResourceError:
# if the far end terminates before we send a cancel the
# underlying transport-channel may already be closed.
log.cancel(f'Context {ctx} was already closed?')
# XXX: should this always be done?
# await recv_chan.aclose()
self._streams.remove(rchan)
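Hedged usage sketch: any plain async generator function passes the ``isasyncgenfunction()`` check above (``counter`` is made up):

async def counter(limit: int):
    for i in range(limit):
        yield i

async def consume(portal) -> None:
    async with portal.open_stream_from(counter, limit=3) as stream:
        async for item in stream:
            print(item)  # 0, 1, 2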
@asynccontextmanager
async def open_context(
self,
func: Callable,
**kwargs,
) -> AsyncGenerator[tuple[Context, Any], None]:
'''
Open an inter-actor task context.
This is a synchronous API which allows for deterministic
setup/teardown of a remote task. The yielded ``Context`` further
allows for opening bidirectional streams, explicit cancellation
and synchronized final result collection. See ``tractor.Context``.
'''
# conduct target func method structural checks
if not inspect.iscoroutinefunction(func) and (
getattr(func, '_tractor_contex_function', False)
):
raise TypeError(
f'{func} must be an async generator function!')
fn_mod_path, fn_name = NamespacePath.from_ref(func).to_tuple()
ctx = await self.actor.start_remote_task(
self.channel,
fn_mod_path,
fn_name,
kwargs
)
assert ctx._remote_func_type == 'context'
msg = await ctx._recv_chan.receive()
try:
# the "first" value here is delivered by the callee's
# ``Context.started()`` call.
first = msg['started']
ctx._started_called = True
except KeyError:
assert msg.get('cid'), ("Received internal error at context?")
if msg.get('error'):
# raise kerr from unpack_error(msg, self.channel)
raise unpack_error(msg, self.channel) from None
else:
raise MessagingError(
f'Context for {ctx.cid} was expecting a `started` message'
f' but received a non-error msg:\n{pformat(msg)}'
)
_err: Optional[BaseException] = None
ctx._portal = self
uid = self.channel.uid
cid = ctx.cid
etype: Optional[Type[BaseException]] = None
# deliver context instance and .started() msg value in open tuple.
try:
async with trio.open_nursery() as scope_nursery:
ctx._scope_nursery = scope_nursery
# do we need this?
# await trio.lowlevel.checkpoint()
yield ctx, first
except ContextCancelled as err:
_err = err
if not ctx._cancel_called:
# context was cancelled at the far end but was
# not part of this end requesting that cancel
# so raise for the local task to respond and handle.
raise
# if the context was cancelled by client code
# then we don't need to raise since user code
# is expecting this and the block should exit.
else:
log.debug(f'Context {ctx} cancelled gracefully')
except (
BaseException,
# more specifically, we need to handle these but not
# sure it's worth being pedantic:
# Exception,
# trio.Cancelled,
# KeyboardInterrupt,
) as err:
etype = type(err)
# the context cancels itself on any cancel
# causing error.
if ctx.chan.connected():
log.cancel(
'Context cancelled for task, sending cancel request..\n'
f'task:{cid}\n'
f'actor:{uid}'
)
await ctx.cancel()
else:
log.warning(
'IPC connection for context is broken?\n'
f'task:{cid}\n'
f'actor:{uid}'
)
raise
finally:
# in the case where a runtime nursery (due to internal bug)
# or a remote actor transmits an error we want to be
# sure we get the error from the underlying feeder mem chan.
# if it's not raised here it *should* be raised from the
# msg loop nursery right?
if ctx.chan.connected():
log.info(
'Waiting on final context-task result for\n'
f'task: {cid}\n'
f'actor: {uid}'
)
result = await ctx.result()
log.runtime(
f'Context {fn_name} returned '
f'value from callee `{result}`'
)
# though it should be impossible for any tasks
# operating *in* this scope to have survived
# we tear down the runtime feeder chan last
# to avoid premature stream clobbers.
if ctx._recv_chan is not None:
# should we encapsulate this in the context api?
await ctx._recv_chan.aclose()
if etype:
if ctx._cancel_called:
log.cancel(
f'Context {fn_name} cancelled by caller with\n{etype}'
)
elif _err is not None:
log.cancel(
f'Context for task cancelled by callee with {etype}\n'
f'target: `{fn_name}`\n'
f'task:{cid}\n'
f'actor:{uid}'
)
# XXX: (MEGA IMPORTANT) if this is a root opened process we
# wait for any immediate child in debug before popping the
# context from the runtime msg loop otherwise inside
# ``Actor._push_result()`` the msg will be discarded and in
# the case where that msg is global debugger unlock (via
# a "stop" msg for a stream), this can result in a deadlock
# where the root is waiting on the lock to clear but the
# child has already cleared it and clobbered IPC.
from ._debug import maybe_wait_for_debugger
await maybe_wait_for_debugger()
# remove the context from runtime tracking
self.actor._contexts.pop(
(self.channel.uid, ctx.cid),
None,
)
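A hedged sketch of the caller/callee pairing (assumes the ``@tractor.context`` decorator and the bidirectional ``Context.open_stream()`` api from this same changeset):

import tractor

@tractor.context
async def echo(ctx: tractor.Context) -> None:
    # delivered as the `first` value on the caller side
    await ctx.started('ready')
    async with ctx.open_stream() as stream:
        async for msg in stream:
            await stream.send(msg)

async def caller(portal) -> None:
    async with portal.open_context(echo) as (ctx, first):
        assert first == 'ready'
        async with ctx.open_stream() as stream:
            await stream.send('hello')
            assert await stream.receive() == 'hello'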
@dataclass
class LocalPortal:
'''
A 'portal' to a local ``Actor``.
"""A 'portal' to a local ``Actor``.
A compatibility shim for normal portals but for invoking functions
using an in-process actor instance.
'''
actor: 'Actor' # type: ignore # noqa
"""
actor: 'Actor' # type: ignore
channel: Channel
async def run_from_ns(self, ns: str, func_name: str, **kwargs) -> Any:
'''
Run a requested local function from a namespace path and
return its result.
'''
async def run(self, ns: str, func_name: str, **kwargs) -> Any:
"""Run a requested function locally and return it's result.
"""
obj = self.actor if ns == 'self' else importlib.import_module(ns)
func = getattr(obj, func_name)
return await func(**kwargs)
if inspect.iscoroutinefunction(func):
return await func(**kwargs)
else:
return func(**kwargs)
@asynccontextmanager
async def open_portal(
channel: Channel,
nursery: Optional[trio.Nursery] = None,
start_msg_loop: bool = True,
shield: bool = False,
) -> typing.AsyncGenerator[Portal, None]:
"""Open a ``Portal`` through the provided ``channel``.
) -> AsyncGenerator[Portal, None]:
'''
Open a ``Portal`` through the provided ``channel``.
Spawns a background task to handle message processing (normally
done by the actor-runtime implicitly).
'''
Spawns a background task to handle message processing.
"""
actor = current_actor()
assert actor
was_connected = False
async with maybe_open_nursery(nursery, shield=shield) as nursery:
if not channel.connected():
await channel.connect()
was_connected = True
@ -597,11 +341,9 @@ async def open_portal(
msg_loop_cs: Optional[trio.CancelScope] = None
if start_msg_loop:
from ._runtime import process_messages
msg_loop_cs = await nursery.start(
partial(
process_messages,
actor,
actor._process_messages,
channel,
# if the local task is cancelled we want to keep
# the msg loop running until our block ends
@ -615,9 +357,8 @@ async def open_portal(
await portal.aclose()
if was_connected:
# gracefully signal remote channel-msg loop
# cancel remote channel-msg loop
await channel.send(None)
# await channel.aclose()
# cancel background msg loop task
if msg_loop_cs:

Some files were not shown because too many files have changed in this diff