Compare commits

58 commits:

fd7a2d378a
4bd583786a
06ad9c10b6
8d7eacdc02
c2f6c39f8f
a602de02a9
df3ceffc77
94fd6e5857
589d16dd95
aace4eae5f
62a81a7e73
bd71e49f89
340d1f6182
3483ed4e6f
2f5dc0783f
a06e9d2a9e
3b3abe101c
018e138461
84358e7443
34234fb4fc
ea6c2504c5
b1f13a7002
8b19c9ff6e
5f1efd9eae
bd189f75cc
01208739ff
6f19fa3107
a1603709ab
78b4eef7ee
211fb07074
ae45b5ff1d
c542b915d6
6bd16749f0
8f468a8c86
3fa36f64ac
be39ff38e4
9cd5d2d7b9
4601c88574
a1488a1773
e058506a00
19a23fefa9
40ad00ce02
b3caf846fc
40cb3585c1
88dbaff11b
3e34f0a374
9e7bed646d
0b73a4b61e
eb237f24cd
83f1e79fdd
1192541623
15b63b7190
c4d5f9d41e
b7089bb4e0
ecb9655519
f98860a5e5
8c8a236799
38ccbd0a9c

.github/workflows/ci.yml

@@ -1,131 +1,41 @@
 name: CI

-on:
-  # any time someone pushes a new branch to origin
-  push:
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
+on: push

 jobs:

   mypy:
     name: 'MyPy'
     runs-on: ubuntu-latest

     steps:
       - name: Checkout
         uses: actions/checkout@v2

       - name: Setup python
         uses: actions/setup-python@v2
         with:
-          python-version: '3.11'
+          python-version: '3.8'

       - name: Install dependencies
-        run: pip install -U . --upgrade-strategy eager -r requirements-test.txt
+        run: pip install -U . --upgrade-strategy eager

       - name: Run MyPy check
-        run: mypy tractor/ --ignore-missing-imports --show-traceback
+        run: mypy tractor/ --ignore-missing-imports

-  # test that we can generate a software distribution and install it
-  # thus avoid missing file issues after packaging.
-  sdist-linux:
-    name: 'sdist'
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Setup python
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.11'
-
-      - name: Build sdist
-        run: python setup.py sdist --formats=zip
-
-      - name: Install sdist from .zips
-        run: python -m pip install dist/*.zip
-
-
-  testing-linux:
+  testing:
     name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}'
     timeout-minutes: 10
     runs-on: ${{ matrix.os }}

     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest]
-        python: ['3.11']
-        spawn_backend: [
-          'trio',
-          'mp_spawn',
-          'mp_forkserver',
-        ]
+        os: [ubuntu-latest, windows-latest]
+        python: ['3.7', '3.8', '3.9']
+        spawn_backend: ['trio', 'mp']

     steps:
       - name: Checkout
         uses: actions/checkout@v2

       - name: Setup python
         uses: actions/setup-python@v2
         with:
           python-version: '${{ matrix.python }}'

       - name: Install dependencies
         run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager

-      - name: List dependencies
-        run: pip list
-
       - name: Run tests
-        run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx
-
-  # We skip 3.10 on windows for now due to not having any collabs to
-  # debug the CI failures. Anyone wanting to hack and solve them is very
-  # welcome, but our primary user base is not using that OS.
-
-  # TODO: use job filtering to accomplish instead of repeated
-  # boilerplate as is above XD:
-  # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows
-  # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
-  # - https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idif
-  # testing-windows:
-  #   name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}'
-  #   timeout-minutes: 12
-  #   runs-on: ${{ matrix.os }}
-
-  #   strategy:
-  #     fail-fast: false
-  #     matrix:
-  #       os: [windows-latest]
-  #       python: ['3.10']
-  #       spawn_backend: ['trio', 'mp']
-
-  #   steps:
-  #     - name: Checkout
-  #       uses: actions/checkout@v2
-
-  #     - name: Setup python
-  #       uses: actions/setup-python@v2
-  #       with:
-  #         python-version: '${{ matrix.python }}'
-
-  #     - name: Install dependencies
-  #       run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager
-
-  #     # TODO: pretty sure this solves debugger deps-issues on windows, but it needs to
-  #     # be verified by someone with a native setup.
-  #     # - name: Force pyreadline3
-  #     #   run: pip uninstall pyreadline; pip install -U pyreadline3
-
-  #     - name: List dependencies
-  #       run: pip list
-
-  #     - name: Run tests
-  #       run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx
+        run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs

LICENSE

@@ -1,21 +1,23 @@
-                    GNU AFFERO GENERAL PUBLIC LICENSE
-                       Version 3, 19 November 2007
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007

- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
  Everyone is permitted to copy and distribute verbatim copies
  of this license document, but changing it is not allowed.

                             Preamble

-  The GNU Affero General Public License is a free, copyleft license for
-software and other kinds of works, specifically designed to ensure
-cooperation with the community in the case of network server software.
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.

   The licenses for most software and other practical works are designed
 to take away your freedom to share and change the works.  By contrast,
-our General Public Licenses are intended to guarantee your freedom to
+the GNU General Public License is intended to guarantee your freedom to
 share and change all versions of a program--to make sure it remains free
-software for all its users.
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.

   When we speak of free software, we are referring to freedom, not
 price.  Our General Public Licenses are designed to make sure that you

@@ -24,34 +26,44 @@ them if you wish), that you receive source code or can get it if you
 want it, that you can change the software or use pieces of it in new
 free programs, and that you know you can do these things.

-  Developers that use our General Public Licenses protect your rights
-with two steps: (1) assert copyright on the software, and (2) offer
-you this License which gives you legal permission to copy, distribute
-and/or modify the software.
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.

-  A secondary benefit of defending all users' freedom is that
-improvements made in alternate versions of the program, if they
-receive widespread use, become available for other developers to
-incorporate.  Many developers of free software are heartened and
-encouraged by the resulting cooperation.  However, in the case of
-software used on network servers, this result may fail to come about.
-The GNU General Public License permits making a modified version and
-letting the public access it on a server without ever releasing its
-source code to the public.
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.

-  The GNU Affero General Public License is designed specifically to
-ensure that, in such cases, the modified source code becomes available
-to the community.  It requires the operator of a network server to
-provide the source code of the modified version running there to the
-users of that server.  Therefore, public use of a modified version, on
-a publicly accessible server, gives the public access to the source
-code of the modified version.
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.

-  An older license, called the Affero General Public License and
-published by Affero, was designed to accomplish similar goals.  This is
-a different license, not a version of the Affero GPL, but Affero has
-released a new version of the Affero GPL which permits relicensing under
-this license.
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.

   The precise terms and conditions for copying, distribution and
 modification follow.

@@ -60,7 +72,7 @@ modification follow.

   0. Definitions.

-  "This License" refers to version 3 of the GNU Affero General Public License.
+  "This License" refers to version 3 of the GNU General Public License.

   "Copyright" also means copyright-like laws that apply to other kinds of
 works, such as semiconductor masks.

@@ -537,45 +549,35 @@ to collect a royalty for further conveying from those to whom you convey
 the Program, the only way you could satisfy both those terms and this
 License would be to refrain entirely from conveying the Program.

-  13. Remote Network Interaction; Use with the GNU General Public License.
-
-  Notwithstanding any other provision of this License, if you modify the
-Program, your modified version must prominently offer all users
-interacting with it remotely through a computer network (if your version
-supports such interaction) an opportunity to receive the Corresponding
-Source of your version by providing access to the Corresponding Source
-from a network server at no charge, through some standard or customary
-means of facilitating copying of software.  This Corresponding Source
-shall include the Corresponding Source for any work covered by version 3
-of the GNU General Public License that is incorporated pursuant to the
-following paragraph.
+  13. Use with the GNU Affero General Public License.

   Notwithstanding any other provision of this License, you have
 permission to link or combine any covered work with a work licensed
-under version 3 of the GNU General Public License into a single
+under version 3 of the GNU Affero General Public License into a single
 combined work, and to convey the resulting work.  The terms of this
 License will continue to apply to the part which is the covered work,
-but the work with which it is combined will remain governed by version
-3 of the GNU General Public License.
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.

   14. Revised Versions of this License.

   The Free Software Foundation may publish revised and/or new versions of
-the GNU Affero General Public License from time to time.  Such new versions
-will be similar in spirit to the present version, but may differ in detail to
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
 address new problems or concerns.

   Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU Affero General
+Program specifies that a certain numbered version of the GNU General
 Public License "or any later version" applies to it, you have the
 option of following the terms and conditions either of that numbered
 version or of any later version published by the Free Software
 Foundation.  If the Program does not specify a version number of the
-GNU Affero General Public License, you may choose any version ever published
+GNU General Public License, you may choose any version ever published
 by the Free Software Foundation.

   If the Program specifies that a proxy can decide which future
-versions of the GNU Affero General Public License can be used, that proxy's
+versions of the GNU General Public License can be used, that proxy's
 public statement of acceptance of a version permanently authorizes you
 to choose that version for the Program.

@@ -633,29 +635,40 @@ the "copyright" line and a pointer to where the full notice is found.
     Copyright (C) <year>  <name of author>

     This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU Affero General Public License as published by
+    it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 3 of the License, or
     (at your option) any later version.

     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU Affero General Public License for more details.
+    GNU General Public License for more details.

-    You should have received a copy of the GNU Affero General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.

 Also add information on how to contact you by electronic and paper mail.

-  If your software can interact with users remotely through a computer
-network, you should also make sure that it provides a way for users to
-get its source.  For example, if your program is a web application, its
-interface could display a "Source" link that leads users to an archive
-of the code.  There are many ways you could offer source, and different
-solutions will be better for different programs; see section 13 for the
-specific requirements.
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".

   You should also get your employer (if you work as a programmer) or school,
 if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU AGPL, see
-<https://www.gnu.org/licenses/>.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.

MANIFEST.in

@@ -1,2 +0,0 @@
-# https://packaging.python.org/en/latest/guides/using-manifest-in/#using-manifest-in
-include docs/README.rst

NEWS.rst

@@ -1,528 +0,0 @@
-=========
-Changelog
-=========
-
-.. towncrier release notes start
-
-tractor 0.1.0a5 (2022-08-03)
-============================
-
-This is our final release supporting Python 3.9 since we will be moving
-internals to the new `match:` syntax from 3.10 going forward and
-further, we have officially dropped usage of the `msgpack` library and
-happily adopted `msgspec`.
-
-Features
---------
-
-- `#165 <https://github.com/goodboy/tractor/issues/165>`_: Add SIGINT
-  protection to our `pdbpp` based debugger subystem such that for
-  (single-depth) actor trees in debug mode we ignore interrupts in any
-  actor currently holding the TTY lock thus avoiding clobbering IPC
-  connections and/or task and process state when working in the REPL.
-
-  As a big note currently so called "nested" actor trees (trees with
-  actors having more then one parent/ancestor) are not fully supported
-  since we don't yet have a mechanism to relay the debug mode knowledge
-  "up" the actor tree (for eg. when handling a crash in a leaf actor).
-  As such currently there is a set of tests and known scenarios which will
-  result in process cloberring by the zombie repaing machinery and these
-  have been documented in https://github.com/goodboy/tractor/issues/320.
-
-  The implementation details include:
-
-  - utilizing a custom SIGINT handler which we apply whenever an actor's
-    runtime enters the debug machinery, which we also make sure the
-    stdlib's `pdb` configuration doesn't override (which it does by
-    default without special instance config).
-  - litter the runtime with `maybe_wait_for_debugger()` mostly in spots
-    where the root actor should block before doing embedded nursery
-    teardown ops which both cancel potential-children-in-deubg as well
-    as eventually trigger zombie reaping machinery.
-  - hardening of the TTY locking semantics/API both in terms of IPC
-    terminations and cancellation and lock release determinism from
-    sync debugger instance methods.
-  - factoring of locking infrastructure into a new `._debug.Lock` global
-    which encapsulates all details of the ``trio`` sync primitives and
-    task/actor uid management and tracking.
-
-  We also add `ctrl-c` cases throughout the test suite though these are
-  disabled for py3.9 (`pdbpp` UX differences that don't seem worth
-  compensating for, especially since this will be our last 3.9 supported
-  release) and there are a slew of marked cases that aren't expected to
-  work in CI more generally (as mentioned in the "nested" tree note
-  above) despite seemingly working when run manually on linux.
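
The handler-swap pattern that entry describes can be sketched with
nothing but the stdlib ``signal`` module; a minimal illustration of the
general technique, not ``tractor``'s actual ``_debug`` machinery:

.. code:: python

    import signal


    def _shield_sigint(signum, frame):
        # swallow ctrl-c while some actor holds the debugger/TTY lock so
        # IPC connections and task state don't get clobbered mid-REPL.
        print('SIGINT shielded: an actor holds the TTY lock')


    def enter_debug_mode():
        # install the shield, returning the prior handler so the
        # runtime can restore it once the REPL session ends
        return signal.signal(signal.SIGINT, _shield_sigint)


    def exit_debug_mode(prior_handler):
        signal.signal(signal.SIGINT, prior_handler)
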
-- `#304 <https://github.com/goodboy/tractor/issues/304>`_: Add a new
-  ``to_asyncio.LinkedTaskChannel.subscribe()`` which gives task-oriented
-  broadcast functionality semantically equivalent to
-  ``tractor.MsgStream.subscribe()`` this makes it possible for multiple
-  ``trio``-side tasks to consume ``asyncio``-side task msgs in tandem.
-
-  Further Improvements to the test suite were added in this patch set
-  including a new scenario test for a sub-actor managed "service nursery"
-  (implementing the basics of a "service manager") including use of
-  *infected asyncio* mode. Further we added a lower level
-  ``test_trioisms.py`` to start to track issues we need to work around in
-  ``trio`` itself which in this case included a bug we were trying to
-  solve related to https://github.com/python-trio/trio/issues/2258.
-
-
-Bug Fixes
----------
-
-- `#318 <https://github.com/goodboy/tractor/issues/318>`_: Fix
-  a previously undetected ``trio``-``asyncio`` task lifetime linking
-  issue with the ``to_asyncio.open_channel_from()`` api where both sides
-  where not properly waiting/signalling termination and it was possible
-  for ``asyncio``-side errors to not propagate due to a race condition.
-
-  The implementation fix summary is:
-  - add state to signal the end of the ``trio`` side task to be
-    read by the ``asyncio`` side and always cancel any ongoing
-    task in such cases.
-  - always wait on the ``asyncio`` task termination from the ``trio``
-    side on error before maybe raising said error.
-  - always close the ``trio`` mem chan on exit to ensure the other
-    side can detect it and follow.
-
-
-Trivial/Internal Changes
-------------------------
-
-- `#248 <https://github.com/goodboy/tractor/issues/248>`_: Adjust the
-  `tractor._spawn.soft_wait()` strategy to avoid sending an actor cancel
-  request (via `Portal.cancel_actor()`) if either the child process is
-  detected as having terminated or the IPC channel is detected to be
-  closed.
-
-  This ensures (even) more deterministic inter-actor cancellation by
-  avoiding the timeout condition where possible when a whild never
-  sucessfully spawned, crashed, or became un-contactable over IPC.
-
-- `#295 <https://github.com/goodboy/tractor/issues/295>`_: Add an
-  experimental ``tractor.msg.NamespacePath`` type for passing Python
-  objects by "reference" through a ``str``-subtype message and using the
-  new ``pkgutil.resolve_name()`` for reference loading.
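
The reference-loading half of that type is a thin wrapper over the
stdlib; a minimal sketch (the ``str``-subtype wrapper itself is elided):

.. code:: python

    import pkgutil

    # a "namespace path" is just a str reference like 'pkg.mod:obj';
    # ``pkgutil.resolve_name()`` (py3.9+) loads the live object back.
    ref: str = 'math:sqrt'
    func = pkgutil.resolve_name(ref)
    assert func(4) == 2.0
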
-- `#298 <https://github.com/goodboy/tractor/issues/298>`_: Add a new
-  `tractor.experimental` subpackage for staging new high level APIs and
-  subystems that we might eventually make built-ins.
-
-- `#300 <https://github.com/goodboy/tractor/issues/300>`_: Update to and
-  pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0) both of which
-  required adjustments for backwards imcompatible API tweaks.
-
-- `#303 <https://github.com/goodboy/tractor/issues/303>`_: Fence off
-  ``multiprocessing`` imports until absolutely necessary in an effort to
-  avoid "resource tracker" spawning side effects that seem to have
-  varying degrees of unreliability per Python release. Port to new
-  ``msgspec.DecodeError``.
-
-- `#305 <https://github.com/goodboy/tractor/issues/305>`_: Add
-  ``tractor.query_actor()`` an addr looker-upper which doesn't deliver
-  a ``Portal`` instance and instead just a socket address ``tuple``.
-
-  Sometimes it's handy to just have a simple way to figure out if
-  a "service" actor is up, so add this discovery helper for that. We'll
-  prolly just leave it undocumented for now until we figure out
-  a longer-term/better discovery system.
-
-- `#316 <https://github.com/goodboy/tractor/issues/316>`_: Run windows
-  CI jobs on python 3.10 after some hacks for ``pdbpp`` dependency
-  issues.
-
-  Issue was to do with the now deprecated `pyreadline` project which
-  should be changed over to `pyreadline3`.
-
-- `#317 <https://github.com/goodboy/tractor/issues/317>`_: Drop use of
-  the ``msgpack`` package and instead move fully to the ``msgspec``
-  codec library.
-
-  We've now used ``msgspec`` extensively in production and there's no
-  reason to not use it as default. Further this change preps us for the up
-  and coming typed messaging semantics (#196), dialog-unprotocol system
-  (#297), and caps-based messaging-protocols (#299) planned before our
-  first beta.
-
-
-tractor 0.1.0a4 (2021-12-18)
-============================
-
-Features
---------
-- `#275 <https://github.com/goodboy/tractor/issues/275>`_: Re-license
-  code base under AGPLv3. Also see `#274
-  <https://github.com/goodboy/tractor/pull/274>`_ for majority
-  contributor consensus on this decision.
-
-- `#121 <https://github.com/goodboy/tractor/issues/121>`_: Add
-  "infected ``asyncio`` mode; a sub-system to spawn and control
-  ``asyncio`` actors using ``trio``'s guest-mode.
-
-  This gets us the following very interesting functionality:
-
-  - ability to spawn an actor that has a process entry point of
-    ``asyncio.run()`` by passing ``infect_asyncio=True`` to
-    ``Portal.start_actor()`` (and friends).
-  - the ``asyncio`` actor embeds ``trio`` using guest-mode and starts
-    a main ``trio`` task which runs the ``tractor.Actor._async_main()``
-    entry point engages all the normal ``tractor`` runtime IPC/messaging
-    machinery; for all purposes the actor is now running normally on
-    a ``trio.run()``.
-  - the actor can now make one-to-one task spawning requests to the
-    underlying ``asyncio`` event loop using either of:
-
-    * ``to_asyncio.run_task()`` to spawn and run an ``asyncio`` task to
-      completion and block until a return value is delivered.
-    * ``async with to_asyncio.open_channel_from():`` which spawns a task
-      and hands it a pair of "memory channels" to allow for bi-directional
-      streaming between the now SC-linked ``trio`` and ``asyncio`` tasks.
-
-  The output from any call(s) to ``asyncio`` can be handled as normal in
-  ``trio``/``tractor`` task operation with the caveat of the overhead due
-  to guest-mode use.
-
-  For more details see the `original PR
-  <https://github.com/goodboy/tractor/pull/121>`_ and `issue
-  <https://github.com/goodboy/tractor/issues/120>`_.
-
-- `#257 <https://github.com/goodboy/tractor/issues/257>`_: Add
-  ``trionics.maybe_open_context()`` an actor-scoped async multi-task
-  context manager resource caching API.
-
-  Adds an SC-safe cacheing async context manager api that only enters on
-  the *first* task entry and only exits on the *last* task exit while in
-  between delivering the same cached value per input key. Keys can be
-  either an explicit ``key`` named arg provided by the user or a
-  hashable ``kwargs`` dict (will be converted to a ``list[tuple]``) which
-  is passed to the underlying manager function as input.
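
That first-enter/last-exit semantic can be approximated with plain
``trio`` primitives; a rough sketch in which error handling during the
first task's entry is elided (this is *not* the shipped ``trionics``
implementation):

.. code:: python

    from contextlib import asynccontextmanager

    import trio

    _cache: dict = {}   # key -> [refcount, value, ready_event, mngr]
    _lock = trio.Lock()


    @asynccontextmanager
    async def maybe_open(key, acm_func):
        async with _lock:
            entry = _cache.setdefault(key, [0, None, trio.Event(), None])
            entry[0] += 1
            first = entry[0] == 1

        if first:
            # only the first task actually enters the wrapped acm
            entry[3] = acm_func()
            entry[1] = await entry[3].__aenter__()
            entry[2].set()
        else:
            # later tasks wait for, then share, the cached value
            await entry[2].wait()

        try:
            yield entry[1]
        finally:
            async with _lock:
                entry[0] -= 1
                last = entry[0] == 0
                if last:
                    _cache.pop(key)
            if last:
                # the real teardown only runs on the final task's exit
                await entry[3].__aexit__(None, None, None)
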
-- `#261 <https://github.com/goodboy/tractor/issues/261>`_: Add
-  cross-actor-task ``Context`` oriented error relay, a new stream
-  overrun error-signal ``StreamOverrun``, and support disabling
-  ``MsgStream`` backpressure as the default before a stream is opened or
-  by choice of the user.
-
-  We added stricter semantics around ``tractor.Context.open_stream():``
-  particularly to do with streams which are only opened at one end.
-  Previously, if only one end opened a stream there was no way for that
-  sender to know if msgs are being received until first, the feeder mem
-  chan on the receiver side hit a backpressure state and then that
-  condition delayed its msg loop processing task to eventually create
-  backpressure on the associated IPC transport. This is non-ideal in the
-  case where the receiver side never opened a stream by mistake since it
-  results in silent block of the sender and no adherence to the underlying
-  mem chan buffer size settings (which is still unsolved btw).
-
-  To solve this we add non-backpressure style message pushing inside
-  ``Actor._push_result()`` by default and only use the backpressure
-  ``trio.MemorySendChannel.send()`` call **iff** the local end of the
-  context has entered ``Context.open_stream():``. This way if the stream
-  was never opened but the mem chan is overrun, we relay back to the
-  sender a (new exception) ``SteamOverrun`` error which is raised in the
-  sender's scope with a special error message about the stream never
-  having been opened. Further, this behaviour (non-backpressure style
-  where senders can expect an error on overruns) can now be enabled with
-  ``.open_stream(backpressure=False)`` and the underlying mem chan size
-  can be specified with a kwarg ``msg_buffer_size: int``.
-
-  Further bug fixes and enhancements in this changeset include:
-
-  - fix a race we were ignoring where if the callee task opened a context
-    it could enter ``Context.open_stream()`` before calling
-    ``.started()``.
-  - Disallow calling ``Context.started()`` more then once.
-  - Enable ``Context`` linked tasks error relaying via the new
-    ``Context._maybe_raise_from_remote_msg()`` which (for now) uses
-    a simple ``trio.Nursery.start_soon()`` to raise the error via closure
-    in the local scope.
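
The overrun policy maps directly onto ``trio``'s two send APIs; a
simplified stand-alone sketch (``StreamOverrun`` here is a stand-in
class, not the real exception type):

.. code:: python

    import trio


    class StreamOverrun(Exception):
        '''The receiver's feeder mem chan filled but no stream is open.'''


    async def push_msg(
        send_chan: trio.MemorySendChannel,
        msg,
        stream_opened: bool,
    ):
        if stream_opened:
            # receiver opted in: block until buffer space frees up
            await send_chan.send(msg)
        else:
            # fail fast instead of silently blocking the sender
            try:
                send_chan.send_nowait(msg)
            except trio.WouldBlock:
                raise StreamOverrun('the stream was never opened!')
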
-- `#267 <https://github.com/goodboy/tractor/issues/267>`_: This
-  (finally) adds fully acknowledged remote cancellation messaging
-  support for both explicit ``Portal.cancel_actor()`` calls as well as
-  when there is a "runtime-wide" cancellations (eg. during KBI or
-  general actor nursery exception handling which causes a full actor
-  "crash"/termination).
-
-  You can think of this as the most ideal case in 2-generals where the
-  actor requesting the cancel of its child is able to always receive back
-  the ACK to that request. This leads to a more deterministic shutdown of
-  the child where the parent is able to wait for the child to fully
-  respond to the request. On a localhost setup, where the parent can
-  monitor the state of the child through process or other OS APIs instead
-  of solely through IPC messaging, the parent can know whether or not the
-  child decided to cancel with more certainty. In the case of separate
-  hosts, we still rely on a simple timeout approach until such a time
-  where we prefer to get "fancier".
-
-- `#271 <https://github.com/goodboy/tractor/issues/271>`_: Add a per
-  actor ``debug_mode: bool`` control to our nursery.
-
-  This allows spawning actors via ``ActorNursery.start_actor()`` (and
-  other dependent methods) with a ``debug_mode=True`` flag much like
-  ``tractor.open_nursery():`` such that per process crash handling
-  can be toggled for cases where a user does not need/want all child actors
-  to drop into the debugger on error. This is often useful when you have
-  actor-tasks which are expected to error often (and be re-run) but want
-  to specifically interact with some (problematic) child.
-
-
-Bugfixes
---------
-
-- `#239 <https://github.com/goodboy/tractor/issues/239>`_: Fix
-  keyboard interrupt handling in ``Portal.open_context()`` blocks.
-
-  Previously this was not triggering cancellation of the remote task
-  context and could result in hangs if a stream was also opened. This
-  fix is to accept `BaseException` since it is likely any other top
-  level exception other then KBI (even though not expected) should also
-  get this result.
-
-- `#264 <https://github.com/goodboy/tractor/issues/264>`_: Fix
-  ``Portal.run_in_actor()`` returns ``None`` result.
-
-  ``None`` was being used as the cached result flag and obviously breaks
-  on a ``None`` returned from the remote target task. This would cause an
-  infinite hang if user code ever called ``Portal.result()`` *before* the
-  nursery exit. The simple fix is to use the *return message* as the
-  initial "no-result-received-yet" flag value and, once received, the
-  return value is read from the message to avoid the cache logic error.
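
The underlying fix is the classic sentinel pattern, shown here as a
minimal stand-alone sketch (not the actual ``Portal`` internals):

.. code:: python

    _NO_RESULT = object()  # unique marker; can't collide with user data


    class ResultBox:
        def __init__(self):
            self._value = _NO_RESULT

        def set(self, value):
            self._value = value

        def ready(self) -> bool:
            # correct even when the remote task legitimately returned None
            return self._value is not _NO_RESULT
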
-- `#266 <https://github.com/goodboy/tractor/issues/266>`_: Fix
-  graceful cancellation of daemon actors
-
-  Previously, his was a bug where if the soft wait on a sub-process (the
-  ``await .proc.wait()``) in the reaper task teardown was cancelled we
-  would fail over to the hard reaping sequence (meant for culling off any
-  potential zombies via system kill signals). The hard reap has a timeout
-  of 3s (currently though in theory we could make it shorter?) before
-  system signalling kicks in. This means that any daemon actor still
-  running during nursery exit would get hard reaped (3s later) instead of
-  cancelled via IPC message. Now we catch the ``trio.Cancelled``, call
-  ``Portal.cancel_actor()`` on the daemon and expect the child to
-  self-terminate after the runtime cancels and shuts down the process.
-
-- `#278 <https://github.com/goodboy/tractor/issues/278>`_: Repair
-  inter-actor stream closure semantics to work correctly with
-  ``tractor.trionics.BroadcastReceiver`` task fan out usage.
-
-  A set of previously unknown bugs discovered in `#257
-  <https://github.com/goodboy/tractor/pull/257>`_ let graceful stream
-  closure result in hanging consumer tasks that use the broadcast APIs.
-  This adds better internal closure state tracking to the broadcast
-  receiver and message stream APIs and in particular ensures that when an
-  underlying stream/receive-channel (a broadcast receiver is receiving
-  from) is closed, all consumer tasks waiting on that underlying channel
-  are woken so they can receive the ``trio.EndOfChannel`` signal and
-  promptly terminate.
-
-
-tractor 0.1.0a3 (2021-11-02)
-============================
-
-Features
---------
-
-- Switch to using the ``trio`` process spawner by default on windows. (#166)
-
-  This gets windows users debugger support (manually tested) and in
-  general a more resilient (nested) actor tree implementation.
-
-- Add optional `msgspec <https://jcristharif.com/msgspec/>`_ support
-  as an alernative, faster MessagePack codec. (#214)
-
-  Provides us with a path toward supporting typed IPC message contracts. Further,
-  ``msgspec`` structs may be a valid tool to start for formalizing our
-  "SC dialog un-protocol" messages as described in `#36
-  <https://github.com/goodboy/tractor/issues/36>`_.
-
-- Introduce a new ``tractor.trionics`` `sub-package`_ that exposes
-  a selection of our relevant high(er) level trio primitives and
-  goodies. (#241)
-
-  At outset we offer a ``gather_contexts()`` context manager for
-  concurrently entering a sequence of async context managers (much like
-  a version of ``asyncio.gather()`` but for context managers) and use it
-  in a new ``tractor.open_actor_cluster()`` manager-helper that can be
-  entered to concurrently spawn a flat actor pool. We also now publicly
-  expose our "broadcast channel" APIs (``open_broadcast_receiver()``)
-  from here.
-
-.. _sub-package: ../tractor/trionics
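
In spirit a ``gather_contexts()`` is a nursery plus a couple of events;
a hedged approximation (not the shipped ``trionics`` code):

.. code:: python

    from contextlib import asynccontextmanager

    import trio


    @asynccontextmanager
    async def gather_contexts(mngrs):
        values = [None] * len(mngrs)
        entered = [trio.Event() for _ in mngrs]
        leave = trio.Event()

        async def _hold_open(i, mngr):
            # each acm is entered in its own task and held open
            # until every consumer is done with the whole batch
            async with mngr as value:
                values[i] = value
                entered[i].set()
                await leave.wait()

        async with trio.open_nursery() as n:
            for i, mngr in enumerate(mngrs):
                n.start_soon(_hold_open, i, mngr)

            # wait for all the concurrent ``__aenter__()``s to complete
            for ev in entered:
                await ev.wait()
            try:
                yield values
            finally:
                leave.set()
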
-- Change the core message loop to handle task and actor-runtime cancel
-  requests immediately instead of scheduling them as is done for rpc-task
-  requests. (#245)
-
-  In order to obtain more reliable teardown mechanics for (complex) actor
-  trees it's important that we specially treat cancel requests as having
-  higher priority. Previously, it was possible that task cancel requests
-  could actually also themselves be cancelled if a "actor-runtime" cancel
-  request was received (can happen during messy multi actor crashes that
-  propagate). Instead cancels now block the msg loop until serviced and
-  a response is relayed back to the requester. This also allows for
-  improved debugger support since we have determinism guarantees about
-  which processes must wait before hard killing their children.
-
-- (`#248 <https://github.com/goodboy/tractor/pull/248>`_) Drop Python
-  3.8 support in favour of rolling with two latest releases for the time
-  being.
-
-
-Misc
-----
-
-- (`#243 <https://github.com/goodboy/tractor/pull/243>`_) add a distinct
-  ``'CANCEL'`` log level to allow the runtime to emit details about
-  cancellation machinery statuses.
-
-
-tractor 0.1.0a2 (2021-09-07)
-============================
-
-Features
---------
-
-- Add `tokio-style broadcast channels
-  <https://docs.rs/tokio/1.11.0/tokio/sync/broadcast/index.html>`_ as
-  a solution for `#204 <https://github.com/goodboy/tractor/pull/204>`_ and
-  discussed thoroughly in `trio/#987
-  <https://github.com/python-trio/trio/issues/987>`_.
-
-  This gives us local task broadcast functionality using a new
-  ``BroadcastReceiver`` type which can wrap ``trio.ReceiveChannel`` and
-  provide fan-out copies of a stream of data to every subscribed consumer.
-  We use this new machinery to provide a ``ReceiveMsgStream.subscribe()``
-  async context manager which can be used by actor-local concumers tasks
-  to easily pull from a shared and dynamic IPC stream. (`#229
-  <https://github.com/goodboy/tractor/pull/229>`_)
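
Stripped of lag and closure handling, the core fan-out mechanic is just
copying each received value to every subscriber; a toy sketch using
plain memory channels (not the real ``BroadcastReceiver``):

.. code:: python

    import trio


    async def broadcast(
        source: trio.MemoryReceiveChannel,
        subscribers: list[trio.MemorySendChannel],
    ):
        # relay every upstream value to each subscriber's private
        # channel so all consumers see the full stream of data
        async with source:
            async for value in source:
                for sub in subscribers:
                    await sub.send(value)
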
-Bugfixes
---------
-
-- Handle broken channel/stream faults where the root's tty lock is left
-  acquired by some child actor who went MIA and the root ends up hanging
-  indefinitely. (`#234 <https://github.com/goodboy/tractor/pull/234>`_)
-
-  There's two parts here: we no longer shield wait on the lock and,
-  now always do our best to release the lock on the expected worst
-  case connection faults.
-
-
-Deprecations and Removals
--------------------------
-
-- Drop stream "shielding" support which was originally added to sidestep
-  a cancelled call to ``.receive()``
-
-  In the original api design a stream instance was returned directly from
-  a call to ``Portal.run()`` and thus there was no "exit phase" to handle
-  cancellations and errors which would trigger implicit closure. Now that
-  we have said enter/exit semantics with ``Portal.open_stream_from()`` and
-  ``Context.open_stream()`` we can drop this implicit (and arguably
-  confusing) behavior. (`#230 <https://github.com/goodboy/tractor/pull/230>`_)
-
-- Drop Python 3.7 support in preparation for supporting 3.9+ syntax.
-  (`#232 <https://github.com/goodboy/tractor/pull/232>`_)
-
-
-tractor 0.1.0a1 (2021-08-01)
-============================
-
-Features
---------
-- Updated our uni-directional streaming API (`#206
-  <https://github.com/goodboy/tractor/pull/206>`_) to require a context
-  manager style ``async with Portal.open_stream_from(target) as stream:``
-  which explicitly determines when to stop a stream in the calling (aka
-  portal opening) actor much like ``async_generator.aclosing()``
-  enforcement.
-
-- Improved the ``multiprocessing`` backend sub-actor reaping (`#208
-  <https://github.com/goodboy/tractor/pull/208>`_) during actor nursery
-  exit, particularly during cancellation scenarios that previously might
-  result in hard to debug hangs.
-
-- Added initial bi-directional streaming support in `#219
-  <https://github.com/goodboy/tractor/pull/219>`_ with follow up debugger
-  improvements via `#220 <https://github.com/goodboy/tractor/pull/220>`_
-  using the new ``tractor.Context`` cross-actor task syncing system.
-  The debugger upgrades add an edge triggered last-in-tty-lock semaphore
-  which allows the root process for a tree to avoid clobbering children
-  who have queued to acquire the ``pdb`` repl by waiting to cancel
-  sub-actors until the lock is known to be released **and** has no
-  pending waiters.
-
-
-Experiments and WIPs
---------------------
-- Initial optional ``msgspec`` serialization support in `#214
-  <https://github.com/goodboy/tractor/pull/214>`_ which should hopefully
-  land by next release.
-
-- Improved "infect ``asyncio``" cross-loop task cancellation and error
-  propagation by vastly simplifying the cross-loop-task streaming approach.
-  We may end up just going with a use of ``anyio`` in the medium term to
-  avoid re-doing work done by their cross-event-loop portals. See the
-  ``infect_asyncio`` for details.
-
-
-Improved Documentation
-----------------------
-- `Updated our readme <https://github.com/goodboy/tractor/pull/211>`_ to
-  include more (and better) `examples
-  <https://github.com/goodboy/tractor#run-a-func-in-a-process>`_ (with
-  matching multi-terminal process monitoring shell commands) as well as
-  added many more examples to the `repo set
-  <https://github.com/goodboy/tractor/tree/master/examples>`_.
-
-- Added a readme `"actors under the hood" section
-  <https://github.com/goodboy/tractor#under-the-hood>`_ in an effort to
-  guard against suggestions for changing the API away from ``trio``'s
-  *tasks-as-functions* style.
-
-- Moved to using the `sphinx book theme
-  <https://sphinx-book-theme.readthedocs.io/en/latest/index.html>`_
-  though it needs some heavy tweaking and doesn't seem to show our logo
-  on rtd :(
-
-
-Trivial/Internal Changes
-------------------------
-- Added a new ``TransportClosed`` internal exception/signal (`#215
-  <https://github.com/goodboy/tractor/pull/215>`_ for catching TCP
-  channel gentle closes instead of silently falling through the message
-  handler loop via an async generator ``return``.
-
-
-Deprecations and Removals
--------------------------
-- Dropped support for invoking sync functions (`#205
-  <https://github.com/goodboy/tractor/pull/205>`_) in other
-  actors/processes since you can always wrap a sync function from an
-  async one. Users can instead consider using ``trio-parallel`` which
-  is a project specifically geared for purely synchronous calls in
-  sub-processes.
-
-- Deprecated our ``tractor.run()`` entrypoint `#197
-  <https://github.com/goodboy/tractor/pull/197>`_; the runtime is now
-  either started implicitly in first actor nursery use or via an
-  explicit call to ``tractor.open_root_actor()``. Full removal of
-  ``tractor.run()`` will come by beta release.
-
-
-tractor 0.1.0a0 (2021-02-28)
-============================
-
-..
-    TODO: fill out more of the details of the initial feature set in some TLDR form
-
-Summary
--------
-- ``trio`` based process spawner (using ``subprocess``)
-- initial multi-process debugging with ``pdb++``
-- windows support using both ``trio`` and ``multiprocessing`` spawners
-- "portal" api for cross-process, structured concurrent, (streaming) IPC
|
446
docs/README.rst
446
docs/README.rst
|
@ -1,122 +1,36 @@
|
|||
|logo| ``tractor``: distributed structurred concurrency
|
||||
|logo| ``tractor``: next-gen Python parallelism
|
||||
|
||||
|gh_actions|
|
||||
|docs|
|
||||
|
||||
``tractor`` is a `structured concurrency`_ (SC), multi-processing_ runtime built on trio_.
|
||||
``tractor`` is a `structured concurrent`_, multi-processing_ runtime built on trio_.
|
||||
|
||||
Fundamentally, ``tractor`` provides parallelism via
|
||||
``trio``-"*actors*": independent Python **processes** (i.e.
|
||||
*non-shared-memory threads*) which can schedule ``trio`` tasks whilst
|
||||
maintaining *end-to-end SC* inside a *distributed supervision tree*.
|
||||
Fundamentally ``tractor`` gives you parallelism via ``trio``-"*actors*":
|
||||
our nurseries_ let you spawn new Python processes which each run a ``trio``
|
||||
scheduled runtime - a call to ``trio.run()``.
|
||||
|
||||
Cross-process (and thus cross-host) SC is accomplished through the
|
||||
combined use of our,
|
||||
We believe the system adhere's to the `3 axioms`_ of an "`actor model`_"
|
||||
but likely *does not* look like what *you* probably think an "actor
|
||||
model" looks like, and that's *intentional*.
|
||||
|
||||
- "actor nurseries_" which provide for spawning multiple, and
|
||||
possibly nested, Python processes each running a ``trio`` scheduled
|
||||
runtime - a call to ``trio.run()``,
|
||||
- an "SC-transitive supervision protocol" enforced as an
|
||||
IPC-message-spec encapsulating all RPC-dialogs.
|
||||
|
||||
We believe the system adheres to the `3 axioms`_ of an "`actor model`_"
|
||||
but likely **does not** look like what **you** probably *think* an "actor
|
||||
model" looks like, and that's **intentional**.
|
||||
|
||||
|
||||
Where do i start!?
|
||||
------------------
|
||||
The first step to grok ``tractor`` is to get an intermediate
|
||||
knowledge of ``trio`` and **structured concurrency** B)
|
||||
|
||||
Some great places to start are,
|
||||
|
||||
- the seminal `blog post`_
|
||||
- obviously the `trio docs`_
|
||||
- wikipedia's nascent SC_ page
|
||||
- the fancy diagrams @ libdill-docs_
|
||||
The first step to grok ``tractor`` is to get the basics of ``trio`` down.
|
||||
A great place to start is the `trio docs`_ and this `blog post`_.
|
||||
|
||||
|
||||
Features
|
||||
--------
|
||||
- **It's just** a ``trio`` API!
|
||||
- *Infinitely nesteable* process trees running embedded ``trio`` tasks.
|
||||
- Swappable, OS-specific, process spawning via multiple backends.
|
||||
- Modular IPC stack, allowing for custom interchange formats (eg.
|
||||
as offered from `msgspec`_), varied transport protocols (TCP, RUDP,
|
||||
QUIC, wireguard), and OS-env specific higher-perf primitives (UDS,
|
||||
shm-ring-buffers).
|
||||
- Optionally distributed_: all IPC and RPC APIs work over multi-host
|
||||
transports the same as local.
|
||||
- Builtin high-level streaming API that enables your app to easily
|
||||
leverage the benefits of a "`cheap or nasty`_" `(un)protocol`_.
|
||||
- A "native UX" around a multi-process safe debugger REPL using
|
||||
`pdbp`_ (a fork & fix of `pdb++`_)
|
||||
- "Infected ``asyncio``" mode: support for starting an actor's
|
||||
runtime as a `guest`_ on the ``asyncio`` loop allowing us to
|
||||
provide stringent SC-style ``trio.Task``-supervision around any
|
||||
``asyncio.Task`` spawned via our ``tractor.to_asyncio`` APIs.
|
||||
- A **very naive** and still very much work-in-progress inter-actor
|
||||
`discovery`_ sys with plans to support multiple `modern protocol`_
|
||||
approaches.
|
||||
- Various ``trio`` extension APIs via ``tractor.trionics`` such as,
|
||||
- task fan-out `broadcasting`_,
|
||||
- multi-task-single-resource-caching and fan-out-to-multi
|
||||
``__aenter__()`` APIs for ``@acm`` functions,
|
||||
- (WIP) a ``TaskMngr``: one-cancels-one style nursery supervisor.
|
||||
|
||||
|
||||
Install
|
||||
-------
|
||||
``tractor`` is still in a *alpha-near-beta-stage* for many
|
||||
of its subsystems, however we are very close to having a stable
|
||||
lowlevel runtime and API.
|
||||
|
||||
As such, it's currently recommended that you clone and install the
|
||||
repo from source::
|
||||
|
||||
pip install git+git://github.com/goodboy/tractor.git
|
||||
|
||||
|
||||
We use the very hip `uv`_ for project mgmt::
|
||||
|
||||
git clone https://github.com/goodboy/tractor.git
|
||||
cd tractor
|
||||
uv sync --dev
|
||||
uv run python examples/rpc_bidir_streaming.py
|
||||
|
||||
Consider activating a virtual/project-env before starting to hack on
|
||||
the code base::
|
||||
|
||||
# you could use plain ol' venvs
|
||||
# https://docs.astral.sh/uv/pip/environments/
|
||||
uv venv tractor_py313 --python 3.13
|
||||
|
||||
# but @goodboy prefers the more explicit (and shell agnostic)
|
||||
# https://docs.astral.sh/uv/configuration/environment/#uv_project_environment
|
||||
UV_PROJECT_ENVIRONMENT="tractor_py313
|
||||
|
||||
# hint hint, enter @goodboy's fave shell B)
|
||||
uv run --dev xonsh
|
||||
|
||||
Alongside all this we ofc offer "releases" on PyPi::
|
||||
|
||||
pip install tractor
|
||||
|
||||
Just note that YMMV since the main git branch is often much further
|
||||
ahead then any latest release.
|
||||
|
||||
|
||||
Example codez
|
||||
-------------
|
||||
In ``tractor``'s (very lacking) documention we prefer to point to
|
||||
example scripts in the repo over duplicating them in docs, but with
|
||||
that in mind here are some definitive snippets to try and hook you
|
||||
into digging deeper.
|
||||
- **It's just** a ``trio`` API
|
||||
- *Infinitely nesteable* process trees
|
||||
- Built-in inter-process streaming APIs
|
||||
- A (first ever?) "native" multi-core debugger UX for Python using `pdb++`_
|
||||
- Support for a swappable, OS specific, process spawning layer
|
||||
- A modular transport stack, allowing for custom serialization,
|
||||
communications protocols, and environment specific IPC primitives
|
||||
- `structured concurrency`_ from the ground up
|
||||
|
||||
|
||||
Run a func in a process
|
||||
***********************
|
||||
-----------------------
|
||||
Use ``trio``'s style of focussing on *tasks as functions*:
|
||||
|
||||
.. code:: python
|
||||
|
@ -174,7 +88,7 @@ might want to check out `trio-parallel`_.
|
|||
|
||||
|
||||
Zombie safe: self-destruct a process tree
|
||||
*****************************************
|
||||
-----------------------------------------
|
||||
``tractor`` tries to protect you from zombies, no matter what.
|
||||
|
||||
.. code:: python
|
||||
|
@ -200,7 +114,7 @@ Zombie safe: self-destruct a process tree
|
|||
f"running in pid {os.getpid()}"
|
||||
)
|
||||
|
||||
await trio.sleep_forever()
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
||||
async def main():
|
||||
|
@ -213,8 +127,7 @@ Zombie safe: self-destruct a process tree
|
|||
print('This process tree will self-destruct in 1 sec...')
|
||||
await trio.sleep(1)
|
||||
|
||||
# raise an error in root actor/process and trigger
|
||||
# reaping of all minions
|
||||
# you could have done this yourself
|
||||
raise Exception('Self Destructed')
|
||||
|
||||
|
||||
|
@ -230,8 +143,8 @@ it **is a bug**.
|
|||
|
||||
|
||||
"Native" multi-process debugging
|
||||
********************************
|
||||
Using the magic of `pdbp`_ and our internal IPC, we've
|
||||
--------------------------------
|
||||
Using the magic of `pdb++`_ and our internal IPC, we've
|
||||
been able to create a native feeling debugging experience for
|
||||
any (sub-)process in your ``tractor`` tree.
|
||||
|
||||
|
@ -284,100 +197,8 @@ And, yes, there's a built-in crash handling mode B)
|
|||
We're hoping to add a respawn-from-repl system soon!
|
||||
|
||||
|
||||
SC compatible bi-directional streaming
|
||||
**************************************
|
||||
Yes, you saw it here first; we provide 2-way streams
|
||||
with reliable, transitive setup/teardown semantics.
|
||||
|
||||
Our nascent api is remniscent of ``trio.Nursery.start()``
|
||||
style invocation:
|
||||
|
||||
.. code:: python
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def simple_rpc(
|
||||
|
||||
ctx: tractor.Context,
|
||||
data: int,
|
||||
|
||||
) -> None:
|
||||
'''Test a small ping-pong 2-way streaming server.
|
||||
|
||||
'''
|
||||
# signal to parent that we're up much like
|
||||
# ``trio_typing.TaskStatus.started()``
|
||||
await ctx.started(data + 1)
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
count = 0
|
||||
async for msg in stream:
|
||||
|
||||
assert msg == 'ping'
|
||||
await stream.send('pong')
|
||||
count += 1
|
||||
|
||||
else:
|
||||
assert count == 10
|
||||
|
||||
|
||||
async def main() -> None:
|
||||
|
||||
async with tractor.open_nursery() as n:
|
||||
|
||||
portal = await n.start_actor(
|
||||
'rpc_server',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
# XXX: this syntax requires py3.9
|
||||
async with (
|
||||
|
||||
portal.open_context(
|
||||
simple_rpc,
|
||||
data=10,
|
||||
) as (ctx, sent),
|
||||
|
||||
ctx.open_stream() as stream,
|
||||
):
|
||||
|
||||
assert sent == 11
|
||||
|
||||
count = 0
|
||||
# receive msgs using async for style
|
||||
await stream.send('ping')
|
||||
|
||||
async for msg in stream:
|
||||
assert msg == 'pong'
|
||||
await stream.send('ping')
|
||||
count += 1
|
||||
|
||||
if count >= 9:
|
||||
break
|
||||
|
||||
|
||||
# explicitly teardown the daemon-actor
|
||||
await portal.cancel_actor()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
||||
|
||||
|
||||
See original proposal and discussion in `#53`_ as well
|
||||
as follow up improvements in `#223`_ that we'd love to
|
||||
hear your thoughts on!
|
||||
|
||||
.. _#53: https://github.com/goodboy/tractor/issues/53
|
||||
.. _#223: https://github.com/goodboy/tractor/issues/223
|
||||
|
||||
|
||||
Worker poolz are easy peasy
|
||||
***************************
|
||||
---------------------------
|
||||
The initial ask from most new users is *"how do I make a worker
|
||||
pool thing?"*.
|
||||
|
||||
|
@ -398,172 +219,20 @@ real time::
|
|||
This uses no extra threads, fancy semaphores or futures; all we need
|
||||
is ``tractor``'s IPC!
|
||||
|
||||
"Infected ``asyncio``" mode
|
||||
***************************
|
||||
Have a bunch of ``asyncio`` code you want to force to be SC at the process level?
|
||||
|
||||
Check out our experimental system for `guest`_-mode controlled
|
||||
``asyncio`` actors:
|
||||
|
||||
.. code:: python
|
||||
|
||||
import asyncio
|
||||
from statistics import mean
|
||||
import time
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
|
||||
async def aio_echo_server(
|
||||
to_trio: trio.MemorySendChannel,
|
||||
from_trio: asyncio.Queue,
|
||||
) -> None:
|
||||
|
||||
# a first message must be sent **from** this ``asyncio``
|
||||
# task or the ``trio`` side will never unblock from
|
||||
# ``tractor.to_asyncio.open_channel_from():``
|
||||
to_trio.send_nowait('start')
|
||||
|
||||
# XXX: this uses an ``from_trio: asyncio.Queue`` currently but we
|
||||
# should probably offer something better.
|
||||
while True:
|
||||
# echo the msg back
|
||||
to_trio.send_nowait(await from_trio.get())
|
||||
await asyncio.sleep(0)
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def trio_to_aio_echo_server(
|
||||
ctx: tractor.Context,
|
||||
):
|
||||
# this will block until the ``asyncio`` task sends a "first"
|
||||
# message.
|
||||
async with tractor.to_asyncio.open_channel_from(
|
||||
aio_echo_server,
|
||||
) as (first, chan):
|
||||
|
||||
assert first == 'start'
|
||||
await ctx.started(first)
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
async for msg in stream:
|
||||
await chan.send(msg)
|
||||
|
||||
out = await chan.receive()
|
||||
# echo back to parent actor-task
|
||||
await stream.send(out)
|
||||
|
||||
|
||||
async def main():
|
||||
|
||||
async with tractor.open_nursery() as n:
|
||||
p = await n.start_actor(
|
||||
'aio_server',
|
||||
enable_modules=[__name__],
|
||||
infect_asyncio=True,
|
||||
)
|
||||
async with p.open_context(
|
||||
trio_to_aio_echo_server,
|
||||
) as (ctx, first):
|
||||
|
||||
assert first == 'start'
|
||||
|
||||
count = 0
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
delays = []
|
||||
send = time.time()
|
||||
|
||||
await stream.send(count)
|
||||
async for msg in stream:
|
||||
recv = time.time()
|
||||
delays.append(recv - send)
|
||||
assert msg == count
|
||||
count += 1
|
||||
send = time.time()
|
||||
await stream.send(count)
|
||||
|
||||
if count >= 1e3:
|
||||
break
|
||||
|
||||
print(f'mean round trip rate (Hz): {1/mean(delays)}')
|
||||
await p.cancel_actor()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
||||
|
||||
|
||||
Yes, we spawn a python process, run ``asyncio``, start ``trio`` on the
|
||||
``asyncio`` loop, then send commands to the ``trio`` scheduled tasks to
|
||||
tell ``asyncio`` tasks what to do XD
|
||||
|
||||
We need help refining the `asyncio`-side channel API to be more
|
||||
`trio`-like. Feel free to sling your opinion in `#273`_!
|
||||
|
||||
|
||||
.. _#273: https://github.com/goodboy/tractor/issues/273
|
||||
|
||||
|
||||
Higher level "cluster" APIs
|
||||
***************************
|
||||
To be extra terse, the ``tractor`` devs have started hacking some "higher
|
||||
level" APIs for managing actor trees/clusters. These interfaces should
|
||||
generally be considered provisional for now, but we encourage you to try
|
||||
them and provide feedback. Here's a new API that lets you quickly
|
||||
spawn a flat cluster:
|
||||
|
||||
.. code:: python
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
|
||||
async def sleepy_jane():
|
||||
uid = tractor.current_actor().uid
|
||||
print(f'Yo i am actor {uid}')
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
||||
async def main():
|
||||
'''
|
||||
Spawn a flat actor cluster, with one process per
|
||||
detected core.
|
||||
|
||||
'''
|
||||
portal_map: dict[str, tractor.Portal]
|
||||
results: dict[str, str]
|
||||
|
||||
# look at this hip new syntax!
|
||||
async with (
|
||||
|
||||
tractor.open_actor_cluster(
|
||||
modules=[__name__]
|
||||
) as portal_map,
|
||||
|
||||
trio.open_nursery() as n,
|
||||
):
|
||||
|
||||
for (name, portal) in portal_map.items():
|
||||
n.start_soon(portal.run, sleepy_jane)
|
||||
|
||||
await trio.sleep(0.5)
|
||||
|
||||
# kill the cluster with a cancel
|
||||
raise KeyboardInterrupt
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
trio.run(main)
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
|
||||
.. _full worker pool re-implementation: https://github.com/goodboy/tractor/blob/master/examples/parallelism/concurrent_actors_primes.py
|
||||
|
||||
Install
|
||||
-------
|
||||
From PyPi::
|
||||
|
||||
pip install tractor
|
||||
|
||||
|
||||
From git::
|
||||
|
||||
pip install git+git://github.com/goodboy/tractor.git
|
||||
|
||||
|
||||
Under the hood
|
||||
--------------
|
||||
|
@ -628,22 +297,12 @@ properties of the system.
|
|||
|
||||
What's on the TODO:
|
||||
-------------------
|
||||
Help us push toward the future of distributed `Python`.
|
||||
Help us push toward the future.
|
||||
|
||||
- Erlang-style supervisors via composed context managers (see `#22
|
||||
<https://github.com/goodboy/tractor/issues/22>`_)
|
||||
- Typed messaging protocols (ex. via ``msgspec.Struct``, sketched below; see `#36
|
||||
<https://github.com/goodboy/tractor/issues/36>`_)
|
||||
- Typed capability-based (dialog) protocols ( see `#196
|
||||
<https://github.com/goodboy/tractor/issues/196>`_ with draft work
|
||||
started in `#311 <https://github.com/goodboy/tractor/pull/311>`_)
|
||||
- We **recently disabled CI-testing on windows** and need help getting
|
||||
it running again! (see `#327
|
||||
<https://github.com/goodboy/tractor/pull/327>`_). **We do have windows
|
||||
support** (and have for quite a while) but since no active hacker
|
||||
exists in the user-base to help test on that OS, for now we're not
|
||||
actively maintaining testing due to the added hassle and general
|
||||
latency..
|
||||
- (Soon to land) ``asyncio`` support allowing for "infected" actors where
|
||||
`trio` drives the `asyncio` scheduler via the astounding "`guest mode`_"
|
||||
- Typed messaging protocols (ex. via ``msgspec``)
|
||||
- Erlang-style supervisors via composed context managers
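
To give a rough feel for the typed-messaging idea, a ``msgspec.Struct``
based wire msg might look something like this sketch (the ``Ping`` type
is made up and nothing like it ships yet; assumes a recent ``msgspec``):

.. code:: python

    import msgspec


    class Ping(msgspec.Struct):
        # a hypothetical typed IPC msg
        sender: str
        seq: int


    buf: bytes = msgspec.msgpack.encode(Ping(sender='root', seq=0))
    msg: Ping = msgspec.msgpack.decode(buf, type=Ping)
    assert msg.sender == 'root'
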
Feel like saying hi?
|
||||
|
@ -655,39 +314,26 @@ say hi, please feel free to reach us in our `matrix channel`_. If
|
|||
matrix seems too hip, we're also mostly all in the `trio gitter
|
||||
channel`_!
|
||||
|
||||
.. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228
|
||||
.. _distributed: https://en.wikipedia.org/wiki/Distributed_computing
|
||||
.. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing
|
||||
.. _trio: https://github.com/python-trio/trio
|
||||
.. _nurseries: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/#nurseries-a-structured-replacement-for-go-statements
|
||||
.. _actor model: https://en.wikipedia.org/wiki/Actor_model
|
||||
.. _trio: https://github.com/python-trio/trio
|
||||
.. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing
|
||||
.. _trionic: https://trio.readthedocs.io/en/latest/design.html#high-level-design-principles
|
||||
.. _async sandwich: https://trio.readthedocs.io/en/latest/tutorial.html#async-sandwich
|
||||
.. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228
|
||||
.. _3 axioms: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=162s
|
||||
.. .. _3 axioms: https://en.wikipedia.org/wiki/Actor_model#Fundamental_concepts
|
||||
.. _adherance to: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=1821s
|
||||
.. _trio gitter channel: https://gitter.im/python-trio/general
|
||||
.. _matrix channel: https://matrix.to/#/!tractor:matrix.org
|
||||
.. _broadcasting: https://github.com/goodboy/tractor/pull/229
|
||||
.. _modern procotol: https://en.wikipedia.org/wiki/Rendezvous_protocol
|
||||
.. _pdbp: https://github.com/mdmintz/pdbp
|
||||
.. _pdb++: https://github.com/pdbpp/pdbpp
|
||||
.. _cheap or nasty: https://zguide.zeromq.org/docs/chapter7/#The-Cheap-or-Nasty-Pattern
|
||||
.. _(un)protocol: https://zguide.zeromq.org/docs/chapter7/#Unprotocols
|
||||
.. _discovery: https://zguide.zeromq.org/docs/chapter8/#Discovery
|
||||
.. _modern protocol: https://en.wikipedia.org/wiki/Rendezvous_protocol
|
||||
.. _guest mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops
|
||||
.. _messages: https://en.wikipedia.org/wiki/Message_passing
|
||||
.. _trio docs: https://trio.readthedocs.io/en/latest/
|
||||
.. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
|
||||
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
|
||||
.. _SC: https://en.wikipedia.org/wiki/Structured_concurrency
|
||||
.. _libdill-docs: https://sustrik.github.io/libdill/structured-concurrency.html
|
||||
.. _structured concurrency: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/
|
||||
.. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony
|
||||
.. _async generators: https://www.python.org/dev/peps/pep-0525/
|
||||
.. _trio-parallel: https://github.com/richardsheridan/trio-parallel
|
||||
.. _uv: https://docs.astral.sh/uv/
|
||||
.. _msgspec: https://jcristharif.com/msgspec/
|
||||
.. _guest: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops
|
||||
|
||||
|
||||
.. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square
|
||||
|
|
|
@ -1,51 +0,0 @@
|
|||
Hot tips for ``tractor`` hackers
|
||||
================================
|
||||
|
||||
This is a WIP guide for newcomers to the project mostly to do with
|
||||
dev, testing, CI and release gotchas, reminders and best practices.
|
||||
|
||||
``tractor`` is a fairly novel project compared to most since it is
|
||||
effectively a new way of doing distributed computing in Python and is
|
||||
much closer to working with an "application level runtime" (like erlang
|
||||
OTP or scala's akka project) than it is a traditional Python library.
|
||||
As such, having an arsenal of tools and recipes for figuring out the
|
||||
right way to debug problems when they do arise is somewhat of
|
||||
a necessity.
|
||||
|
||||
|
||||
Making a Release
|
||||
----------------
|
||||
We currently do nothing special here except the traditional
|
||||
PyPA release recipe, as `documented by twine`_. I personally
|
||||
create sub-dirs within the generated `dist/` with an explicit
|
||||
release name such as `alpha3/` when there's been a sequence of
|
||||
releases I've made, but it really is up to you how you like to
|
||||
organize generated sdists locally.
|
||||
|
||||
The resulting build cmds are approximately:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
python setup.py sdist -d ./dist/XXX.X/
|
||||
|
||||
twine upload -r testpypi dist/XXX.X/*
|
||||
|
||||
twine upload dist/XXX.X/*
|
||||
|
||||
|
||||
|
||||
.. _documented by twine: https://twine.readthedocs.io/en/latest/#using-twine
|
||||
|
||||
|
||||
Debugging and monitoring actor trees
|
||||
------------------------------------
|
||||
TODO: but there are tips in the readme for some terminal commands
|
||||
which can be used to see the process trees easily on Linux.
|
||||
|
||||
|
||||
Using the log system to trace `trio` task flow
|
||||
----------------------------------------------
|
||||
TODO: the logging system is meant to be oriented around
|
||||
stack "layers" of the runtime such that you can track
|
||||
"logical abstraction layers" in the code such as errors, cancellation,
|
||||
IPC and streaming, and the low level transport and wire protocols.
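
In the meantime, a quick taste (a sketch only; ``get_console_log()``
and the "INFO" level mirror what the example scripts in this repo
already use):

.. code:: python

    import tractor

    # route runtime-layer logs (errors, cancellation, IPC,
    # transport..) to the console at the given level.
    log = tractor.log.get_console_log('INFO')
    log.info('hello from the root actor')
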
@ -396,7 +396,7 @@ tasks spawned via multiple RPC calls to an actor can modify
|
|||
|
||||
|
||||
# a per process cache
|
||||
_actor_cache: dict[str, bool] = {}
|
||||
_actor_cache: Dict[str, bool] = {}
|
||||
|
||||
|
||||
def ping_endpoints(endpoints: List[str]):
|
||||
|
|
|
@ -1,259 +0,0 @@
|
|||
'''
|
||||
Complex edge case where during real-time streaming the IPC transport
|
||||
channels are wiped out (purposely in this example though it could have
|
||||
been an outage) and we want to ensure that despite being in debug mode
|
||||
(or not) the user can send SIGINT once they notice the hang and the
|
||||
actor tree will eventually be cancelled without leaving any zombies.
|
||||
|
||||
'''
|
||||
from contextlib import asynccontextmanager as acm
|
||||
from functools import partial
|
||||
|
||||
from tractor import (
|
||||
open_nursery,
|
||||
context,
|
||||
Context,
|
||||
ContextCancelled,
|
||||
MsgStream,
|
||||
_testing,
|
||||
)
|
||||
import trio
|
||||
import pytest
|
||||
|
||||
|
||||
async def break_ipc_then_error(
|
||||
stream: MsgStream,
|
||||
break_ipc_with: str|None = None,
|
||||
pre_close: bool = False,
|
||||
):
|
||||
await _testing.break_ipc(
|
||||
stream=stream,
|
||||
method=break_ipc_with,
|
||||
pre_close=pre_close,
|
||||
)
|
||||
async for msg in stream:
|
||||
await stream.send(msg)
|
||||
|
||||
assert 0
|
||||
|
||||
|
||||
async def iter_ipc_stream(
|
||||
stream: MsgStream,
|
||||
break_ipc_with: str|None = None,
|
||||
pre_close: bool = False,
|
||||
):
|
||||
async for msg in stream:
|
||||
await stream.send(msg)
|
||||
|
||||
|
||||
@context
|
||||
async def recv_and_spawn_net_killers(
|
||||
|
||||
ctx: Context,
|
||||
break_ipc_after: bool|int = False,
|
||||
pre_close: bool = False,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Receive stream msgs and spawn some IPC killers mid-stream.
|
||||
|
||||
'''
|
||||
broke_ipc: bool = False
|
||||
await ctx.started()
|
||||
async with (
|
||||
ctx.open_stream() as stream,
|
||||
trio.open_nursery(
|
||||
strict_exception_groups=False,
|
||||
) as tn,
|
||||
):
|
||||
async for i in stream:
|
||||
print(f'child echoing {i}')
|
||||
if not broke_ipc:
|
||||
await stream.send(i)
|
||||
else:
|
||||
await trio.sleep(0.01)
|
||||
|
||||
if (
|
||||
break_ipc_after
|
||||
and
|
||||
i >= break_ipc_after
|
||||
):
|
||||
broke_ipc = True
|
||||
tn.start_soon(
|
||||
iter_ipc_stream,
|
||||
stream,
|
||||
)
|
||||
tn.start_soon(
|
||||
partial(
|
||||
break_ipc_then_error,
|
||||
stream=stream,
|
||||
pre_close=pre_close,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@acm
|
||||
async def stuff_hangin_ctlc(timeout: float = 1) -> None:
|
||||
|
||||
with trio.move_on_after(timeout) as cs:
|
||||
yield timeout
|
||||
|
||||
if cs.cancelled_caught:
|
||||
# pretend to be a user seeing no streaming action
|
||||
# thinking it's a hang, and then hitting ctl-c..
|
||||
print(
|
||||
f"i'm a user on the PARENT side and thingz hangin "
|
||||
f'after timeout={timeout} ???\n\n'
|
||||
'MASHING CTRL-C..!?\n'
|
||||
)
|
||||
raise KeyboardInterrupt
|
||||
|
||||
|
||||
async def main(
|
||||
debug_mode: bool = False,
|
||||
start_method: str = 'trio',
|
||||
loglevel: str = 'cancel',
|
||||
|
||||
# by default we break the parent IPC first (if configured to break
|
||||
# at all), but this can be changed so the child does first (even if
|
||||
# both are set to break).
|
||||
break_parent_ipc_after: int|bool = False,
|
||||
break_child_ipc_after: int|bool = False,
|
||||
pre_close: bool = False,
|
||||
|
||||
) -> None:
|
||||
|
||||
async with (
|
||||
open_nursery(
|
||||
start_method=start_method,
|
||||
|
||||
# NOTE: even debugger is used we shouldn't get
|
||||
# a hang since it never engages due to broken IPC
|
||||
debug_mode=debug_mode,
|
||||
loglevel=loglevel,
|
||||
|
||||
) as an,
|
||||
):
|
||||
sub_name: str = 'chitty_hijo'
|
||||
portal = await an.start_actor(
|
||||
sub_name,
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
async with (
|
||||
stuff_hangin_ctlc(timeout=2) as timeout,
|
||||
_testing.expect_ctxc(
|
||||
yay=(
|
||||
break_parent_ipc_after
|
||||
or break_child_ipc_after
|
||||
),
|
||||
# TODO: we CAN'T remove this right?
|
||||
# since we need the ctxc to bubble up from either
|
||||
# the stream API after the `None` msg is sent
|
||||
# (which actually implicitly cancels all remote
|
||||
# tasks in the hijo) or from simulated
|
||||
# KBI-mash-from-user
|
||||
# or should we expect that a KBI triggers the ctxc
|
||||
# and KBI in an eg?
|
||||
reraise=True,
|
||||
),
|
||||
|
||||
portal.open_context(
|
||||
recv_and_spawn_net_killers,
|
||||
break_ipc_after=break_child_ipc_after,
|
||||
pre_close=pre_close,
|
||||
) as (ctx, sent),
|
||||
):
|
||||
rx_eoc: bool = False
|
||||
ipc_break_sent: bool = False
|
||||
async with ctx.open_stream() as stream:
|
||||
for i in range(1000):
|
||||
|
||||
if (
|
||||
break_parent_ipc_after
|
||||
and
|
||||
i > break_parent_ipc_after
|
||||
and
|
||||
not ipc_break_sent
|
||||
):
|
||||
print(
|
||||
'#################################\n'
|
||||
'Simulating PARENT-side IPC BREAK!\n'
|
||||
'#################################\n'
|
||||
)
|
||||
|
||||
# TODO: other methods? see break func above.
|
||||
# await stream._ctx.chan.send(None)
|
||||
# await stream._ctx.chan.transport.stream.send_eof()
|
||||
await stream._ctx.chan.transport.stream.aclose()
|
||||
ipc_break_sent = True
|
||||
|
||||
# it actually breaks right here in the
|
||||
# mp_spawn/forkserver backends and thus the
|
||||
# zombie reaper never even kicks in?
|
||||
try:
|
||||
print(f'parent sending {i}')
|
||||
await stream.send(i)
|
||||
except ContextCancelled as ctxc:
|
||||
print(
|
||||
'parent received ctxc on `stream.send()`\n'
|
||||
f'{ctxc}\n'
|
||||
)
|
||||
assert 'root' in ctxc.canceller
|
||||
assert sub_name in ctx.canceller
|
||||
|
||||
# TODO: is this needed or no?
|
||||
raise
|
||||
|
||||
except trio.ClosedResourceError:
|
||||
# NOTE: don't send if we already broke the
|
||||
# connection to avoid raising a closed-error
|
||||
# such that we drop through to the ctl-c
|
||||
# mashing by user.
|
||||
await trio.sleep(0.01)
|
||||
|
||||
# timeout: int = 1
|
||||
# with trio.move_on_after(timeout) as cs:
|
||||
async with stuff_hangin_ctlc() as timeout:
|
||||
print(
|
||||
f'PARENT `stream.receive()` with timeout={timeout}\n'
|
||||
)
|
||||
# NOTE: in the parent side IPC failure case this
|
||||
# will raise an ``EndOfChannel`` after the child
|
||||
# is killed and sends a stop msg back to its
|
||||
# caller/this-parent.
|
||||
try:
|
||||
rx = await stream.receive()
|
||||
print(
|
||||
"I'm a happy PARENT user and echoed to me is\n"
|
||||
f'{rx}\n'
|
||||
)
|
||||
except trio.EndOfChannel:
|
||||
rx_eoc: bool = True
|
||||
print('MsgStream got EoC for PARENT')
|
||||
raise
|
||||
|
||||
print(
|
||||
'Streaming finished and we got Eoc.\n'
|
||||
'Canceling `.open_context()` in root with\n'
|
||||
'CTRL-C..'
|
||||
)
|
||||
if rx_eoc:
|
||||
assert stream.closed
|
||||
try:
|
||||
await stream.send(i)
|
||||
pytest.fail('stream not closed?')
|
||||
except (
|
||||
trio.ClosedResourceError,
|
||||
trio.EndOfChannel,
|
||||
) as send_err:
|
||||
if rx_eoc:
|
||||
assert send_err is stream._eoc
|
||||
else:
|
||||
assert send_err is stream._closed
|
||||
|
||||
raise KeyboardInterrupt
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -1,41 +1,39 @@
|
|||
from typing import AsyncIterator
|
||||
from itertools import repeat
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
tractor.log.get_console_log("INFO")
|
||||
|
||||
async def stream_forever() -> AsyncIterator[int]:
|
||||
|
||||
async def stream_forever():
|
||||
for i in repeat("I can see these little future bubble things"):
|
||||
# each yielded value is sent over the ``Channel`` to the parent actor
|
||||
# each yielded value is sent over the ``Channel`` to the
|
||||
# parent actor
|
||||
yield i
|
||||
await trio.sleep(0.01)
|
||||
|
||||
|
||||
async def main():
|
||||
|
||||
async with tractor.open_nursery() as n:
|
||||
# stream for at most 1 second
|
||||
with trio.move_on_after(1) as cancel_scope:
|
||||
|
||||
portal = await n.start_actor(
|
||||
'donny',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
async with tractor.open_nursery() as n:
|
||||
|
||||
# this async for loop streams values from the above
|
||||
# async generator running in a separate process
|
||||
async with portal.open_stream_from(stream_forever) as stream:
|
||||
count = 0
|
||||
async for letter in stream:
|
||||
print(letter)
|
||||
count += 1
|
||||
portal = await n.start_actor(
|
||||
'donny',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
if count > 50:
|
||||
break
|
||||
# this async for loop streams values from the above
|
||||
# async generator running in a separate process
|
||||
async with portal.open_stream_from(stream_forever) as stream:
|
||||
async for letter in stream:
|
||||
print(letter)
|
||||
|
||||
print('stream terminated')
|
||||
|
||||
await portal.cancel_actor()
|
||||
# we support trio's cancellation system
|
||||
assert cancel_scope.cancelled_caught
|
||||
assert n.cancelled
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -1,136 +0,0 @@
|
|||
'''
|
||||
Examples of using the builtin `breakpoint()` from an `asyncio.Task`
|
||||
running in a subactor spawned with `infect_asyncio=True`.
|
||||
|
||||
'''
|
||||
import asyncio
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
from tractor import (
|
||||
to_asyncio,
|
||||
Portal,
|
||||
)
|
||||
|
||||
|
||||
async def aio_sleep_forever():
|
||||
await asyncio.sleep(float('inf'))
|
||||
|
||||
|
||||
async def bp_then_error(
|
||||
to_trio: trio.MemorySendChannel,
|
||||
from_trio: asyncio.Queue,
|
||||
|
||||
raise_after_bp: bool = True,
|
||||
|
||||
) -> None:
|
||||
|
||||
# sync with `trio`-side (caller) task
|
||||
to_trio.send_nowait('start')
|
||||
|
||||
# NOTE: what happens here inside the hook needs some refinement..
|
||||
# => seems like it's still `._debug._set_trace()` but
|
||||
# we set `Lock.local_task_in_debug = 'sync'`, we probably want
|
||||
# some further, at least, meta-data about the task/actor in debug
|
||||
# in terms of making it clear it's `asyncio` mucking about.
|
||||
breakpoint() # asyncio-side
|
||||
|
||||
# short checkpoint / delay
|
||||
await asyncio.sleep(0.5) # asyncio-side
|
||||
|
||||
if raise_after_bp:
|
||||
raise ValueError('asyncio side error!')
|
||||
|
||||
# TODO: test case with this so that it gets cancelled?
|
||||
else:
|
||||
# XXX NOTE: this is required in order to get the SIGINT-ignored
|
||||
# hang case documented in the module script section!
|
||||
await aio_sleep_forever()
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def trio_ctx(
|
||||
ctx: tractor.Context,
|
||||
bp_before_started: bool = False,
|
||||
):
|
||||
|
||||
# this will block until the ``asyncio`` task sends a "first"
|
||||
# message, see first line in above func.
|
||||
async with (
|
||||
to_asyncio.open_channel_from(
|
||||
bp_then_error,
|
||||
# raise_after_bp=not bp_before_started,
|
||||
) as (first, chan),
|
||||
|
||||
trio.open_nursery() as tn,
|
||||
):
|
||||
assert first == 'start'
|
||||
|
||||
if bp_before_started:
|
||||
await tractor.pause() # trio-side
|
||||
|
||||
await ctx.started(first) # trio-side
|
||||
|
||||
tn.start_soon(
|
||||
to_asyncio.run_task,
|
||||
aio_sleep_forever,
|
||||
)
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
||||
async def main(
|
||||
bps_all_over: bool = True,
|
||||
|
||||
# TODO, WHICH OF THESE HAZ BUGZ?
|
||||
cancel_from_root: bool = False,
|
||||
err_from_root: bool = False,
|
||||
|
||||
) -> None:
|
||||
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
maybe_enable_greenback=True,
|
||||
# loglevel='devx',
|
||||
) as an:
|
||||
ptl: Portal = await an.start_actor(
|
||||
'aio_daemon',
|
||||
enable_modules=[__name__],
|
||||
infect_asyncio=True,
|
||||
debug_mode=True,
|
||||
# loglevel='cancel',
|
||||
)
|
||||
|
||||
async with ptl.open_context(
|
||||
trio_ctx,
|
||||
bp_before_started=bps_all_over,
|
||||
) as (ctx, first):
|
||||
|
||||
assert first == 'start'
|
||||
|
||||
# pause in parent to ensure no cross-actor
|
||||
# locking problems exist!
|
||||
await tractor.pause() # trio-root
|
||||
|
||||
if cancel_from_root:
|
||||
await ctx.cancel()
|
||||
|
||||
if err_from_root:
|
||||
assert 0
|
||||
else:
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
||||
# TODO: case where we cancel from trio-side while asyncio task
|
||||
# has debugger lock?
|
||||
# await ptl.cancel_actor()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
# works fine B)
|
||||
trio.run(main)
|
||||
|
||||
# will hang and ignores SIGINT !!
|
||||
# NOTE: you'll need to send a SIGQUIT (via ctl-\) to kill it
|
||||
# manually..
|
||||
# trio.run(main, True)
|
|
@ -1,9 +0,0 @@
|
|||
'''
|
||||
Reproduce a bug where enabling debug mode for a sub-actor actually causes
|
||||
a hang on teardown...
|
||||
|
||||
'''
|
||||
import asyncio
|
||||
|
||||
import trio
|
||||
import tractor
|
|
@ -1,9 +1,8 @@
|
|||
'''
|
||||
Fast fail test with a `Context`.
|
||||
|
||||
Ensure the partially initialized sub-actor process
|
||||
fast fail test with a context.
|
||||
ensure the partially initialized sub-actor process
|
||||
doesn't cause a hang on error/cancel of the parent
|
||||
nursery.
|
||||
nrusery.
|
||||
|
||||
'''
|
||||
import trio
|
||||
|
@ -20,7 +19,7 @@ async def sleep(
|
|||
|
||||
|
||||
async def open_ctx(
|
||||
n: tractor._supervise.ActorNursery
|
||||
n: tractor._trionics.ActorNursery
|
||||
):
|
||||
|
||||
# spawn both actors
|
||||
|
|
|
@ -4,15 +4,9 @@ import trio
|
|||
|
||||
async def breakpoint_forever():
|
||||
"Indefinitely re-enter debugger in child actor."
|
||||
try:
|
||||
while True:
|
||||
yield 'yo'
|
||||
await tractor.pause()
|
||||
except BaseException:
|
||||
tractor.log.get_console_log().exception(
|
||||
'Cancelled while trying to enter pause point!'
|
||||
)
|
||||
raise
|
||||
while True:
|
||||
yield 'yo'
|
||||
await tractor.breakpoint()
|
||||
|
||||
|
||||
async def name_error():
|
||||
|
@ -21,14 +15,11 @@ async def name_error():
|
|||
|
||||
|
||||
async def main():
|
||||
'''
|
||||
Test breakpoint in a streaming actor.
|
||||
|
||||
'''
|
||||
"""Test breakpoint in a streaming actor.
|
||||
"""
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
loglevel='cancel',
|
||||
# loglevel='devx',
|
||||
loglevel='error',
|
||||
) as n:
|
||||
|
||||
p0 = await n.start_actor('bp_forever', enable_modules=[__name__])
|
||||
|
@ -36,18 +27,7 @@ async def main():
|
|||
|
||||
# retrieve results
|
||||
async with p0.open_stream_from(breakpoint_forever) as stream:
|
||||
|
||||
# triggers the first name error
|
||||
try:
|
||||
await p1.run(name_error)
|
||||
except tractor.RemoteActorError as rae:
|
||||
assert rae.boxed_type is NameError
|
||||
|
||||
async for i in stream:
|
||||
|
||||
# a second time try the failing subactor and this time
|
||||
# let error propagate up to the parent/nursery.
|
||||
await p1.run(name_error)
|
||||
await p1.run(name_error)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -10,12 +10,7 @@ async def name_error():
|
|||
async def breakpoint_forever():
|
||||
"Indefinitely re-enter debugger in child actor."
|
||||
while True:
|
||||
await tractor.pause()
|
||||
|
||||
# NOTE: if the test never sent 'q'/'quit' commands
|
||||
# on the pdb repl, without this checkpoint line the
|
||||
# repl would spin in this actor forever.
|
||||
# await trio.sleep(0)
|
||||
await tractor.breakpoint()
|
||||
|
||||
|
||||
async def spawn_until(depth=0):
|
||||
|
@ -23,20 +18,12 @@ async def spawn_until(depth=0):
|
|||
"""
|
||||
async with tractor.open_nursery() as n:
|
||||
if depth < 1:
|
||||
|
||||
await n.run_in_actor(breakpoint_forever)
|
||||
|
||||
p = await n.run_in_actor(
|
||||
# await n.run_in_actor('breakpoint_forever', breakpoint_forever)
|
||||
await n.run_in_actor(
|
||||
name_error,
|
||||
name='name_error'
|
||||
)
|
||||
await trio.sleep(0.5)
|
||||
# rx and propagate error from child
|
||||
await p.result()
|
||||
|
||||
else:
|
||||
# recursive call to spawn another process branching layer of
|
||||
# the tree
|
||||
depth -= 1
|
||||
await n.run_in_actor(
|
||||
spawn_until,
|
||||
|
@ -45,7 +32,6 @@ async def spawn_until(depth=0):
|
|||
)
|
||||
|
||||
|
||||
# TODO: notes on the new boxed-relayed errors through proxy actors
|
||||
async def main():
|
||||
"""The main ``tractor`` routine.
|
||||
|
||||
|
@ -67,7 +53,6 @@ async def main():
|
|||
"""
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
# loglevel='cancel',
|
||||
) as n:
|
||||
|
||||
# spawn both actors
|
||||
|
@ -82,16 +67,8 @@ async def main():
|
|||
name='spawner1',
|
||||
)
|
||||
|
||||
# TODO: test this case as well where the parent doesn't see
|
||||
# the sub-actor errors by default and instead expect a user
|
||||
# ctrl-c to kill the root.
|
||||
with trio.move_on_after(3):
|
||||
await trio.sleep_forever()
|
||||
|
||||
# gah still an issue here.
|
||||
await portal.result()
|
||||
|
||||
# should never get here
|
||||
await portal1.result()
|
||||
|
||||
|
||||
|
|
|
@ -1,8 +1,3 @@
|
|||
'''
|
||||
Test that a nested nursery will avoid clobbering
|
||||
the debugger latched by a broken child.
|
||||
|
||||
'''
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
|
@ -40,7 +35,6 @@ async def main():
|
|||
"""
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
loglevel='devx',
|
||||
) as n:
|
||||
|
||||
# spawn both actors
|
||||
|
|
|
@ -6,7 +6,7 @@ async def breakpoint_forever():
|
|||
"Indefinitely re-enter debugger in child actor."
|
||||
while True:
|
||||
await trio.sleep(0.1)
|
||||
await tractor.pause()
|
||||
await tractor.breakpoint()
|
||||
|
||||
|
||||
async def name_error():
|
||||
|
@ -38,7 +38,6 @@ async def main():
|
|||
"""
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
# loglevel='runtime',
|
||||
) as n:
|
||||
|
||||
# Spawn both actors, don't bother with collecting results
|
||||
|
|
|
@ -1,40 +0,0 @@
|
|||
import trio
|
||||
import tractor
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def just_sleep(
|
||||
|
||||
ctx: tractor.Context,
|
||||
**kwargs,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Start and sleep.
|
||||
|
||||
'''
|
||||
await ctx.started()
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
||||
async def main() -> None:
|
||||
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
) as n:
|
||||
portal = await n.start_actor(
|
||||
'ctx_child',
|
||||
|
||||
# XXX: we don't enable the current module in order
|
||||
# to trigger `ModuleNotFound`.
|
||||
enable_modules=[],
|
||||
)
|
||||
|
||||
async with portal.open_context(
|
||||
just_sleep, # taken from pytest parameterization
|
||||
) as (ctx, sent):
|
||||
raise KeyboardInterrupt
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -1,28 +0,0 @@
|
|||
import trio
|
||||
import tractor
|
||||
|
||||
async def die():
|
||||
raise RuntimeError
|
||||
|
||||
|
||||
async def main():
|
||||
async with tractor.open_nursery() as tn:
|
||||
|
||||
debug_actor = await tn.start_actor(
|
||||
'debugged_boi',
|
||||
enable_modules=[__name__],
|
||||
debug_mode=True,
|
||||
)
|
||||
crash_boi = await tn.start_actor(
|
||||
'crash_boi',
|
||||
enable_modules=[__name__],
|
||||
# debug_mode=True,
|
||||
)
|
||||
|
||||
async with trio.open_nursery() as n:
|
||||
n.start_soon(debug_actor.run, die)
|
||||
n.start_soon(crash_boi.run, die)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -1,56 +0,0 @@
|
|||
import trio
|
||||
import tractor
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def name_error(
|
||||
ctx: tractor.Context,
|
||||
):
|
||||
'''
|
||||
Raise a `NameError`, catch it and enter `.post_mortem()`, then
|
||||
expect the `._rpc._invoke()` crash handler to also engage.
|
||||
|
||||
'''
|
||||
try:
|
||||
getattr(doggypants) # noqa (on purpose)
|
||||
except NameError:
|
||||
await tractor.post_mortem()
|
||||
raise
|
||||
|
||||
|
||||
async def main():
|
||||
'''
|
||||
Test 3 `PdbREPL` entries:
|
||||
- one in the child due to manual `.post_mortem()`,
|
||||
- another in the child due to runtime RPC crash handling.
|
||||
- final one here in parent from the RAE.
|
||||
|
||||
'''
|
||||
# XXX NOTE: ideally the REPL arrives at this frame in the parent
|
||||
# ONE UP FROM the inner ctx block below!
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
# loglevel='cancel',
|
||||
) as an:
|
||||
p: tractor.Portal = await an.start_actor(
|
||||
'child',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
# XXX should raise `RemoteActorError[NameError]`
|
||||
# AND be the active frame when REPL enters!
|
||||
try:
|
||||
async with p.open_context(name_error) as (ctx, first):
|
||||
assert first
|
||||
except tractor.RemoteActorError as rae:
|
||||
assert rae.boxed_type is NameError
|
||||
|
||||
# manually handle in root's parent task
|
||||
await tractor.post_mortem()
|
||||
raise
|
||||
else:
|
||||
raise RuntimeError('IPC ctx should have remote errored!?')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -1,49 +0,0 @@
|
|||
import os
|
||||
import sys
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
|
||||
async def main() -> None:
|
||||
|
||||
# initially unset, no entry.
|
||||
orig_pybp_var: int = os.environ.get('PYTHONBREAKPOINT')
|
||||
assert orig_pybp_var in {None, "0"}
|
||||
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
) as an:
|
||||
assert an
|
||||
assert (
|
||||
(pybp_var := os.environ['PYTHONBREAKPOINT'])
|
||||
==
|
||||
'tractor.devx._debug._sync_pause_from_builtin'
|
||||
)
|
||||
|
||||
# TODO: an assert that verifies the hook has indeed been, hooked
|
||||
# XD
|
||||
assert (
|
||||
(pybp_hook := sys.breakpointhook)
|
||||
is not tractor.devx._debug._set_trace
|
||||
)
|
||||
|
||||
print(
|
||||
f'$PYTHONBREAKPOINT: {pybp_var!r}\n'
|
||||
f'`sys.breakpointhook`: {pybp_hook!r}\n'
|
||||
)
|
||||
breakpoint() # first bp, tractor hook set.
|
||||
|
||||
# XXX AFTER EXIT (of actor-runtime) verify the hook is unset..
|
||||
#
|
||||
# YES, this is weird but it's how stdlib docs say to do it..
|
||||
# https://docs.python.org/3/library/sys.html#sys.breakpointhook
|
||||
assert os.environ.get('PYTHONBREAKPOINT') is orig_pybp_var
|
||||
assert sys.breakpointhook
|
||||
|
||||
# now ensure a regular builtin pause still works
|
||||
breakpoint() # last bp, stdlib hook restored
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -10,7 +10,7 @@ async def main():
|
|||
|
||||
await trio.sleep(0.1)
|
||||
|
||||
await tractor.pause()
|
||||
await tractor.breakpoint()
|
||||
|
||||
await trio.sleep(0.1)
|
||||
|
||||
|
|
|
@ -2,16 +2,13 @@ import trio
|
|||
import tractor
|
||||
|
||||
|
||||
async def main(
|
||||
registry_addrs: tuple[str, int]|None = None
|
||||
):
|
||||
async def main():
|
||||
|
||||
async with tractor.open_root_actor(
|
||||
debug_mode=True,
|
||||
# loglevel='runtime',
|
||||
):
|
||||
while True:
|
||||
await tractor.pause()
|
||||
await tractor.breakpoint()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -20,9 +20,9 @@ async def main():
|
|||
# spawn both actors
|
||||
portal = await n.run_in_actor(key_error)
|
||||
|
||||
# XXX: originally a bug caused by this, where root would enter
|
||||
# the debugger and clobber the tty used by the repl even though
|
||||
# child should have it locked.
|
||||
# XXX: originally a bug causes by this
|
||||
# where root would enter debugger even
|
||||
# though child should have it locked.
|
||||
with trio.fail_after(1):
|
||||
await trio.Event().wait()
|
||||
|
||||
|
|
|
@ -1,83 +0,0 @@
|
|||
'''
|
||||
Verify we can dump a `stackscope` tree on a hang.
|
||||
|
||||
'''
|
||||
import os
|
||||
import signal
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
@tractor.context
|
||||
async def start_n_shield_hang(
|
||||
ctx: tractor.Context,
|
||||
):
|
||||
# actor: tractor.Actor = tractor.current_actor()
|
||||
|
||||
# sync to parent-side task
|
||||
await ctx.started(os.getpid())
|
||||
|
||||
print('Entering shield sleep..')
|
||||
with trio.CancelScope(shield=True):
|
||||
await trio.sleep_forever() # in subactor
|
||||
|
||||
# XXX NOTE ^^^ since this shields, we expect
|
||||
# the zombie reaper (aka T800) to engage on
|
||||
# SIGINT from the user and eventually hard-kill
|
||||
# this subprocess!
|
||||
|
||||
|
||||
async def main(
|
||||
from_test: bool = False,
|
||||
) -> None:
|
||||
|
||||
async with (
|
||||
tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
enable_stack_on_sig=True,
|
||||
# maybe_enable_greenback=False,
|
||||
loglevel='devx',
|
||||
) as an,
|
||||
):
|
||||
ptl: tractor.Portal = await an.start_actor(
|
||||
'hanger',
|
||||
enable_modules=[__name__],
|
||||
debug_mode=True,
|
||||
)
|
||||
async with ptl.open_context(
|
||||
start_n_shield_hang,
|
||||
) as (ctx, cpid):
|
||||
|
||||
_, proc, _ = an._children[ptl.chan.uid]
|
||||
assert cpid == proc.pid
|
||||
|
||||
print(
|
||||
'Yo my child hanging..?\n'
|
||||
# "i'm a user who wants to see a `stackscope` tree!\n"
|
||||
)
|
||||
|
||||
# XXX simulate the wrapping test's "user actions"
|
||||
# (i.e. if a human didn't run this manually but wants to
|
||||
# know what they should do to reproduce test behaviour)
|
||||
if from_test:
|
||||
print(
|
||||
f'Sending SIGUSR1 to {cpid!r}!\n'
|
||||
)
|
||||
os.kill(
|
||||
cpid,
|
||||
signal.SIGUSR1,
|
||||
)
|
||||
|
||||
# simulate user cancelling program
|
||||
await trio.sleep(0.5)
|
||||
os.kill(
|
||||
os.getpid(),
|
||||
signal.SIGINT,
|
||||
)
|
||||
else:
|
||||
# actually let user send the ctl-c
|
||||
await trio.sleep_forever() # in root
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -1,88 +0,0 @@
|
|||
import trio
|
||||
import tractor
|
||||
|
||||
|
||||
async def cancellable_pause_loop(
|
||||
task_status: trio.TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED
|
||||
):
|
||||
with trio.CancelScope() as cs:
|
||||
task_status.started(cs)
|
||||
for _ in range(3):
|
||||
try:
|
||||
# ON first entry, there is no level triggered
|
||||
# cancellation yet, so this cp does a parent task
|
||||
# ctx-switch so that this scope raises for the NEXT
|
||||
# checkpoint we hit.
|
||||
await trio.lowlevel.checkpoint()
|
||||
await tractor.pause()
|
||||
|
||||
cs.cancel()
|
||||
|
||||
# parent should have called `cs.cancel()` by now
|
||||
await trio.lowlevel.checkpoint()
|
||||
|
||||
except trio.Cancelled:
|
||||
print('INSIDE SHIELDED PAUSE')
|
||||
await tractor.pause(shield=True)
|
||||
else:
|
||||
# should raise it again, bubbling up to parent
|
||||
print('BUBBLING trio.Cancelled to parent task-nursery')
|
||||
await trio.lowlevel.checkpoint()
|
||||
|
||||
|
||||
async def pm_on_cancelled():
|
||||
async with trio.open_nursery() as tn:
|
||||
tn.cancel_scope.cancel()
|
||||
try:
|
||||
await trio.sleep_forever()
|
||||
except trio.Cancelled:
|
||||
# should also raise `Cancelled` since
|
||||
# we didn't pass `shield=True`.
|
||||
try:
|
||||
await tractor.post_mortem(hide_tb=False)
|
||||
except trio.Cancelled as taskc:
|
||||
|
||||
# should enter just fine, in fact it should
|
||||
# be debugging the internals of the previous
|
||||
# sin-shield call above Bo
|
||||
await tractor.post_mortem(
|
||||
hide_tb=False,
|
||||
shield=True,
|
||||
)
|
||||
raise taskc
|
||||
|
||||
else:
|
||||
raise RuntimeError('Dint cancel as expected!?')
|
||||
|
||||
|
||||
async def cancelled_before_pause(
|
||||
):
|
||||
'''
|
||||
Verify that using a shielded pause works despite a surrounding
|
||||
cancel-called state in the calling task.
|
||||
|
||||
'''
|
||||
async with trio.open_nursery() as tn:
|
||||
cs: trio.CancelScope = await tn.start(cancellable_pause_loop)
|
||||
await trio.sleep(0.1)
|
||||
|
||||
assert cs.cancelled_caught
|
||||
|
||||
await pm_on_cancelled()
|
||||
|
||||
|
||||
async def main():
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
) as n:
|
||||
portal: tractor.Portal = await n.run_in_actor(
|
||||
cancelled_before_pause,
|
||||
)
|
||||
await portal.result()
|
||||
|
||||
# ensure the same works in the root actor!
|
||||
await pm_on_cancelled()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -1,50 +0,0 @@
|
|||
import tractor
|
||||
import trio
|
||||
|
||||
|
||||
async def gen():
|
||||
yield 'yo'
|
||||
await tractor.pause()
|
||||
yield 'yo'
|
||||
await tractor.pause()
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def just_bp(
|
||||
ctx: tractor.Context,
|
||||
) -> None:
|
||||
|
||||
await ctx.started()
|
||||
await tractor.pause()
|
||||
|
||||
# TODO: bps and errors in this call..
|
||||
async for val in gen():
|
||||
print(val)
|
||||
|
||||
# await trio.sleep(0.5)
|
||||
|
||||
# prematurely destroy the connection
|
||||
await ctx.chan.aclose()
|
||||
|
||||
# THIS CAUSES AN UNRECOVERABLE HANG
|
||||
# without latest ``pdbpp``:
|
||||
assert 0
|
||||
|
||||
|
||||
|
||||
async def main():
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
) as n:
|
||||
p = await n.start_actor(
|
||||
'bp_boi',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
async with p.open_context(
|
||||
just_bp,
|
||||
) as (ctx, first):
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -3,20 +3,17 @@ import tractor
|
|||
|
||||
|
||||
async def breakpoint_forever():
|
||||
'''
|
||||
Indefinitely re-enter debugger in child actor.
|
||||
|
||||
'''
|
||||
"""Indefinitely re-enter debugger in child actor.
|
||||
"""
|
||||
while True:
|
||||
await trio.sleep(0.1)
|
||||
await tractor.pause()
|
||||
await tractor.breakpoint()
|
||||
|
||||
|
||||
async def main():
|
||||
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
loglevel='cancel',
|
||||
) as n:
|
||||
|
||||
portal = await n.run_in_actor(
|
||||
|
|
|
@ -3,26 +3,16 @@ import tractor
|
|||
|
||||
|
||||
async def name_error():
|
||||
getattr(doggypants) # noqa (on purpose)
|
||||
getattr(doggypants)
|
||||
|
||||
|
||||
async def main():
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
# loglevel='transport',
|
||||
) as an:
|
||||
) as n:
|
||||
|
||||
# TODO: ideally the REPL arrives at this frame in the parent,
|
||||
# ABOVE the @api_frame of `Portal.run_in_actor()` (which
|
||||
# should eventually not even be a portal method ... XD)
|
||||
# await tractor.pause()
|
||||
p: tractor.Portal = await an.run_in_actor(name_error)
|
||||
|
||||
# with this style, should raise on this line
|
||||
await p.result()
|
||||
|
||||
# with this alt style should raise at `open_nursery()`
|
||||
# return await p.result()
|
||||
portal = await n.run_in_actor(name_error)
|
||||
await portal.result()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -1,169 +0,0 @@
|
|||
from functools import partial
|
||||
import time
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
# TODO: only import these when not running from test harness?
|
||||
# can we detect `pexpect` usage maybe?
|
||||
# from tractor.devx._debug import (
|
||||
# get_lock,
|
||||
# get_debug_req,
|
||||
# )
|
||||
|
||||
|
||||
def sync_pause(
|
||||
use_builtin: bool = False,
|
||||
error: bool = False,
|
||||
hide_tb: bool = True,
|
||||
pre_sleep: float|None = None,
|
||||
):
|
||||
if pre_sleep:
|
||||
time.sleep(pre_sleep)
|
||||
|
||||
if use_builtin:
|
||||
breakpoint(hide_tb=hide_tb)
|
||||
|
||||
else:
|
||||
# TODO: maybe for testing some kind of cm style interface
|
||||
# where the `._set_trace()` call doesn't happen until block
|
||||
# exit?
|
||||
# assert get_lock().ctx_in_debug is None
|
||||
# assert get_debug_req().repl is None
|
||||
tractor.pause_from_sync()
|
||||
# assert get_debug_req().repl is None
|
||||
|
||||
if error:
|
||||
raise RuntimeError('yoyo sync code error')
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def start_n_sync_pause(
|
||||
ctx: tractor.Context,
|
||||
):
|
||||
actor: tractor.Actor = tractor.current_actor()
|
||||
|
||||
# sync to parent-side task
|
||||
await ctx.started()
|
||||
|
||||
print(f'Entering `sync_pause()` in subactor: {actor.uid}\n')
|
||||
sync_pause()
|
||||
print(f'Exited `sync_pause()` in subactor: {actor.uid}\n')
|
||||
|
||||
|
||||
async def main() -> None:
|
||||
async with (
|
||||
tractor.open_nursery(
|
||||
debug_mode=True,
|
||||
maybe_enable_greenback=True,
|
||||
enable_stack_on_sig=True,
|
||||
# loglevel='warning',
|
||||
# loglevel='devx',
|
||||
) as an,
|
||||
trio.open_nursery() as tn,
|
||||
):
|
||||
# just from root task
|
||||
sync_pause()
|
||||
|
||||
p: tractor.Portal = await an.start_actor(
|
||||
'subactor',
|
||||
enable_modules=[__name__],
|
||||
# infect_asyncio=True,
|
||||
debug_mode=True,
|
||||
)
|
||||
|
||||
# TODO: 3 sub-actor usage cases:
|
||||
# -[x] via a `.open_context()`
|
||||
# -[ ] via a `.run_in_actor()` call
|
||||
# -[ ] via a `.run()`
|
||||
# -[ ] via a `.to_thread.run_sync()` in subactor
|
||||
async with p.open_context(
|
||||
start_n_sync_pause,
|
||||
) as (ctx, first):
|
||||
assert first is None
|
||||
|
||||
# TODO: handle bg-thread-in-root-actor special cases!
|
||||
#
|
||||
# there are a couple very subtle situations possible here
|
||||
# and they are likely to become more important as cpython
|
||||
# moves to support no-GIL.
|
||||
#
|
||||
# Cases:
|
||||
# 1. root-actor bg-threads that call `.pause_from_sync()`
|
||||
# whilst an in-tree subactor also is using ` .pause()`.
|
||||
# |_ since the root-actor bg thread can not
|
||||
# `Lock._debug_lock.acquire_nowait()` without running
|
||||
# a `trio.Task`, AND because the
|
||||
# `PdbREPL.set_continue()` is called from that
|
||||
# bg-thread, we can not `._debug_lock.release()`
|
||||
# either!
|
||||
# |_ this results in no actor-tree `Lock` being used
|
||||
# on behalf of the bg-thread and thus the subactor's
|
||||
# task and the thread trying to to use stdio
|
||||
# simultaneously which results in the classic TTY
|
||||
# clobbering!
|
||||
#
|
||||
# 2. multiple sync-bg-threads that call
|
||||
# `.pause_from_sync()` where one is scheduled via
|
||||
# `Nursery.start_soon(to_thread.run_sync)` in a bg
|
||||
# task.
|
||||
#
|
||||
# Due to the GIL, the threads never truly try to step
|
||||
# through the REPL simultaneously, BUT their `logging`
|
||||
# and traceback outputs are interleaved since the GIL
|
||||
# (seemingly) on every REPL-input from the user
|
||||
# switches threads..
|
||||
#
|
||||
# Soo, the context switching semantics of the GIL
|
||||
# result in a very confusing and messy interaction UX
|
||||
# since eval and (tb) print output is NOT synced to
|
||||
# each REPL-cycle (like we normally make it via
|
||||
# a `.set_continue()` callback triggering the
|
||||
# `Lock.release()`). Ideally we can solve this
|
||||
# usability issue NOW because this will of course be
|
||||
# that much more important when eventually there is no
|
||||
# GIL!
|
||||
|
||||
# XXX should cause double REPL entry and thus TTY
|
||||
# clobbering due to case 1. above!
|
||||
tn.start_soon(
|
||||
partial(
|
||||
trio.to_thread.run_sync,
|
||||
partial(
|
||||
sync_pause,
|
||||
use_builtin=False,
|
||||
# pre_sleep=0.5,
|
||||
),
|
||||
abandon_on_cancel=True,
|
||||
thread_name='start_soon_root_bg_thread',
|
||||
)
|
||||
)
|
||||
|
||||
await tractor.pause()
|
||||
|
||||
# XXX should cause double REPL entry and thus TTY
|
||||
# clobbering due to case 2. above!
|
||||
await trio.to_thread.run_sync(
|
||||
partial(
|
||||
sync_pause,
|
||||
# NOTE this already works fine since in the new
|
||||
# thread the `breakpoint()` built-in is never
|
||||
# overloaded, thus NO locking is used, HOWEVER
|
||||
# the case 2. from above still exists!
|
||||
use_builtin=True,
|
||||
),
|
||||
# TODO: with this `False` we can hang!??!
|
||||
# abandon_on_cancel=False,
|
||||
abandon_on_cancel=True,
|
||||
thread_name='inline_root_bg_thread',
|
||||
)
|
||||
|
||||
await ctx.cancel()
|
||||
|
||||
# TODO: case where we cancel from trio-side while asyncio task
|
||||
# has debugger lock?
|
||||
await p.cancel_actor()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -1,34 +1,25 @@
|
|||
import time
|
||||
import trio
|
||||
import tractor
|
||||
from tractor import (
|
||||
ActorNursery,
|
||||
MsgStream,
|
||||
Portal,
|
||||
)
|
||||
|
||||
|
||||
# these are the first 2 actors, streamer_1 and streamer_2
|
||||
async def stream_data(seed):
|
||||
for i in range(seed):
|
||||
yield i
|
||||
await trio.sleep(0.0001) # trigger scheduler
|
||||
await trio.sleep(0) # trigger scheduler
|
||||
|
||||
|
||||
# this is the third actor; the aggregator
|
||||
async def aggregate(seed):
|
||||
'''
|
||||
Ensure that the two streams we receive match but only stream
|
||||
"""Ensure that the two streams we receive match but only stream
|
||||
a single set of values to the parent.
|
||||
|
||||
'''
|
||||
an: ActorNursery
|
||||
async with tractor.open_nursery() as an:
|
||||
portals: list[Portal] = []
|
||||
"""
|
||||
async with tractor.open_nursery() as nursery:
|
||||
portals = []
|
||||
for i in range(1, 3):
|
||||
|
||||
# fork/spawn call
|
||||
portal = await an.start_actor(
|
||||
# fork point
|
||||
portal = await nursery.start_actor(
|
||||
name=f'streamer_{i}',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
@ -52,11 +43,7 @@ async def aggregate(seed):
|
|||
async with trio.open_nursery() as n:
|
||||
|
||||
for portal in portals:
|
||||
n.start_soon(
|
||||
push_to_chan,
|
||||
portal,
|
||||
send_chan.clone(),
|
||||
)
|
||||
n.start_soon(push_to_chan, portal, send_chan.clone())
|
||||
|
||||
# close this local task's reference to send side
|
||||
await send_chan.aclose()
|
||||
|
@ -73,36 +60,26 @@ async def aggregate(seed):
|
|||
|
||||
print("FINISHED ITERATING in aggregator")
|
||||
|
||||
await an.cancel()
|
||||
await nursery.cancel()
|
||||
print("WAITING on `ActorNursery` to finish")
|
||||
print("AGGREGATOR COMPLETE!")
|
||||
|
||||
|
||||
async def main() -> list[int]:
|
||||
'''
|
||||
This is the "root" actor's main task's entrypoint.
|
||||
|
||||
By default (and if not otherwise specified) that root process
|
||||
also acts as a "registry actor" / "registrar" on the localhost
|
||||
for the purposes of multi-actor "service discovery".
|
||||
|
||||
'''
|
||||
# yes, a nursery which spawns `trio`-"actors" B)
|
||||
an: ActorNursery
|
||||
# this is the main actor and *arbiter*
|
||||
async def main():
|
||||
# a nursery which spawns "actors"
|
||||
async with tractor.open_nursery(
|
||||
loglevel='cancel',
|
||||
# debug_mode=True,
|
||||
) as an:
|
||||
arbiter_addr=('127.0.0.1', 1616)
|
||||
) as nursery:
|
||||
|
||||
seed = int(1e3)
|
||||
pre_start = time.time()
|
||||
|
||||
portal: Portal = await an.start_actor(
|
||||
portal = await nursery.start_actor(
|
||||
name='aggregator',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
stream: MsgStream
|
||||
async with portal.open_stream_from(
|
||||
aggregate,
|
||||
seed=seed,
|
||||
|
@ -111,12 +88,11 @@ async def main() -> list[int]:
|
|||
start = time.time()
|
||||
# the portal call returns exactly what you'd expect
|
||||
# as if the remote "aggregate" function was called locally
|
||||
result_stream: list[int] = []
|
||||
result_stream = []
|
||||
async for value in stream:
|
||||
result_stream.append(value)
|
||||
|
||||
cancelled: bool = await portal.cancel_actor()
|
||||
assert cancelled
|
||||
await portal.cancel_actor()
|
||||
|
||||
print(f"STREAM TIME = {time.time() - start}")
|
||||
print(f"STREAM + SPAWN TIME = {time.time() - pre_start}")
|
||||
|
|
|
@ -1,92 +0,0 @@
|
|||
'''
|
||||
An SC compliant infected ``asyncio`` echo server.
|
||||
|
||||
'''
|
||||
import asyncio
|
||||
from statistics import mean
|
||||
import time
|
||||
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
|
||||
async def aio_echo_server(
|
||||
to_trio: trio.MemorySendChannel,
|
||||
from_trio: asyncio.Queue,
|
||||
|
||||
) -> None:
|
||||
|
||||
# a first message must be sent **from** this ``asyncio``
|
||||
# task or the ``trio`` side will never unblock from
|
||||
# ``tractor.to_asyncio.open_channel_from():``
|
||||
to_trio.send_nowait('start')
|
||||
|
||||
# XXX: this uses an ``from_trio: asyncio.Queue`` currently but we
|
||||
# should probably offer something better.
|
||||
while True:
|
||||
# echo the msg back
|
||||
to_trio.send_nowait(await from_trio.get())
|
||||
await asyncio.sleep(0)
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def trio_to_aio_echo_server(
|
||||
ctx: tractor.Context,
|
||||
):
|
||||
# this will block until the ``asyncio`` task sends a "first"
|
||||
# message.
|
||||
async with tractor.to_asyncio.open_channel_from(
|
||||
aio_echo_server,
|
||||
) as (first, chan):
|
||||
|
||||
assert first == 'start'
|
||||
await ctx.started(first)
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
async for msg in stream:
|
||||
await chan.send(msg)
|
||||
|
||||
out = await chan.receive()
|
||||
# echo back to parent actor-task
|
||||
await stream.send(out)
|
||||
|
||||
|
||||
async def main():
|
||||
|
||||
async with tractor.open_nursery() as n:
|
||||
p = await n.start_actor(
|
||||
'aio_server',
|
||||
enable_modules=[__name__],
|
||||
infect_asyncio=True,
|
||||
)
|
||||
async with p.open_context(
|
||||
trio_to_aio_echo_server,
|
||||
) as (ctx, first):
|
||||
|
||||
assert first == 'start'
|
||||
|
||||
count = 0
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
delays = []
|
||||
send = time.time()
|
||||
|
||||
await stream.send(count)
|
||||
async for msg in stream:
|
||||
recv = time.time()
|
||||
delays.append(recv - send)
|
||||
assert msg == count
|
||||
count += 1
|
||||
send = time.time()
|
||||
await stream.send(count)
|
||||
|
||||
if count >= 1e3:
|
||||
break
|
||||
|
||||
print(f'mean round trip rate (Hz): {1/mean(delays)}')
|
||||
await p.cancel_actor()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
trio.run(main)
|
|
@ -1,49 +0,0 @@
|
|||
import trio
|
||||
import click
|
||||
import tractor
|
||||
import pydantic
|
||||
# from multiprocessing import shared_memory
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def just_sleep(
|
||||
|
||||
ctx: tractor.Context,
|
||||
**kwargs,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Test a small ping-pong 2-way streaming server.
|
||||
|
||||
'''
|
||||
await ctx.started()
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
||||
async def main() -> None:
|
||||
|
||||
proc = await trio.open_process((
|
||||
'python',
|
||||
'-c',
|
||||
'import trio; trio.run(trio.sleep_forever)',
|
||||
))
|
||||
await proc.wait()
|
||||
# await trio.sleep_forever()
|
||||
# async with tractor.open_nursery() as n:
|
||||
|
||||
# portal = await n.start_actor(
|
||||
# 'rpc_server',
|
||||
# enable_modules=[__name__],
|
||||
# )
|
||||
|
||||
# async with portal.open_context(
|
||||
# just_sleep, # taken from pytest parameterization
|
||||
# ) as (ctx, sent):
|
||||
# await trio.sleep_forever()
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
import time
|
||||
# time.sleep(999)
|
||||
trio.run(main)
|
|
@ -8,17 +8,15 @@ This uses no extra threads, fancy semaphores or futures; all we need
|
|||
is ``tractor``'s channels.
|
||||
|
||||
"""
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
aclosing,
|
||||
)
|
||||
from typing import Callable
|
||||
from contextlib import asynccontextmanager
|
||||
from typing import List, Callable
|
||||
import itertools
|
||||
import math
|
||||
import time
|
||||
|
||||
import tractor
|
||||
import trio
|
||||
from async_generator import aclosing
|
||||
|
||||
|
||||
PRIMES = [
|
||||
|
@ -46,7 +44,7 @@ async def is_prime(n):
|
|||
return True
|
||||
|
||||
|
||||
@acm
|
||||
@asynccontextmanager
|
||||
async def worker_pool(workers=4):
|
||||
"""Though it's a trivial special case for ``tractor``, the well
|
||||
known "worker pool" seems to be the defacto "but, I want this
|
||||
|
@ -73,8 +71,8 @@ async def worker_pool(workers=4):
|
|||
|
||||
async def _map(
|
||||
worker_func: Callable[[int], bool],
|
||||
sequence: list[int]
|
||||
) -> list[bool]:
|
||||
sequence: List[int]
|
||||
) -> List[bool]:
|
||||
|
||||
# define an async (local) task to collect results from workers
|
||||
async def send_result(func, value, portal):
|
||||
|
|
|
@ -1,47 +0,0 @@

import trio
import tractor


async def sleepy_jane() -> None:
    uid: tuple = tractor.current_actor().uid
    print(f'Yo i am actor {uid}')
    await trio.sleep_forever()


async def main():
    '''
    Spawn a flat actor cluster, with one process per detected core.

    '''
    portal_map: dict[str, tractor.Portal]

    # look at this hip new syntax!
    async with (

        tractor.open_actor_cluster(
            modules=[__name__]
        ) as portal_map,

        trio.open_nursery(
            strict_exception_groups=False,
        ) as tn,
    ):

        for (name, portal) in portal_map.items():
            tn.start_soon(
                portal.run,
                sleepy_jane,
            )

        await trio.sleep(0.5)

        # kill the cluster with a cancel
        raise KeyboardInterrupt


if __name__ == '__main__':
    try:
        trio.run(main)
    except KeyboardInterrupt:
        print('trio cancelled by KBI')
@ -1,72 +0,0 @@
import trio
import tractor


@tractor.context
async def simple_rpc(

    ctx: tractor.Context,
    data: int,

) -> None:
    '''Test a small ping-pong 2-way streaming server.

    '''
    # signal to parent that we're up much like
    # ``trio.TaskStatus.started()``
    await ctx.started(data + 1)

    async with ctx.open_stream() as stream:

        count = 0
        async for msg in stream:

            assert msg == 'ping'
            await stream.send('pong')
            count += 1

        else:
            assert count == 10


async def main() -> None:

    async with tractor.open_nursery() as n:

        portal = await n.start_actor(
            'rpc_server',
            enable_modules=[__name__],
        )

        # XXX: syntax requires py3.9
        async with (

            portal.open_context(
                simple_rpc,  # taken from pytest parameterization
                data=10,

            ) as (ctx, sent),

            ctx.open_stream() as stream,
        ):

            assert sent == 11

            count = 0
            # receive msgs using async for style
            await stream.send('ping')

            async for msg in stream:
                assert msg == 'pong'
                await stream.send('ping')
                count += 1

                if count >= 9:
                    break

        # explicitly teardown the daemon-actor
        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)
@ -9,7 +9,7 @@ async def main(service_name):
     async with tractor.open_nursery() as an:
         await an.start_actor(service_name)

-        async with tractor.get_registry('127.0.0.1', 1616) as portal:
+        async with tractor.get_arbiter('127.0.0.1', 1616) as portal:
             print(f"Arbiter is listening on {portal.channel}")

         async with tractor.wait_for_actor(service_name) as sockaddr:
@ -1 +0,0 @@
!.gitignore
@ -1,16 +0,0 @@
Strictly support Python 3.10+, start runtime machinery reorg

Since we want to push forward using the new `match:` syntax for our
internal RPC-msg loops, we officially drop 3.9 support for the next
release which should coincide well with the first release of 3.11.

This patch set also officially removes the ``tractor.run()`` API (which
has been deprecated for some time) as well as starts an initial re-org
of the internal runtime core by:
- renaming ``tractor._actor`` -> ``._runtime``
- moving the ``._runtime.Actor._process_messages()`` and
  ``._async_main()`` to be module level singleton-task-functions since
  they are only started once for each connection and actor spawn
  respectively; this internal API thus looks more similar to (at the
  time of writing) the ``trio``-internals in ``trio._core._run``.
- officially remove ``tractor.run()``, now deprecated for some time.
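For the curious, the kind of `match:` based msg dispatch motivating the 3.10+ requirement looks roughly like the following sketch (the message shapes here are hypothetical, not the actual `._runtime` wire format):

```
# Hypothetical sketch of a `match:`-style msg loop; the msg keys
# ('cmd', 'yield', 'return') are illustrative only.
def process_msg(msg: dict) -> str:
    match msg:
        case {'cmd': str(func), 'kwargs': dict(kwargs)}:
            return f'invoke {func} with {kwargs}'
        case {'yield': value}:
            return f'stream item: {value}'
        case {'return': value}:
            return f'final result: {value}'
        case _:
            raise ValueError(f'unknown msg: {msg}')


assert process_msg({'yield': 10}) == 'stream item: 10'
```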
@ -1,4 +0,0 @@
Only set `._debug.Lock.local_pdb_complete` if it has been created.

This can be triggered by a very rare race condition (and thus we have no
working test yet) but it is known to exist in (a) consumer project(s).
@ -1,25 +0,0 @@
Add support for ``trio >= 0.22`` and support for the new Python 3.11
``[Base]ExceptionGroup`` from `pep 654`_ via the backported
`exceptiongroup`_ package and some final fixes to the debug mode
subsystem.

This port ended up driving some (hopefully) final fixes to our debugger
subsystem including the solution to all lingering stdstreams locking
race-conditions and deadlock scenarios. This includes extending the
debugger tests suite as well as cancellation and ``asyncio`` mode cases.
Some of the notable details:

- always reverting to the ``trio`` SIGINT handler when leaving debug
  mode.
- bypassing child attempts to acquire the debug lock when detected
  to be amidst actor-runtime-cancellation.
- allowing the root actor to cancel local but IPC-stale subactor
  request-tasks for the debug lock when in a "no IPC peers" state.

Further we refined our ``ActorNursery`` semantics to be more similar to
``trio`` in the sense that parent task errors are always packed into the
actor-nursery emitted exception group and adjusted all tests and
examples accordingly.

.. _pep 654: https://peps.python.org/pep-0654/#handling-exception-groups
.. _exceptiongroup: https://github.com/python-trio/exceptiongroup
@ -1,5 +0,0 @@
Establish an explicit "backend spawning" method table; use it from CI

More clearly lays out the current set of (3) backends: ``['trio',
'mp_spawn', 'mp_forkserver']`` and adjusts the ``._spawn.py`` internals
as well as the test suite to accommodate.
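Roughly speaking such a table is just a mapping from backend-name keys to spawn routines, eg. the sketch below (illustrative only, not the actual ``._spawn.py`` internals):

```
# Illustrative-only spawn-backend table; the real mapping lives in
# `tractor._spawn` and points at the runtime's spawn routines.
from typing import Callable


def spawn_trio(): ...
def spawn_mp_spawn(): ...
def spawn_mp_forkserver(): ...


SPAWN_METHODS: dict[str, Callable] = {
    'trio': spawn_trio,
    'mp_spawn': spawn_mp_spawn,
    'mp_forkserver': spawn_mp_forkserver,
}

assert set(SPAWN_METHODS) == {'trio', 'mp_spawn', 'mp_forkserver'}
```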
@ -1,4 +0,0 @@
Add ``key: Callable[..., Hashable]`` support to ``.trionics.maybe_open_context()``

Gives users finer grained control over cache hit behaviour using
a callable which receives the input ``kwargs: dict``.
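A rough usage sketch follows; the exact calling convention for the ``key`` callable (assumed here to receive the input kwargs expanded) is an assumption, not verified against the release:

```
# Sketch of the new `key` hook for `maybe_open_context()`; the
# precise signature/convention is assumed, not verified.
from contextlib import asynccontextmanager as acm

import trio
import tractor


@acm
async def open_conn(host: str, port: int):
    yield (host, port)  # stand-in for some shared resource


async def main():
    async with tractor.trionics.maybe_open_context(
        acm_func=open_conn,
        kwargs={'host': '127.0.0.1', 'port': 1616},

        # compute the cache key from the input kwargs,
        # eg. bucket all ports on the same host together.
        key=lambda host, port: host,

    ) as (cache_hit, conn):
        assert not cache_hit  # first entry populates the cache


if __name__ == '__main__':
    trio.run(main)
```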
@ -1,41 +0,0 @@
Add support for debug-lock blocking using a ``._debug.Lock._blocked:
set[tuple]`` and add ids when no-more IPC connections with the
root actor are detected.

This is an enhancement which (mostly) solves a lingering debugger
locking race case we needed to handle:

- child crashes, acquires the TTY lock in the root and attaches to ``pdb``
- child IPC goes down such that all channels to the root are broken
  / non-functional.
- root is stuck thinking the child is still in debug even though it
  can't be contacted and the child actor machinery hasn't been
  cancelled by its parent.
- root gets stuck in deadlock with the child since it won't send a cancel
  request until the child is finished debugging (to avoid clobbering
  a child that is actually using the debugger), but the child can't
  unlock the debugger because IPC is down and it can't contact the root.

To avoid this scenario add a debug lock blocking list via
`._debug.Lock._blocked: set[tuple]` which holds actor uids for any actor
that is detected by the root as having no transport channel connections
(of which at least one should exist if this sub-actor at some point
acquired the debug lock). The root consequently checks this list for any
actor that tries to (re)acquire the lock and blocks with
a ``ContextCancelled``. Further, when a debug condition is tested in
``._runtime._invoke``, the context's ``._enter_debugger_on_cancel`` is
set to `False` if the actor was put on the block list, in which case all
post-mortem / crash handling will be bypassed for that task.

In theory this approach to block list management may cause problems
where some nested child actor acquires and releases the lock multiple
times and it gets stuck on the block list after the first use? If this
turns out to be an issue we can try changing the strategy so blocks are
only added when the root has zero IPC peers left?

Further, this adds a root-locking-task side cancel scope,
``Lock._root_local_task_cs_in_debug``, which can be ``.cancel()``-ed by the root
runtime when a stale lock is detected during the IPC channel testing.
However, right now we're NOT using this since it seems to cause test
failures likely due to causing pre-mature cancellation and maybe needs
a bit more experimenting?
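The resulting gate amounts to roughly the following check (a simplified sketch using a stand-in error type, not the actual ``._debug`` code):

```
# Simplified sketch of the uid block-list check; `ContextCancelled`
# is tractor's real error type but this logic is illustrative only.
class Lock:
    _blocked: set[tuple] = set()


def maybe_block_requester(uid: tuple[str, str]) -> None:
    if uid in Lock._blocked:
        # deny (re)acquiring the TTY/debug lock for any actor
        # the root has detected as having no IPC channels left.
        raise RuntimeError(  # stand-in for `tractor.ContextCancelled`
            f'Debug lock blocked for {uid!r}: no IPC connections!'
        )
```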
@ -1,19 +0,0 @@
Rework our ``.trionics.BroadcastReceiver`` internals to avoid method
recursion and approach a design and interface closer to ``trio``'s
``MemoryReceiveChannel``.

The details of the internal changes include:

- implementing a ``BroadcastReceiver.receive_nowait()`` and using it
  within the async ``.receive()`` thus avoiding recursion from
  ``.receive()``.
- failing over to an internal ``._receive_from_underlying()`` when the
  ``_nowait()`` call raises ``trio.WouldBlock``.
- adding ``BroadcastState.statistics()`` for debugging and testing, both
  internally and by users.
- add an internal ``BroadcastReceiver._raise_on_lag: bool`` which can be
  set to avoid ``Lagged`` raising for possible use cases where a user
  wants to choose between a [cheap or nasty
  pattern](https://zguide.zeromq.org/docs/chapter7/#The-Cheap-or-Nasty-Pattern)
  for the particular stream (we use this in ``piker``'s dark clearing
  engine to avoid fast feeds breaking during HFT periods).
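The recursion-free receive flow described above can be sketched like so (a simplified stand-in, not the actual ``BroadcastReceiver`` internals):

```
import trio


class SketchReceiver:
    '''
    Simplified stand-in showing the recursion-free flow: try a
    non-blocking pop first, then fall back to a single blocking
    read on the underlying channel.

    '''
    def __init__(self, rx: trio.MemoryReceiveChannel):
        self._rx = rx

    def receive_nowait(self):
        # raises `trio.WouldBlock` when no value is buffered
        return self._rx.receive_nowait()

    async def _receive_from_underlying(self):
        return await self._rx.receive()

    async def receive(self):
        try:
            return self.receive_nowait()
        except trio.WouldBlock:
            # fail over to one blocking wait instead of
            # recursively calling `.receive()` again.
            return await self._receive_from_underlying()
```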
@ -1,11 +0,0 @@
Always ``list``-cast the ``mngrs`` input to
``.trionics.gather_contexts()`` and ensure it is non-empty, otherwise
raise a ``ValueError``.

Turns out that trying to pass an inline-style generator comprehension
doesn't seem to work inside the ``async with`` expression? Further, in
such a case we can get a hang waiting on the all-entered event
completion when the internal mngrs iteration is a noop. Instead we
always greedily check the size and error on empty input; the lazy
iteration of a generator input is not beneficial anyway since we're
entering all manager instances in concurrent tasks.
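The guard boils down to something like the following (a sketch; the real helper name and error message may differ):

```
# Sketch of the greedy input check; not the actual
# `gather_contexts()` internals.
def list_cast_mngrs(mngrs) -> list:
    '''
    Greedily consume any (possibly lazy/generator) input so an
    empty iterable can't cause a hang waiting on the "all entered"
    event, and error early on empty input.

    '''
    mngrs = list(mngrs)
    if not mngrs:
        raise ValueError('input `mngrs` must be non-empty!')
    return mngrs


assert list_cast_mngrs(m for m in range(3)) == [0, 1, 2]
```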
@ -1,15 +0,0 @@
Fixes to ensure IPC (channel) breakage doesn't result in hung actor
trees; the zombie reaping and general supervision machinery will always
clean up and terminate.

This includes not only the (mostly minor) fixes to solve these cases but
also a new extensive test suite in `test_advanced_faults.py` with an
accompanying highly configurable example module-script in
`examples/advanced_faults/ipc_failure_during_stream.py`. Tests ensure we
never get hangs or zombies despite operating in debug mode and attempt
to simulate all possible IPC transport failure cases for a local-host
actor tree.

Further we simplify `Context.open_stream.__aexit__()` to just call
`MsgStream.aclose()` directly, more or less avoiding a pure duplicate
code path.
@ -1,10 +0,0 @@
Always redraw the `pdbpp` prompt on `SIGINT` during REPL use.

There were recent changes to do with Python 3.10 that required us to pin
to a specific commit in `pdbpp` which have recently been fixed minus
this last issue with `SIGINT` shielding: clobbering or not showing
the `(Pdb++)` prompt on ctrl-c by the user. This repairs all
that by firstly removing the standard KBI intercepting of the std lib's
`pdb.Pdb._cmdloop()` as well as ensuring that only the actor with REPL
control ever reports `SIGINT` handler log msgs and prompt redraws. With
this we move back to using the pypi `pdbpp` release.
@ -1,7 +0,0 @@
Drop `trio.Process.aclose()` usage, copy into our spawning code.

The details are laid out in https://github.com/goodboy/tractor/issues/330.
`trio` changed its process running API quite some time ago; this just
copies out the small bit we needed (from the old `.aclose()`) for hard
kills where a soft runtime cancel request fails and our "zombie killer"
implementation kicks in.
@ -1,15 +0,0 @@
Switch to using the fork & fix of `pdb++`, `pdbp`:
https://github.com/mdmintz/pdbp

Allows us to sidestep a variety of issues left unaddressed in the
upstream project, thanks to the hard work of @mdmintz!

We also include some default settings adjustments as per recent
development on the fork:

- sticky mode is still turned on by default but now activates when
  using the `ll` repl command.
- turn off line truncation by default to avoid inter-line gaps when
  resizing the terminal during use.
- when using the backtrace cmd either by `w` or `bt`, the config
  automatically switches to non-sticky mode.
@ -1,8 +0,0 @@
See both the `towncrier docs`_ and the `pluggy release readme`_ for hot
tips. We basically have the most minimal setup and release process right
now and use the default `fragment set`_.


.. _towncrier docs: https://github.com/twisted/towncrier#quick-start
.. _pluggy release readme: https://github.com/pytest-dev/pluggy/blob/main/changelog/README.rst
.. _fragment set: https://github.com/twisted/towncrier#news-fragments
@ -1,37 +0,0 @@
{% for section in sections %}
{% set underline = "-" %}
{% if section %}
{{section}}
{{ underline * section|length }}{% set underline = "~" %}

{% endif %}
{% if sections[section] %}
{% for category, val in definitions.items() if category in sections[section] %}

{{ definitions[category]['name'] }}
{{ underline * definitions[category]['name']|length }}

{% if definitions[category]['showcontent'] %}
{% for text, values in sections[section][category]|dictsort(by='value') %}
{% set issue_joiner = joiner(', ') %}
- {% for value in values|sort %}{{ issue_joiner() }}`{{ value }} <https://github.com/goodboy/tractor/issues/{{ value[1:] }}>`_{% endfor %}: {{ text }}

{% endfor %}
{% else %}
- {{ sections[section][category]['']|sort|join(', ') }}


{% endif %}
{% if sections[section][category]|length == 0 %}

No significant changes.

{% else %}
{% endif %}
{% endfor %}
{% else %}

No significant changes.

{% endif %}
{% endfor %}
@ -1,18 +0,0 @@
First generate a built distribution:

```
python -m pip install --upgrade build
python -m build --sdist --outdir dist/alpha5/
```

Then try a test ``pypi`` upload:

```
python -m twine upload --repository testpypi dist/alpha5/*
```

Then push to `pypi` for realz:

```
python -m twine upload dist/alpha5/*
```
pyproject.toml
@ -1,158 +0,0 @@
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

# ------ build-system ------

[project]
name = "tractor"
version = "0.1.0a6dev0"
description = 'structured concurrent `trio`-"actors"'
authors = [{ name = "Tyler Goodlet", email = "goodboy_foss@protonmail.com" }]
requires-python = ">= 3.11"
readme = "docs/README.rst"
license = "AGPL-3.0-or-later"
keywords = [
  "trio",
  "async",
  "concurrency",
  "structured concurrency",
  "actor model",
  "distributed",
  "multiprocessing",
]
classifiers = [
  "Development Status :: 3 - Alpha",
  "Operating System :: POSIX :: Linux",
  "Framework :: Trio",
  "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
  "Programming Language :: Python :: Implementation :: CPython",
  "Programming Language :: Python :: 3 :: Only",
  "Programming Language :: Python :: 3.11",
  "Topic :: System :: Distributed Computing",
]
dependencies = [
  # trio runtime and friends
  # (poetry) proper range specs,
  # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5
  # TODO, for 3.13 we must go to `0.27` which means we have to
  # disable strict egs or port to handling them internally!
  "trio>0.27",
  "tricycle>=0.4.1,<0.5",
  "wrapt>=1.16.0,<2",
  "colorlog>=6.8.2,<7",
  # built-in multi-actor `pdb` REPL
  "pdbp>=1.6,<2",  # windows only (from `pdbp`)
  # typed IPC msging
  "msgspec>=0.19.0",
]

# ------ project ------

[dependency-groups]
dev = [
  # test suite
  # TODO: maybe some of these layout choices?
  # https://docs.pytest.org/en/8.0.x/explanation/goodpractices.html#choosing-a-test-layout-import-rules
  "pytest>=8.3.5",
  "pexpect>=4.9.0,<5",
  # `tractor.devx` tooling
  "greenback>=1.2.1,<2",
  "stackscope>=0.2.2,<0.3",
  "pyperclip>=1.9.0",
  "prompt-toolkit>=3.0.50",
  "xonsh>=0.19.2",
]
# TODO, add these with sane versions; were originally in
# `requirements-docs.txt`..
# docs = [
#   "sphinx>="
#   "sphinx_book_theme>="
# ]

# ------ dependency-groups ------

[tool.uv.sources]
# XXX NOTE, only for @goodboy's hacking on `pprint(sort_dicts=False)`
# for the `pp` alias..
# pdbp = { path = "../pdbp", editable = true }

# ------ tool.uv.sources ------
# TODO, distributed (multi-host) extensions
# linux kernel networking
# 'pyroute2

# ------ tool.uv.sources ------

[tool.uv]
# XXX NOTE, prefer the sys python bc apparently the dists from
# `astral` are built in a way that breaks `pdbp`+`tabcompleter`,
# likely due to linking against `libedit` over `readline`..
# |_https://docs.astral.sh/uv/concepts/python-versions/#managed-python-distributions
# |_https://gregoryszorc.com/docs/python-build-standalone/main/quirks.html#use-of-libedit-on-linux
#
# https://docs.astral.sh/uv/reference/settings/#python-preference
python-preference = 'system'

# ------ tool.uv ------

[tool.hatch.build.targets.sdist]
include = ["tractor"]

[tool.hatch.build.targets.wheel]
include = ["tractor"]

# ------ tool.hatch ------

[tool.towncrier]
package = "tractor"
filename = "NEWS.rst"
directory = "nooz/"
version = "0.1.0a6"
title_format = "tractor {version} ({project_date})"
template = "nooz/_template.rst"
all_bullets = true

[[tool.towncrier.type]]
directory = "feature"
name = "Features"
showcontent = true

[[tool.towncrier.type]]
directory = "bugfix"
name = "Bug Fixes"
showcontent = true

[[tool.towncrier.type]]
directory = "doc"
name = "Improved Documentation"
showcontent = true

[[tool.towncrier.type]]
directory = "trivial"
name = "Trivial/Internal Changes"
showcontent = true

# ------ tool.towncrier ------

[tool.pytest.ini_options]
minversion = '6.0'
testpaths = [
  'tests'
]
addopts = [
  # TODO: figure out why this isn't working..
  '--rootdir=./tests',

  '--import-mode=importlib',
  # don't show frickin captured logs AGAIN in the report..
  '--show-capture=no',
]
log_cli = false
# TODO: maybe some of these layout choices?
# https://docs.pytest.org/en/8.0.x/explanation/goodpractices.html#choosing-a-test-layout-import-rules
# pythonpath = "src"

# ------ tool.pytest ------
@ -1,8 +0,0 @@
# vim: ft=ini
# pytest.ini for tractor

[pytest]
# don't show frickin captured logs AGAIN in the report..
addopts = --show-capture='no'
log_cli = false
; minversion = 6.0
@ -0,0 +1,2 @@
sphinx
sphinx_book_theme
@ -0,0 +1,6 @@
pytest
pytest-trio
pdbpp
mypy
trio_typing
pexpect
ruff.toml
@ -1,82 +0,0 @@
# from default `ruff.toml` @
# https://docs.astral.sh/ruff/configuration/

# Exclude a variety of commonly ignored directories.
exclude = [
    ".bzr",
    ".direnv",
    ".eggs",
    ".git",
    ".git-rewrite",
    ".hg",
    ".ipynb_checkpoints",
    ".mypy_cache",
    ".nox",
    ".pants.d",
    ".pyenv",
    ".pytest_cache",
    ".pytype",
    ".ruff_cache",
    ".svn",
    ".tox",
    ".venv",
    ".vscode",
    "__pypackages__",
    "_build",
    "buck-out",
    "build",
    "dist",
    "node_modules",
    "site-packages",
    "venv",
]

# Same as Black.
line-length = 88
indent-width = 4

# Assume Python 3.11
target-version = "py311"

[lint]
# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or
# McCabe complexity (`C901`) by default.
select = ["E4", "E7", "E9", "F"]
ignore = [
    'E402',  # https://docs.astral.sh/ruff/rules/module-import-not-at-top-of-file/
]

# Allow fix for all enabled rules (when `--fix` is provided).
fixable = ["ALL"]
unfixable = []

# Allow unused variables when underscore-prefixed.
# dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"

[format]
# Use single quotes in `ruff format`.
quote-style = "single"

# Like Black, indent with spaces, rather than tabs.
indent-style = "space"

# Like Black, respect magic trailing commas.
skip-magic-trailing-comma = false

# Like Black, automatically detect the appropriate line ending.
line-ending = "auto"

# Enable auto-formatting of code examples in docstrings. Markdown,
# reStructuredText code/literal blocks and doctests are all supported.
#
# This is currently disabled by default, but it is planned for this
# to be opt-out in the future.
docstring-code-format = false

# Set the line length limit used when formatting code snippets in
# docstrings.
#
# This only has an effect when the `docstring-code-format` setting is
# enabled.
docstring-code-line-length = "dynamic"
@ -0,0 +1,81 @@
#!/usr/bin/env python
#
# tractor: a trionic actor model built on `multiprocessing` and `trio`
#
# Copyright (C) 2018-2020 Tyler Goodlet

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from setuptools import setup

with open('docs/README.rst', encoding='utf-8') as f:
    readme = f.read()


setup(
    name="tractor",
    version='0.1.0a1',  # first ever alpha
    description='structured concurrent "actors"',
    long_description=readme,
    license='GPLv3',
    author='Tyler Goodlet',
    maintainer='Tyler Goodlet',
    maintainer_email='jgbt@protonmail.com',
    url='https://github.com/goodboy/tractor',
    platforms=['linux', 'windows'],
    packages=[
        'tractor',
        'tractor.testing',
    ],
    install_requires=[

        # trio related
        'trio>0.8',
        'async_generator',
        'trio_typing',

        # tooling
        'colorlog',
        'wrapt',
        'pdbpp',

        # serialization
        'msgpack',

    ],
    tests_require=['pytest'],
    python_requires=">=3.7",
    keywords=[
        'trio',
        "async",
        "concurrency",
        "actor model",
        "distributed",
        'multiprocessing'
    ],
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Operating System :: POSIX :: Linux",
        "Operating System :: Microsoft :: Windows",
        "Framework :: Trio",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "Topic :: System :: Distributed Computing",
    ],
)
@ -11,14 +11,14 @@ import time

 import pytest
 import tractor
-from tractor._testing import (
-    examples_dir as examples_dir,
-    tractor_test as tractor_test,
-    expect_ctxc as expect_ctxc,
-)
-
-# TODO: include wtv plugin(s) we build in `._testing.pytest`?
+# export for tests
+from tractor.testing import tractor_test  # noqa


 pytest_plugins = ['pytester']
+_arb_addr = '127.0.0.1', random.randint(1000, 9999)


 # Sending signal.SIGINT on subprocess fails on windows. Use CTRL_* alternatives
 if platform.system() == 'Windows':
@ -39,136 +39,83 @@ no_windows = pytest.mark.skipif(
 )


+def repodir():
+    """Return the abspath to the repo directory.
+    """
+    dirname = os.path.dirname
+    dirpath = os.path.abspath(
+        dirname(dirname(os.path.realpath(__file__)))
+    )
+    return dirpath
+
+
 def pytest_addoption(parser):
     parser.addoption(
-        "--ll",
-        action="store",
-        dest='loglevel',
-        default='ERROR', help="logging level to set when testing"
+        "--ll", action="store", dest='loglevel',
+        default=None, help="logging level to set when testing"
     )

     parser.addoption(
-        "--spawn-backend",
-        action="store",
-        dest='spawn_backend',
+        "--spawn-backend", action="store", dest='spawn_backend',
         default='trio',
         help="Processing spawning backend to use for test run",
     )

-    parser.addoption(
-        "--tpdb", "--debug-mode",
-        action="store_true",
-        dest='tractor_debug_mode',
-        # default=False,
-        help=(
-            'Enable a flag that can be used by tests to set the '
-            '`debug_mode: bool` for engaging the internal '
-            'multi-proc debugger sys.'
-        ),
-    )
-

 def pytest_configure(config):
     backend = config.option.spawn_backend
-    tractor._spawn.try_set_start_method(backend)
-
-
-@pytest.fixture(scope='session')
-def debug_mode(request):
-    debug_mode: bool = request.config.option.tractor_debug_mode
-    # if debug_mode:
-    #     breakpoint()
-    return debug_mode
+    if backend == 'mp':
+        tractor._spawn.try_set_start_method('spawn')
+    elif backend == 'trio':
+        tractor._spawn.try_set_start_method(backend)


 @pytest.fixture(scope='session', autouse=True)
 def loglevel(request):
     orig = tractor.log._default_loglevel
     level = tractor.log._default_loglevel = request.config.option.loglevel
     tractor.log.get_console_log(level)
     yield level
     tractor.log._default_loglevel = orig


 @pytest.fixture(scope='session')
-def spawn_backend(request) -> str:
+def spawn_backend(request):
     return request.config.option.spawn_backend


 # @pytest.fixture(scope='function', autouse=True)
 # def debug_enabled(request) -> str:
 #     from tractor import _state
 #     if _state._runtime_vars['_debug_mode']:
 #         breakpoint()

-_ci_env: bool = os.environ.get('CI', False)
-

 @pytest.fixture(scope='session')
 def ci_env() -> bool:
-    '''
-    Detect CI environment.
-
-    '''
-    return _ci_env
-
-
-# TODO: also move this to `._testing` for now?
-# -[ ] possibly generalize and re-use for multi-tree spawning
-#    along with the new stuff for multi-addrs in distribute_dis
-#    branch?
-#
-# choose randomly at import time
-_reg_addr: tuple[str, int] = (
-    '127.0.0.1',
-    random.randint(1000, 9999),
-)
+    """Detect CI environment.
+    """
+    return os.environ.get('TRAVIS', False) or os.environ.get('CI', False)


 @pytest.fixture(scope='session')
-def reg_addr() -> tuple[str, int]:
-
-    # globally override the runtime to the per-test-session-dynamic
-    # addr so that all tests never conflict with any other actor
-    # tree using the default.
-    from tractor import _root
-    _root._default_lo_addrs = [_reg_addr]
-
-    return _reg_addr
+def arb_addr():
+    return _arb_addr


 def pytest_generate_tests(metafunc):
     spawn_backend = metafunc.config.option.spawn_backend

     if not spawn_backend:
         # XXX some weird windows bug with `pytest`?
-        spawn_backend = 'trio'
+        spawn_backend = 'mp'
+    assert spawn_backend in ('mp', 'trio')

-    # TODO: maybe just use the literal `._spawn.SpawnMethodKey`?
-    assert spawn_backend in (
-        'mp_spawn',
-        'mp_forkserver',
-        'trio',
-    )
-
-    # NOTE: used to be used to dynamically parametrize tests for when
-    # you just passed --spawn-backend=`mp` on the cli, but now we expect
-    # that cli input to be manually specified, BUT, maybe we'll do
-    # something like this again in the future?
     if 'start_method' in metafunc.fixturenames:
-        metafunc.parametrize("start_method", [spawn_backend], scope='module')
+        if spawn_backend == 'mp':
+            from multiprocessing import get_all_start_methods
+            methods = get_all_start_methods()
+            if 'fork' in methods:
+                # fork not available on windows, so check before
+                # removing XXX: the fork method is in general
+                # incompatible with trio's global scheduler state
+                methods.remove('fork')
+        elif spawn_backend == 'trio':
+            methods = ['trio']

-
-# TODO: a way to let test scripts (like from `examples/`)
-# guarantee they won't registry addr collide!
-# @pytest.fixture
-# def open_test_runtime(
-#     reg_addr: tuple,
-# ) -> AsyncContextManager:
-#     return partial(
-#         tractor.open_nursery,
-#         registry_addrs=[reg_addr],
-#     )
+        metafunc.parametrize("start_method", methods, scope='module')


 def sig_prog(proc, sig):
@ -183,40 +130,28 @@ def sig_prog(proc, sig):
     assert ret


-# TODO: factor into @cm and move to `._testing`?
 @pytest.fixture
-def daemon(
-    loglevel: str,
-    testdir,
-    reg_addr: tuple[str, int],
-):
-    '''
-    Run a daemon root actor as a separate actor-process tree and
-    "remote registrar" for discovery-protocol related tests.
-
-    '''
+def daemon(loglevel, testdir, arb_addr):
+    """Run a daemon actor as a "remote arbiter".
+    """
     if loglevel in ('trace', 'debug'):
-        # XXX: too much logging will lock up the subproc (smh)
-        loglevel: str = 'info'
+        # too much logging will lock up the subproc (smh)
+        loglevel = 'info'

-    code: str = (
-        "import tractor; "
-        "tractor.run_daemon([], registry_addrs={reg_addrs}, loglevel={ll})"
-    ).format(
-        reg_addrs=str([reg_addr]),
-        ll="'{}'".format(loglevel) if loglevel else None,
-    )
-    cmd: list[str] = [
-        sys.executable,
-        '-c', code,
+    cmdargs = [
+        sys.executable, '-c',
+        "import tractor; tractor.run_daemon([], arbiter_addr={}, loglevel={})"
+        .format(
+            arb_addr,
+            "'{}'".format(loglevel) if loglevel else None)
     ]
-    kwargs = {}
+    kwargs = dict()
     if platform.system() == 'Windows':
         # without this, tests hang on windows forever
         kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP

     proc = testdir.popen(
-        cmd,
+        cmdargs,
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         **kwargs,
@ -1,243 +0,0 @@
'''
`tractor.devx.*` tooling sub-pkg test space.

'''
import time
from typing import (
    Callable,
)

import pytest
from pexpect.exceptions import (
    TIMEOUT,
)
from pexpect.spawnbase import SpawnBase

from tractor._testing import (
    mk_cmd,
)
from tractor.devx._debug import (
    _pause_msg as _pause_msg,
    _crash_msg as _crash_msg,
    _repl_fail_msg as _repl_fail_msg,
    _ctlc_ignore_header as _ctlc_ignore_header,
)
from ..conftest import (
    _ci_env,
)


@pytest.fixture
def spawn(
    start_method,
    testdir: pytest.Pytester,
    reg_addr: tuple[str, int],

) -> Callable[[str], None]:
    '''
    Use the `pexpect` module shipped via `testdir.spawn()` to
    run an `./examples/..` script by name.

    '''
    if start_method != 'trio':
        pytest.skip(
            '`pexpect` based tests only supported on `trio` backend'
        )

    def unset_colors():
        '''
        Python 3.13 introduced colored tracebacks that break pattern
        matching,

        https://docs.python.org/3/using/cmdline.html#envvar-PYTHON_COLORS
        https://docs.python.org/3/using/cmdline.html#using-on-controlling-color

        '''
        import os
        os.environ['PYTHON_COLORS'] = '0'

    def _spawn(
        cmd: str,
        **mkcmd_kwargs,
    ):
        unset_colors()
        return testdir.spawn(
            cmd=mk_cmd(
                cmd,
                **mkcmd_kwargs,
            ),
            expect_timeout=3,
            # preexec_fn=unset_colors,
            # ^TODO? get `pytest` core to expose underlying
            # `pexpect.spawn()` stuff?
        )

    # such that test-dep can pass input script name.
    return _spawn


@pytest.fixture(
    params=[False, True],
    ids='ctl-c={}'.format,
)
def ctlc(
    request,
    ci_env: bool,

) -> bool:

    use_ctlc = request.param

    node = request.node
    markers = node.own_markers
    for mark in markers:
        if mark.name == 'has_nested_actors':
            pytest.skip(
                f'Test {node} has nested actors and fails with Ctrl-C.\n'
                f'The test can sometimes run fine locally but until '
                'we solve this issue this CI test will be xfail:\n'
                'https://github.com/goodboy/tractor/issues/320'
            )

        if mark.name == 'ctlcs_bish':
            pytest.skip(
                f'Test {node} prolly uses something from the stdlib (namely `asyncio`..)\n'
                f'The test and/or underlying example script can *sometimes* run fine '
                f'locally but more than likely until the cpython peeps get their sh#$ together, '
                f'this test will definitely not behave like `trio` under SIGINT..\n'
            )

    if use_ctlc:
        # XXX: disable pygments highlighting for auto-tests
        # since some envs (like actions CI) will struggle
        # with the added color-char encoding..
        from tractor.devx._debug import TractorConfig
        TractorConfig.use_pygements = False

    yield use_ctlc


def expect(
    child,

    # normally a `pdb` prompt by default
    patt: str,

    **kwargs,

) -> None:
    '''
    Expect wrapper that prints last seen console
    data before failing.

    '''
    try:
        child.expect(
            patt,
            **kwargs,
        )
    except TIMEOUT:
        before = str(child.before.decode())
        print(before)
        raise


PROMPT = r"\(Pdb\+\)"


def in_prompt_msg(
    child: SpawnBase,
    parts: list[str],

    pause_on_false: bool = False,
    err_on_false: bool = False,
    print_prompt_on_false: bool = True,

) -> bool:
    '''
    Predicate check if (the prompt's) std-streams output has all
    `str`-parts in it.

    Can be used in test asserts for bulk matching expected
    log/REPL output for a given `pdb` interact point.

    '''
    __tracebackhide__: bool = False

    before: str = str(child.before.decode())
    for part in parts:
        if part not in before:
            if pause_on_false:
                import pdbp
                pdbp.set_trace()

            if print_prompt_on_false:
                print(before)

            if err_on_false:
                raise ValueError(
                    f'Could not find pattern in `before` output?\n'
                    f'part: {part!r}\n'
                )
            return False

    return True


# TODO: support terminal color-chars stripping so we can match
# against call stack frame output from the 'll' command and the like!
# -[ ] SO answer for stripping ANSI codes: https://stackoverflow.com/a/14693789
def assert_before(
    child: SpawnBase,
    patts: list[str],

    **kwargs,

) -> None:
    __tracebackhide__: bool = False

    assert in_prompt_msg(
        child=child,
        parts=patts,

        # since this is an "assert" helper ;)
        err_on_false=True,
        **kwargs
    )


def do_ctlc(
    child,
    count: int = 3,
    delay: float = 0.1,
    patt: str|None = None,

    # expect repl UX to reprint the prompt after every
    # ctrl-c send.
    # XXX: no idea but, in CI this never seems to work even on 3.10 so
    # needs some further investigation potentially...
    expect_prompt: bool = not _ci_env,

) -> str|None:

    before: str|None = None

    # make sure ctl-c sends don't do anything but repeat output
    for _ in range(count):
        time.sleep(delay)
        child.sendcontrol('c')

        # TODO: figure out why this makes CI fail..
        # if you run this test manually it works just fine..
        if expect_prompt:
            time.sleep(delay)
            child.expect(PROMPT)
            before = str(child.before.decode())
            time.sleep(delay)

            if patt:
                # should see the last line on console
                assert patt in before

    # return the console content up to the final prompt
    return before
File diff suppressed because it is too large.
@ -1,381 +0,0 @@
'''
That "foreign loop/thread" debug REPL support better ALSO WORK!

Same as `test_native_pause.py`.
All these tests can be understood (somewhat) by running the
equivalent `examples/debugging/` scripts manually.

'''
from contextlib import (
    contextmanager as cm,
)
# from functools import partial
# import itertools
import time
# from typing import (
#     Iterator,
# )

import pytest
from pexpect.exceptions import (
    TIMEOUT,
    EOF,
)

from .conftest import (
    # _ci_env,
    do_ctlc,
    PROMPT,
    # expect,
    in_prompt_msg,
    assert_before,
    _pause_msg,
    _crash_msg,
    _ctlc_ignore_header,
    # _repl_fail_msg,
)


@cm
def maybe_expect_timeout(
    ctlc: bool = False,
) -> None:
    try:
        yield
    except TIMEOUT:
        # breakpoint()
        if ctlc:
            pytest.xfail(
                'Some kinda redic threading SIGINT bug i think?\n'
                'See the notes in `examples/debugging/sync_bp.py`..\n'
            )
        raise


@pytest.mark.ctlcs_bish
def test_pause_from_sync(
    spawn,
    ctlc: bool,
):
    '''
    Verify we can use the `pdbp` REPL from sync functions AND from
    any thread spawned with `trio.to_thread.run_sync()`.

    `examples/debugging/sync_bp.py`

    '''
    child = spawn('sync_bp')

    # first `sync_pause()` after nurseries open
    child.expect(PROMPT)
    assert_before(
        child,
        [
            # pre-prompt line
            _pause_msg,
            "<Task '__main__.main'",
            "('root'",
        ]
    )
    if ctlc:
        do_ctlc(child)
        # ^NOTE^ subactor not spawned yet; don't need extra delay.

    child.sendline('c')

    # first `await tractor.pause()` inside `p.open_context()` body
    child.expect(PROMPT)

    # XXX shouldn't see gb loaded message with PDB loglevel!
    # assert not in_prompt_msg(
    #     child,
    #     ['`greenback` portal opened!'],
    # )
    # should be same root task
    assert_before(
        child,
        [
            _pause_msg,
            "<Task '__main__.main'",
            "('root'",
        ]
    )

    if ctlc:
        do_ctlc(
            child,
            # NOTE: setting this to 0 (or some other sufficiently
            # small val) can cause the test to fail since the
            # `subactor` suffers a race where the root/parent
            # sends an actor-cancel prior to it hitting its pause
            # point; by default the value is 0.1
            delay=0.4,
        )

    # XXX, fwiw without a brief sleep here the SIGINT might actually
    # trigger "subactor" cancellation by its parent before the
    # shield-handler is engaged.
    #
    # => similar to the `delay` input to `do_ctlc()` below, setting
    # this too low can cause the test to fail since the `subactor`
    # suffers a race where the root/parent sends an actor-cancel
    # prior to the context task hitting its pause point (and thus
    # engaging the `sigint_shield()` handler in time); this value
    # seems to be good enuf?
    time.sleep(0.6)

    # one of the bg thread or subactor should have
    # `Lock.acquire()`-ed
    # (NOT both, which will result in REPL clobbering!)
    attach_patts: dict[str, list[str]] = {
        'subactor': [
            "'start_n_sync_pause'",
            "('subactor'",
        ],
        'inline_root_bg_thread': [
            "<Thread(inline_root_bg_thread",
            "('root'",
        ],
        'start_soon_root_bg_thread': [
            "<Thread(start_soon_root_bg_thread",
            "('root'",
        ],
    }
    conts: int = 0  # for debugging below matching logic on failure
    while attach_patts:
        child.sendline('c')
        conts += 1
        child.expect(PROMPT)
        before = str(child.before.decode())
        for key in attach_patts:
            if key in before:
                attach_key: str = key
                expected_patts: str = attach_patts.pop(key)
                assert_before(
                    child,
                    [_pause_msg]
                    +
                    expected_patts
                )
                break
        else:
            pytest.fail(
                f'No keys found?\n\n'
                f'{attach_patts.keys()}\n\n'
                f'{before}\n'
            )

        # ensure no other task/threads engaged a REPL
        # at the same time as the one that was detected above.
        for key, other_patts in attach_patts.copy().items():
            assert not in_prompt_msg(
                child,
                other_patts,
            )

        if ctlc:
            do_ctlc(
                child,
                patt=attach_key,
                # NOTE same as comment above
                delay=0.4,
            )

    child.sendline('c')

    # XXX TODO, weird threading bug it seems despite the
    # `abandon_on_cancel: bool` setting to
    # `trio.to_thread.run_sync()`..
    with maybe_expect_timeout(
        ctlc=ctlc,
    ):
        child.expect(EOF)


def expect_any_of(
    attach_patts: dict[str, list[str]],
    child,  # what type?
    ctlc: bool = False,
    prompt: str = _ctlc_ignore_header,
    ctlc_delay: float = .4,

) -> list[str]:
    '''
    Receive any of a `list[str]` of patterns provided in
    `attach_patts`.

    Used to test racing prompts from multiple actors and/or
    tasks using a common root process' `pdbp` REPL.

    '''
    assert attach_patts

    child.expect(PROMPT)
    before = str(child.before.decode())

    for attach_key in attach_patts:
        if attach_key in before:
            expected_patts: str = attach_patts.pop(attach_key)
            assert_before(
                child,
                expected_patts
            )
            break  # from for
    else:
        pytest.fail(
            f'No keys found?\n\n'
            f'{attach_patts.keys()}\n\n'
            f'{before}\n'
        )

    # ensure no other task/threads engaged a REPL
    # at the same time as the one that was detected above.
    for key, other_patts in attach_patts.copy().items():
        assert not in_prompt_msg(
            child,
            other_patts,
        )

    if ctlc:
        do_ctlc(
            child,
            patt=prompt,
            # NOTE same as comment above
            delay=ctlc_delay,
        )

    return expected_patts


@pytest.mark.ctlcs_bish
def test_sync_pause_from_aio_task(
    spawn,

    ctlc: bool
    # ^TODO, fix for `asyncio`!!
):
    '''
    Verify we can use the `pdbp` REPL from an `asyncio.Task` spawned using
    APIs in `.to_asyncio`.

    `examples/debugging/asycio_bp.py`

    '''
    child = spawn('asyncio_bp')

    # RACE on whether trio/asyncio task bps first
    attach_patts: dict[str, list[str]] = {

        # first pause in guest-mode (aka "infecting")
        # `trio.Task`.
        'trio-side': [
            _pause_msg,
            "<Task 'trio_ctx'",
            "('aio_daemon'",
        ],

        # `breakpoint()` from `asyncio.Task`.
        'asyncio-side': [
            _pause_msg,
            "<Task pending name='Task-2' coro=<greenback_shim()",
            "('aio_daemon'",
        ],
    }

    while attach_patts:
        expect_any_of(
            attach_patts=attach_patts,
            child=child,
            ctlc=ctlc,
        )
        child.sendline('c')

    # NOW in race order,
    # - the asyncio-task will error
    # - the root-actor parent task will pause
    #
    attach_patts: dict[str, list[str]] = {

        # error raised in `asyncio.Task`
        "raise ValueError('asyncio side error!')": [
            _crash_msg,
            "<Task 'trio_ctx'",
            "@ ('aio_daemon'",
            "ValueError: asyncio side error!",

            # XXX, we no longer show this frame by default!
            # 'return await chan.receive()',  # `.to_asyncio` impl internals in tb
        ],

        # parent-side propagation via actor-nursery/portal
        # "tractor._exceptions.RemoteActorError: remote task raised a 'ValueError'": [
        "remote task raised a 'ValueError'": [
            _crash_msg,
            "src_uid=('aio_daemon'",
            "('aio_daemon'",
        ],

        # a final pause in root-actor
        "<Task '__main__.main'": [
            _pause_msg,
            "<Task '__main__.main'",
            "('root'",
        ],
    }
    while attach_patts:
        expect_any_of(
            attach_patts=attach_patts,
            child=child,
            ctlc=ctlc,
        )
        child.sendline('c')

    assert not attach_patts

    # final boxed error propagates to root
    assert_before(
        child,
        [
            _crash_msg,
            "<Task '__main__.main'",
            "('root'",
            "remote task raised a 'ValueError'",
            "ValueError: asyncio side error!",
        ]
    )

    if ctlc:
        do_ctlc(
            child,
            # NOTE: setting this to 0 (or some other sufficiently
            # small val) can cause the test to fail since the
            # `subactor` suffers a race where the root/parent
            # sends an actor-cancel prior to it hitting its pause
            # point; by default the value is 0.1
            delay=0.4,
        )

    child.sendline('c')
    # with maybe_expect_timeout():
    child.expect(EOF)


def test_sync_pause_from_non_greenbacked_aio_task():
    '''
    Where the `breakpoint()` caller task is NOT spawned by
    `tractor.to_asyncio` and thus never activates
    a `greenback.ensure_portal()` beforehand, presumably bc the task
    was started by some lib/dep as is often seen in the field.

    Ensure sync pausing works when the pause is in,

    - the root actor running in infected-mode?
      |_ since we don't need any IPC to acquire the debug lock?
      |_ is there some way to handle this like the non-main-thread case?

    All other cases need to error out appropriately right?

    - for any subactor we can't avoid needing the repl lock..
      |_ is there a way to hook into `asyncio.ensure_future(obj)`?

    '''
    pass
@ -1,172 +0,0 @@
'''
That "native" runtime-hackin toolset better be dang useful!

Verify the function of a variety of "developer-experience" tools we
offer from the `.devx` sub-pkg:

- use of the lovely `stackscope` for dumping actor `trio`-task trees
  during operation and hangs.

TODO:
- demonstration of `CallerInfo` call stack frame filtering such that
  for logging and REPL purposes a user sees exactly the layers needed
  when debugging a problem inside the stack vs. in their app.

'''
import os
import signal
import time

from .conftest import (
    expect,
    assert_before,
    in_prompt_msg,
    PROMPT,
    _pause_msg,
)
from pexpect.exceptions import (
    # TIMEOUT,
    EOF,
)


def test_shield_pause(
    spawn,
):
    '''
    Verify the `tractor.pause()/.post_mortem()` API works inside an
    already cancelled `trio.CancelScope` and that you can step to the
    next checkpoint wherein the cancelled will get raised.

    '''
    child = spawn(
        'shield_hang_in_sub'
    )
    expect(
        child,
        'Yo my child hanging..?',
    )
    assert_before(
        child,
        [
            'Entering shield sleep..',
            'Enabling trace-trees on `SIGUSR1` since `stackscope` is installed @',
        ]
    )

    script_pid: int = child.pid
    print(
        f'Sending SIGUSR1 to {script_pid}\n'
        f'(kill -s SIGUSR1 {script_pid})\n'
    )
    os.kill(
        script_pid,
        signal.SIGUSR1,
    )
    time.sleep(0.2)
    expect(
        child,
        # end-of-tree delimiter
        "end-of-\('root'",
    )
    assert_before(
        child,
        [
            # 'Trying to dump `stackscope` tree..',
            # 'Dumping `stackscope` tree for actor',
            "('root'",  # uid line

            # TODO!? this used to show?
            # -[ ] mk reproducible for @oremanj?
            #
            # parent block point (non-shielded)
            # 'await trio.sleep_forever()  # in root',
        ]
    )
    expect(
        child,
        # end-of-tree delimiter
        "end-of-\('hanger'",
    )
    assert_before(
        child,
        [
            # relay to the sub should be reported
            'Relaying `SIGUSR1`[10] to sub-actor',

            "('hanger'",  # uid line

            # TODO!? SEE ABOVE
            # hanger LOC where it's shield-halted
            # 'await trio.sleep_forever()  # in subactor',
        ]
    )

    # simulate the user sending a ctl-c to the hanging program.
    # this should result in the terminator kicking in since
    # the sub is shield blocking and can't respond to SIGINT.
    os.kill(
        child.pid,
        signal.SIGINT,
    )
    expect(
        child,
        'Shutting down actor runtime',
        timeout=6,
    )
    assert_before(
        child,
        [
            'raise KeyboardInterrupt',
            # 'Shutting down actor runtime',
            '#T-800 deployed to collect zombie B0',
            "'--uid', \"('hanger',",
        ]
    )


def test_breakpoint_hook_restored(
    spawn,
):
    '''
    Ensures our actor runtime sets a custom `breakpoint()` hook
    on open then restores the stdlib's default on close.

    The hook state validation is done via `assert`s inside the
    invoked script with only `breakpoint()` (not `tractor.pause()`)
    calls used.

    '''
    child = spawn('restore_builtin_breakpoint')

    child.expect(PROMPT)
    assert_before(
        child,
        [
            _pause_msg,
            "<Task '__main__.main'",
            "('root'",
            "first bp, tractor hook set",
        ]
    )
    child.sendline('c')
    child.expect(PROMPT)
    assert_before(
        child,
        [
            "last bp, stdlib hook restored",
        ]
    )

    # since the stdlib hook was already restored there should be NO
    # `tractor` `log.pdb()` content from console!
    assert not in_prompt_msg(
        child,
        [
            _pause_msg,
            "<Task '__main__.main'",
            "('root'",
        ],
    )
    child.sendline('c')
    child.expect(EOF)
@ -1,11 +1,382 @@
|
|||
"""
|
||||
Bidirectional streaming.
|
||||
Bidirectional streaming and context API.
|
||||
|
||||
"""
|
||||
import pytest
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
from conftest import tractor_test
|
||||
|
||||
# the general stream semantics are
|
||||
# - normal termination: far end relays a stop message which
|
||||
# terminates an ongoing ``MsgStream`` iteration
|
||||
# - cancel termination: context is cancelled on either side cancelling
|
||||
# the "linked" inter-actor task context
|
||||
|
||||
|
||||
_state: bool = False
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def simple_setup_teardown(
|
||||
|
||||
ctx: tractor.Context,
|
||||
data: int,
|
||||
block_forever: bool = False,
|
||||
|
||||
) -> None:
|
||||
|
||||
# startup phase
|
||||
global _state
|
||||
_state = True
|
||||
|
||||
# signal to parent that we're up
|
||||
await ctx.started(data + 1)
|
||||
|
||||
try:
|
||||
if block_forever:
|
||||
# block until cancelled
|
||||
await trio.sleep_forever()
|
||||
else:
|
||||
return 'yo'
|
||||
finally:
|
||||
_state = False
|
||||
|
||||
|
||||
async def assert_state(value: bool):
|
||||
global _state
|
||||
assert _state == value
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'error_parent',
|
||||
[False, True],
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
'callee_blocks_forever',
|
||||
[False, True],
|
||||
)
|
||||
def test_simple_context(
|
||||
error_parent,
|
||||
callee_blocks_forever,
|
||||
):
|
||||
|
||||
async def main():
|
||||
|
||||
async with tractor.open_nursery() as n:
|
||||
|
||||
portal = await n.start_actor(
|
||||
'simple_context',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
async with portal.open_context(
|
||||
simple_setup_teardown,
|
||||
data=10,
|
||||
block_forever=callee_blocks_forever,
|
||||
) as (ctx, sent):
|
||||
|
||||
assert sent == 11
|
||||
|
||||
if callee_blocks_forever:
|
||||
await portal.run(assert_state, value=True)
|
||||
await ctx.cancel()
|
||||
else:
|
||||
assert await ctx.result() == 'yo'
|
||||
|
||||
# after cancellation
|
||||
await portal.run(assert_state, value=False)
|
||||
|
||||
if error_parent:
|
||||
raise ValueError
|
||||
|
||||
# shut down daemon
|
||||
await portal.cancel_actor()
|
||||
|
||||
if error_parent:
|
||||
try:
|
||||
trio.run(main)
|
||||
except ValueError:
|
||||
pass
|
||||
else:
|
||||
trio.run(main)
|
||||
|
||||
|
||||
# basic stream terminations:
|
||||
# - callee context closes without using stream
|
||||
# - caller context closes without using stream
|
||||
# - caller context calls `Context.cancel()` while streaming
|
||||
# is ongoing resulting in callee being cancelled
|
||||
# - callee calls `Context.cancel()` while streaming and caller
|
||||
# sees stream terminated in `RemoteActorError`
|
||||
|
||||
# TODO: future possible features
|
||||
# - restart request: far end raises `ContextRestart`
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def close_ctx_immediately(
|
||||
|
||||
ctx: tractor.Context,
|
||||
|
||||
) -> None:
|
||||
|
||||
await ctx.started()
|
||||
global _state
|
||||
|
||||
async with ctx.open_stream():
|
||||
pass
|
||||
|
||||
|
||||
@tractor_test
|
||||
async def test_callee_closes_ctx_after_stream_open():
|
||||
'callee context closes without using stream'
|
||||
|
||||
async with tractor.open_nursery() as n:
|
||||
|
||||
portal = await n.start_actor(
|
||||
'fast_stream_closer',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
async with portal.open_context(
|
||||
close_ctx_immediately,
|
||||
|
||||
# flag to avoid waiting the final result
|
||||
# cancel_on_exit=True,
|
||||
|
||||
) as (ctx, sent):
|
||||
|
||||
assert sent is None
|
||||
|
||||
with trio.fail_after(0.5):
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
# should fall through since ``StopAsyncIteration``
|
||||
# should be raised through translation of
|
||||
# a ``trio.EndOfChannel`` by
|
||||
# ``trio.abc.ReceiveChannel.__anext__()``
|
||||
async for _ in stream:
|
||||
assert 0
|
||||
else:
|
||||
|
||||
# verify stream is now closed
|
||||
try:
|
||||
await stream.receive()
|
||||
except trio.EndOfChannel:
|
||||
pass
|
||||
|
||||
# TODO: should we just raise the closed resource err
|
||||
# directly here to enforce not allowing a re-open
|
||||
# of a stream to the context (at least until
|
||||
# if/when we decide that's a good idea?)
|
||||
try:
|
||||
async with ctx.open_stream() as stream:
|
||||
pass
|
||||
except trio.ClosedResourceError:
|
||||
pass
|
||||
|
||||
await portal.cancel_actor()
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def expect_cancelled(
|
||||
|
||||
ctx: tractor.Context,
|
||||
|
||||
) -> None:
|
||||
global _state
|
||||
_state = True
|
||||
|
||||
await ctx.started()
|
||||
|
||||
try:
|
||||
async with ctx.open_stream() as stream:
|
||||
async for msg in stream:
|
||||
await stream.send(msg) # echo server
|
||||
|
||||
except trio.Cancelled:
|
||||
# expected case
|
||||
_state = False
|
||||
raise
|
||||
|
||||
else:
|
||||
assert 0, "Wasn't cancelled!?"
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'use_ctx_cancel_method',
|
||||
[False, True],
|
||||
)
|
||||
@tractor_test
|
||||
async def test_caller_closes_ctx_after_callee_opens_stream(
|
||||
use_ctx_cancel_method: bool,
|
||||
):
|
||||
'caller context closes without using stream'
|
||||
|
||||
async with tractor.open_nursery() as n:
|
||||
|
||||
portal = await n.start_actor(
|
||||
'ctx_cancelled',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
async with portal.open_context(
|
||||
expect_cancelled,
|
||||
) as (ctx, sent):
|
||||
await portal.run(assert_state, value=True)
|
||||
|
||||
assert sent is None
|
||||
|
||||
# call cancel explicitly
|
||||
if use_ctx_cancel_method:
|
||||
|
||||
await ctx.cancel()
|
||||
|
||||
try:
|
||||
async with ctx.open_stream() as stream:
|
||||
async for msg in stream:
|
||||
pass
|
||||
|
||||
except tractor.ContextCancelled:
|
||||
raise # XXX: must be propagated to __aexit__
|
||||
|
||||
else:
|
||||
assert 0, "Should have context cancelled?"
|
||||
|
||||
# channel should still be up
|
||||
assert portal.channel.connected()
|
||||
|
||||
# ctx is closed here
|
||||
await portal.run(assert_state, value=False)
|
||||
|
||||
else:
|
||||
try:
|
||||
with trio.fail_after(0.2):
|
||||
await ctx.result()
|
||||
assert 0, "Callee should have blocked!?"
|
||||
except trio.TooSlowError:
|
||||
await ctx.cancel()
|
||||
try:
|
||||
async with ctx.open_stream() as stream:
|
||||
async for msg in stream:
|
||||
pass
|
||||
except tractor.ContextCancelled:
|
||||
pass
|
||||
else:
|
||||
assert 0, "Should have received closed resource error?"
|
||||
|
||||
# ctx is closed here
|
||||
await portal.run(assert_state, value=False)
|
||||
|
||||
# channel should not have been destroyed yet, only the
|
||||
# inter-actor-task context
|
||||
assert portal.channel.connected()
|
||||
|
||||
# teardown the actor
|
||||
await portal.cancel_actor()
|
||||
|
||||
|
||||
@tractor_test
|
||||
async def test_multitask_caller_cancels_from_nonroot_task():
|
||||
|
||||
async with tractor.open_nursery() as n:
|
||||
|
||||
portal = await n.start_actor(
|
||||
'ctx_cancelled',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
async with portal.open_context(
|
||||
expect_cancelled,
|
||||
) as (ctx, sent):
|
||||
|
||||
await portal.run(assert_state, value=True)
|
||||
assert sent is None
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
async def send_msg_then_cancel():
|
||||
await stream.send('yo')
|
||||
await portal.run(assert_state, value=True)
|
||||
await ctx.cancel()
|
||||
await portal.run(assert_state, value=False)
|
||||
|
||||
async with trio.open_nursery() as n:
|
||||
n.start_soon(send_msg_then_cancel)
|
||||
|
||||
try:
|
||||
async for msg in stream:
|
||||
assert msg == 'yo'
|
||||
|
||||
except tractor.ContextCancelled:
|
||||
raise # XXX: must be propagated to __aexit__
|
||||
|
||||
# channel should still be up
|
||||
assert portal.channel.connected()
|
||||
|
||||
# ctx is closed here
|
||||
await portal.run(assert_state, value=False)
|
||||
|
||||
# channel should not have been destroyed yet, only the
|
||||
# inter-actor-task context
|
||||
assert portal.channel.connected()
|
||||
|
||||
# teardown the actor
|
||||
await portal.cancel_actor()
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def cancel_self(
|
||||
|
||||
ctx: tractor.Context,
|
||||
|
||||
) -> None:
|
||||
global _state
|
||||
_state = True
|
||||
|
||||
await ctx.cancel()
|
||||
try:
|
||||
with trio.fail_after(0.1):
|
||||
await trio.sleep_forever()
|
||||
|
||||
except trio.Cancelled:
|
||||
raise
|
||||
|
||||
except trio.TooSlowError:
|
||||
# should never get here
|
||||
assert 0
|
||||
|
||||
|
||||
@tractor_test
|
||||
async def test_callee_cancels_before_started():
|
||||
'''callee calls `Context.cancel()` while streaming and caller
|
||||
sees stream terminated in `ContextCancelled`.
|
||||
|
||||
'''
|
||||
async with tractor.open_nursery() as n:
|
||||
|
||||
portal = await n.start_actor(
|
||||
'cancels_self',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
try:
|
||||
|
||||
async with portal.open_context(
|
||||
cancel_self,
|
||||
) as (ctx, sent):
|
||||
async with ctx.open_stream():
|
||||
|
||||
await trio.sleep_forever()
|
||||
|
||||
# raises a special cancel signal
|
||||
except tractor.ContextCancelled as ce:
|
||||
assert ce.type == trio.Cancelled
|
||||
|
||||
# teardown the actor
|
||||
await portal.cancel_actor()
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def simple_rpc(
|
||||
|
@ -14,10 +385,9 @@ async def simple_rpc(
|
|||
data: int,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Test a small ping-pong server.
|
||||
"""Test a small ping-pong server.
|
||||
|
||||
'''
|
||||
"""
|
||||
# signal to parent that we're up
|
||||
await ctx.started(data + 1)
|
||||
|
||||
|
@ -75,10 +445,9 @@ async def simple_rpc_with_forloop(
|
|||
[simple_rpc, simple_rpc_with_forloop],
|
||||
)
|
||||
def test_simple_rpc(server_func, use_async_for):
|
||||
'''
|
||||
The simplest request response pattern.
|
||||
"""The simplest request response pattern.
|
||||
|
||||
'''
|
||||
"""
|
||||
async def main():
|
||||
async with tractor.open_nursery() as n:
|
||||
|
||||
|
|
|
@ -1,290 +0,0 @@
|
|||
'''
|
||||
Sketchy network blackoutz, ugly byzantine gens, can you hear the
|
||||
cancellation?..
|
||||
|
||||
'''
|
||||
from functools import partial
|
||||
from types import ModuleType
|
||||
|
||||
import pytest
|
||||
from _pytest.pathlib import import_path
|
||||
import trio
|
||||
import tractor
|
||||
from tractor._testing import (
|
||||
examples_dir,
|
||||
break_ipc,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'pre_aclose_msgstream',
|
||||
[
|
||||
False,
|
||||
True,
|
||||
],
|
||||
ids=[
|
||||
'no_msgstream_aclose',
|
||||
'pre_aclose_msgstream',
|
||||
],
|
||||
)
|
||||
@pytest.mark.parametrize(
|
||||
'ipc_break',
|
||||
[
|
||||
# no breaks
|
||||
{
|
||||
'break_parent_ipc_after': False,
|
||||
'break_child_ipc_after': False,
|
||||
},
|
||||
|
||||
# only parent breaks
|
||||
{
|
||||
'break_parent_ipc_after': 500,
|
||||
'break_child_ipc_after': False,
|
||||
},
|
||||
|
||||
# only child breaks
|
||||
{
|
||||
'break_parent_ipc_after': False,
|
||||
'break_child_ipc_after': 500,
|
||||
},
|
||||
|
||||
# both: break parent first
|
||||
{
|
||||
'break_parent_ipc_after': 500,
|
||||
'break_child_ipc_after': 800,
|
||||
},
|
||||
# both: break child first
|
||||
{
|
||||
'break_parent_ipc_after': 800,
|
||||
'break_child_ipc_after': 500,
|
||||
},
|
||||
|
||||
],
|
||||
ids=[
|
||||
'no_break',
|
||||
'break_parent',
|
||||
'break_child',
|
||||
'break_both_parent_first',
|
||||
'break_both_child_first',
|
||||
],
|
||||
)
|
||||
def test_ipc_channel_break_during_stream(
|
||||
debug_mode: bool,
|
||||
loglevel: str,
|
||||
spawn_backend: str,
|
||||
ipc_break: dict|None,
|
||||
pre_aclose_msgstream: bool,
|
||||
):
|
||||
'''
|
||||
Ensure we can have an IPC channel break its connection during
|
||||
streaming and it's still possible for the (simulated) user to kill
|
||||
the actor tree using SIGINT.
|
||||
|
||||
We also verify the type of connection error expected in the parent
|
||||
depending on which side of the IPC breaks first.
|
||||
|
||||
'''
|
||||
if spawn_backend != 'trio':
|
||||
if debug_mode:
|
||||
pytest.skip('`debug_mode` only supported on `trio` spawner')
|
||||
|
||||
# non-`trio` spawners should never hit the hang condition that
|
||||
# requires the user to do ctl-c to cancel the actor tree.
|
||||
# expect_final_exc = trio.ClosedResourceError
|
||||
expect_final_exc = tractor.TransportClosed
|
||||
|
||||
mod: ModuleType = import_path(
|
||||
examples_dir() / 'advanced_faults'
|
||||
/ 'ipc_failure_during_stream.py',
|
||||
root=examples_dir(),
|
||||
consider_namespace_packages=False,
|
||||
)
|
||||
|
||||
# by def we expect KBI from user after a simulated "hang
|
||||
# period" wherein the user eventually hits ctl-c to kill the
|
||||
# root-actor tree.
|
||||
expect_final_exc: BaseException = KeyboardInterrupt
|
||||
if (
|
||||
# only expect EoC if trans is broken on the child side,
|
||||
ipc_break['break_child_ipc_after'] is not False
|
||||
# AND we tell the child to call `MsgStream.aclose()`.
|
||||
and pre_aclose_msgstream
|
||||
):
|
||||
# expect_final_exc = trio.EndOfChannel
|
||||
# ^XXX NOPE! XXX^ since now `.open_stream()` absorbs this
|
||||
# gracefully!
|
||||
expect_final_exc = KeyboardInterrupt
|
||||
|
||||
# NOTE when ONLY the child breaks or it breaks BEFORE the
|
||||
# parent we expect the parent to get a closed resource error
|
||||
# on the next `MsgStream.receive()` and then fail out and
|
||||
# cancel the child from there.
|
||||
#
|
||||
# ONLY CHILD breaks
|
||||
if (
|
||||
ipc_break['break_child_ipc_after']
|
||||
and
|
||||
ipc_break['break_parent_ipc_after'] is False
|
||||
):
|
||||
# NOTE: we DO NOT expect this any more since
|
||||
# the child side's channel will be broken silently
|
||||
# and nothing on the parent side will indicate this!
|
||||
# expect_final_exc = trio.ClosedResourceError
|
||||
|
||||
# NOTE: child will send a 'stop' msg before it breaks
|
||||
# the transport channel BUT, that will be absorbed by the
|
||||
# `ctx.open_stream()` block and thus the `.open_context()`
|
||||
# should hang, after which the test script simulates
|
||||
# a user sending ctl-c by raising a KBI.
|
||||
if pre_aclose_msgstream:
|
||||
expect_final_exc = KeyboardInterrupt
|
||||
|
||||
# XXX OLD XXX
|
||||
# if child calls `MsgStream.aclose()` then expect EoC.
|
||||
# ^ XXX not any more ^ since eoc is always absorbed
|
||||
# gracefully and NOT bubbled to the `.open_context()`
|
||||
# block!
|
||||
# expect_final_exc = trio.EndOfChannel
|
||||
|
||||
# BOTH but, CHILD breaks FIRST
|
||||
elif (
|
||||
ipc_break['break_child_ipc_after'] is not False
|
||||
and (
|
||||
ipc_break['break_parent_ipc_after']
|
||||
> ipc_break['break_child_ipc_after']
|
||||
)
|
||||
):
|
||||
if pre_aclose_msgstream:
|
||||
expect_final_exc = KeyboardInterrupt
|
||||
|
||||
# NOTE when the parent IPC side dies (even if the child does as well
|
||||
# but the child fails BEFORE the parent) we always expect the
|
||||
# IPC layer to raise a closed-resource, NEVER do we expect
|
||||
# a stop msg since the parent-side ctx apis will error out
|
||||
# IMMEDIATELY before the child ever sends any 'stop' msg.
|
||||
#
|
||||
# ONLY PARENT breaks
|
||||
elif (
|
||||
ipc_break['break_parent_ipc_after']
|
||||
and
|
||||
ipc_break['break_child_ipc_after'] is False
|
||||
):
|
||||
# expect_final_exc = trio.ClosedResourceError
|
||||
expect_final_exc = tractor.TransportClosed
|
||||
|
||||
# BOTH but, PARENT breaks FIRST
|
||||
elif (
|
||||
ipc_break['break_parent_ipc_after'] is not False
|
||||
and (
|
||||
ipc_break['break_child_ipc_after']
|
||||
>
|
||||
ipc_break['break_parent_ipc_after']
|
||||
)
|
||||
):
|
||||
# expect_final_exc = trio.ClosedResourceError
|
||||
expect_final_exc = tractor.TransportClosed
|
||||
|
||||
with pytest.raises(
|
||||
expected_exception=(
|
||||
expect_final_exc,
|
||||
ExceptionGroup,
|
||||
),
|
||||
) as excinfo:
|
||||
try:
|
||||
trio.run(
|
||||
partial(
|
||||
mod.main,
|
||||
debug_mode=debug_mode,
|
||||
start_method=spawn_backend,
|
||||
loglevel=loglevel,
|
||||
pre_close=pre_aclose_msgstream,
|
||||
**ipc_break,
|
||||
)
|
||||
)
|
||||
except KeyboardInterrupt as _kbi:
|
||||
kbi = _kbi
|
||||
if expect_final_exc is not KeyboardInterrupt:
|
||||
pytest.fail(
|
||||
'Rxed unexpected KBI !?\n'
|
||||
f'{repr(kbi)}'
|
||||
)
|
||||
|
||||
raise
|
||||
|
||||
except tractor.TransportClosed as _tc:
|
||||
tc = _tc
|
||||
if expect_final_exc is KeyboardInterrupt:
|
||||
pytest.fail(
|
||||
'Unexpected transport failure !?\n'
|
||||
f'{repr(tc)}'
|
||||
)
|
||||
cause: Exception = tc.__cause__
|
||||
assert (
|
||||
type(cause) is trio.ClosedResourceError
|
||||
and
|
||||
cause.args[0] == 'another task closed this fd'
|
||||
)
|
||||
raise
|
||||
|
||||
# get raw instance from pytest wrapper
|
||||
value = excinfo.value
|
||||
if isinstance(value, ExceptionGroup):
|
||||
excs = value.exceptions
|
||||
assert len(excs) == 1
|
||||
final_exc = excs[0]
|
||||
assert isinstance(final_exc, expect_final_exc)
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def break_ipc_after_started(
|
||||
ctx: tractor.Context,
|
||||
) -> None:
|
||||
await ctx.started()
|
||||
async with ctx.open_stream() as stream:
|
||||
|
||||
# TODO: make a test which verifies the error
|
||||
# for this, i.e. raises a `MsgTypeError`
|
||||
# await ctx.chan.send(None)
|
||||
|
||||
await break_ipc(
|
||||
stream=stream,
|
||||
pre_close=True,
|
||||
)
|
||||
print('child broke IPC and terminating')
|
||||
|
||||
|
||||
def test_stream_closed_right_after_ipc_break_and_zombie_lord_engages():
|
||||
'''
|
||||
Verify that if a subactor's IPC goes down just after bringing up
|
||||
a stream, the parent can trigger a SIGINT and the child will be
|
||||
reaped out-of-IPC by the localhost process supervision machinery:
|
||||
aka "zombie lord".
|
||||
|
||||
'''
|
||||
async def main():
|
||||
with trio.fail_after(3):
|
||||
async with tractor.open_nursery() as an:
|
||||
portal = await an.start_actor(
|
||||
'ipc_breaker',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
with trio.move_on_after(1):
|
||||
async with (
|
||||
portal.open_context(
|
||||
break_ipc_after_started
|
||||
) as (ctx, sent),
|
||||
):
|
||||
async with ctx.open_stream():
|
||||
await trio.sleep(0.5)
|
||||
|
||||
print('parent waiting on context')
|
||||
|
||||
print(
|
||||
'parent exited context\n'
|
||||
'parent raising KBI..\n'
|
||||
)
|
||||
raise KeyboardInterrupt
|
||||
|
||||
with pytest.raises(KeyboardInterrupt):
|
||||
trio.run(main)
|
|
@ -1,21 +1,15 @@
|
|||
'''
|
||||
"""
|
||||
Advanced streaming patterns using bidirectional streams and contexts.
|
||||
|
||||
'''
|
||||
from collections import Counter
|
||||
"""
|
||||
import itertools
|
||||
import platform
|
||||
from typing import Set, Dict, List
|
||||
|
||||
import pytest
|
||||
import trio
|
||||
import tractor
|
||||
|
||||
|
||||
def is_win():
|
||||
return platform.system() == 'Windows'
|
||||
|
||||
|
||||
_registry: dict[str, set[tractor.MsgStream]] = {
|
||||
_registry: Dict[str, Set[tractor.ReceiveMsgStream]] = {
|
||||
'even': set(),
|
||||
'odd': set(),
|
||||
}
|
||||
|
@ -77,7 +71,7 @@ async def subscribe(
|
|||
|
||||
async def consumer(
|
||||
|
||||
subs: list[str],
|
||||
subs: List[str],
|
||||
|
||||
) -> None:
|
||||
|
||||
|
@ -144,16 +138,8 @@ def test_dynamic_pub_sub():
|
|||
|
||||
try:
|
||||
trio.run(main)
|
||||
except (
|
||||
trio.TooSlowError,
|
||||
ExceptionGroup,
|
||||
) as err:
|
||||
if isinstance(err, ExceptionGroup):
|
||||
for suberr in err.exceptions:
|
||||
if isinstance(suberr, trio.TooSlowError):
|
||||
break
|
||||
else:
|
||||
pytest.fail('Never got a `TooSlowError` ?')
|
||||
except trio.TooSlowError:
|
||||
pass
|
||||
|
||||
|
||||
@tractor.context
|
||||
|
@ -186,22 +172,14 @@ async def one_task_streams_and_one_handles_reqresp(
|
|||
|
||||
|
||||
def test_reqresp_ontopof_streaming():
|
||||
'''
|
||||
Test a subactor that both streams with one task and
|
||||
'''Test a subactor that both streams with one task and
|
||||
spawns another which handles a small requests-response
|
||||
dialogue over the same bidir-stream.
|
||||
|
||||
'''
|
||||
async def main():
|
||||
|
||||
# flag to make sure we get at least one pong
|
||||
got_pong: bool = False
|
||||
timeout: int = 2
|
||||
|
||||
if is_win(): # smh
|
||||
timeout = 4
|
||||
|
||||
with trio.move_on_after(timeout):
|
||||
with trio.move_on_after(2):
|
||||
async with tractor.open_nursery() as n:
|
||||
|
||||
# name of this actor will be same as target func
|
||||
|
@ -210,6 +188,9 @@ def test_reqresp_ontopof_streaming():
|
|||
enable_modules=[__name__]
|
||||
)
|
||||
|
||||
# flag to make sure we get at least one pong
|
||||
got_pong: bool = False
|
||||
|
||||
async with portal.open_context(
|
||||
one_task_streams_and_one_handles_reqresp,
|
||||
|
||||
|
@ -237,189 +218,3 @@ def test_reqresp_ontopof_streaming():
|
|||
trio.run(main)
|
||||
except trio.TooSlowError:
|
||||
pass
|
||||
|
||||
|
||||
async def async_gen_stream(sequence):
|
||||
for i in sequence:
|
||||
yield i
|
||||
await trio.sleep(0.1)
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def echo_ctx_stream(
|
||||
ctx: tractor.Context,
|
||||
) -> None:
|
||||
await ctx.started()
|
||||
|
||||
async with ctx.open_stream() as stream:
|
||||
async for msg in stream:
|
||||
await stream.send(msg)
|
||||
|
||||
|
||||
def test_sigint_both_stream_types():
|
||||
'''Verify that running a bi-directional and recv only stream
|
||||
side-by-side will cancel correctly from SIGINT.
|
||||
|
||||
'''
|
||||
timeout: float = 2
|
||||
if is_win(): # smh
|
||||
timeout += 1
|
||||
|
||||
async def main():
|
||||
with trio.fail_after(timeout):
|
||||
async with tractor.open_nursery() as n:
|
||||
# name of this actor will be same as target func
|
||||
portal = await n.start_actor(
|
||||
'2_way',
|
||||
enable_modules=[__name__]
|
||||
)
|
||||
|
||||
async with portal.open_context(echo_ctx_stream) as (ctx, _):
|
||||
async with ctx.open_stream() as stream:
|
||||
async with portal.open_stream_from(
|
||||
async_gen_stream,
|
||||
sequence=list(range(1)),
|
||||
) as gen_stream:
|
||||
|
||||
msg = await gen_stream.receive()
|
||||
await stream.send(msg)
|
||||
resp = await stream.receive()
|
||||
assert resp == msg
|
||||
raise KeyboardInterrupt
|
||||
|
||||
try:
|
||||
trio.run(main)
|
||||
assert 0, "Didn't receive KBI!?"
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def inf_streamer(
|
||||
ctx: tractor.Context,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Stream increasing ints until terminated with a 'done' msg.
|
||||
|
||||
'''
|
||||
await ctx.started()
|
||||
|
||||
async with (
|
||||
ctx.open_stream() as stream,
|
||||
|
||||
# XXX TODO, INTERESTING CASE!!
|
||||
# - if we don't collapse the eg then the embedded
|
||||
# `trio.EndOfChannel` doesn't propagate directly to the above
|
||||
# .open_stream() parent, resulting in it also raising instead
|
||||
# of gracefully absorbing as normal.. so how to handle?
|
||||
trio.open_nursery(
|
||||
strict_exception_groups=False,
|
||||
) as tn,
|
||||
):
|
||||
async def close_stream_on_sentinel():
|
||||
async for msg in stream:
|
||||
if msg == 'done':
|
||||
print(
|
||||
'streamer RXed "done" sentinel msg!\n'
|
||||
'CLOSING `MsgStream`!'
|
||||
)
|
||||
await stream.aclose()
|
||||
else:
|
||||
print(f'streamer received {msg}')
|
||||
else:
|
||||
print('streamer exited recv loop')
|
||||
|
||||
# start termination detector
|
||||
tn.start_soon(close_stream_on_sentinel)
|
||||
|
||||
cap: int = 10000  # so that we don't spin forever when there's a bug..
|
||||
for val in range(cap):
|
||||
try:
|
||||
print(f'streamer sending {val}')
|
||||
await stream.send(val)
|
||||
if val > cap:
|
||||
raise RuntimeError(
|
||||
'Streamer never cancelled by sentinel?'
|
||||
)
|
||||
await trio.sleep(0.001)
|
||||
|
||||
# close out the stream gracefully
|
||||
except trio.ClosedResourceError:
|
||||
print('transport closed on streamer side!')
|
||||
assert stream.closed
|
||||
break
|
||||
else:
|
||||
raise RuntimeError(
|
||||
'Streamer not cancelled before it finished sending?'
|
||||
)
|
||||
|
||||
print('streamer exited .open_streamer() block')
|
||||
|
||||
|
||||
def test_local_task_fanout_from_stream(
|
||||
debug_mode: bool,
|
||||
):
|
||||
'''
|
||||
Single stream with multiple local consumer tasks using the
|
||||
``MsgStream.subscribe()`` api.
|
||||
|
||||
Ensure all tasks receive all values after stream completes
|
||||
sending.
|
||||
|
||||
'''
|
||||
consumers: int = 22
|
||||
|
||||
async def main():
|
||||
|
||||
counts = Counter()
|
||||
|
||||
async with tractor.open_nursery(
|
||||
debug_mode=debug_mode,
|
||||
) as tn:
|
||||
p: tractor.Portal = await tn.start_actor(
|
||||
'inf_streamer',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
async with (
|
||||
p.open_context(inf_streamer) as (ctx, _),
|
||||
ctx.open_stream() as stream,
|
||||
):
|
||||
async def pull_and_count(name: str):
|
||||
# name = trio.lowlevel.current_task().name
|
||||
async with stream.subscribe() as recver:
|
||||
assert isinstance(
|
||||
recver,
|
||||
tractor.trionics.BroadcastReceiver
|
||||
)
|
||||
async for val in recver:
|
||||
print(f'bx {name} rx: {val}')
|
||||
counts[name] += 1
|
||||
|
||||
print(f'{name} bcaster ended')
|
||||
|
||||
print(f'{name} completed')
|
||||
|
||||
with trio.fail_after(3):
|
||||
async with trio.open_nursery() as nurse:
|
||||
for i in range(consumers):
|
||||
nurse.start_soon(
|
||||
pull_and_count,
|
||||
i,
|
||||
)
|
||||
|
||||
# delay to let bcast consumers pull msgs
|
||||
await trio.sleep(0.5)
|
||||
print('terminating nursery of bcast rxer consumers!')
|
||||
await stream.send('done')
|
||||
|
||||
print('closed stream connection')
|
||||
|
||||
assert len(counts) == consumers
|
||||
mx = max(counts.values())
|
||||
# make sure each task received all stream values
|
||||
assert all(val == mx for val in counts.values())
|
||||
|
||||
await p.cancel_actor()
|
||||
|
||||
trio.run(main)
|
||||
|
|
|
@ -1,6 +1,5 @@
|
|||
"""
|
||||
Cancellation and error propagation
|
||||
|
||||
"""
|
||||
import os
|
||||
import signal
|
||||
|
@ -11,14 +10,8 @@ from itertools import repeat
|
|||
import pytest
|
||||
import trio
|
||||
import tractor
|
||||
from tractor._testing import (
|
||||
tractor_test,
|
||||
)
|
||||
from .conftest import no_windows
|
||||
|
||||
|
||||
def is_win():
|
||||
return platform.system() == 'Windows'
|
||||
from conftest import tractor_test, no_windows
|
||||
|
||||
|
||||
async def assert_err(delay=0):
|
||||
|
@ -45,82 +38,45 @@ async def do_nuthin():
|
|||
],
|
||||
ids=['no_args', 'unexpected_args'],
|
||||
)
|
||||
def test_remote_error(reg_addr, args_err):
|
||||
'''
|
||||
Verify an error raised in a subactor that is propagated
|
||||
def test_remote_error(arb_addr, args_err):
|
||||
"""Verify an error raised in a subactor that is propagated
|
||||
to the parent nursery, contains the underlying boxed builtin
|
||||
error type info and causes cancellation and reraising all the
|
||||
way up the stack.
|
||||
|
||||
'''
|
||||
"""
|
||||
args, errtype = args_err
|
||||
|
||||
async def main():
|
||||
async with tractor.open_nursery(
|
||||
registry_addrs=[reg_addr],
|
||||
arbiter_addr=arb_addr,
|
||||
) as nursery:
|
||||
|
||||
# on a remote type error caused by bad input args
|
||||
# this should raise directly which means we **don't** get
|
||||
# an exception group outside the nursery since the error
|
||||
# here and the far end task error are one in the same?
|
||||
portal = await nursery.run_in_actor(
|
||||
assert_err,
|
||||
name='errorer',
|
||||
**args
|
||||
assert_err, name='errorer', **args
|
||||
)
|
||||
|
||||
# get result(s) from main task
|
||||
try:
|
||||
# this means the root actor will also raise a local
|
||||
# parent task error and thus an eg will propagate out
|
||||
# of this actor nursery.
|
||||
await portal.result()
|
||||
except tractor.RemoteActorError as err:
|
||||
assert err.boxed_type == errtype
|
||||
assert err.type == errtype
|
||||
print("Look Maa that actor failed hard, hehh")
|
||||
raise
|
||||
|
||||
# ensure boxed errors
|
||||
if args:
|
||||
with pytest.raises(tractor.RemoteActorError) as excinfo:
|
||||
trio.run(main)
|
||||
with pytest.raises(tractor.RemoteActorError) as excinfo:
|
||||
trio.run(main)
|
||||
|
||||
assert excinfo.value.boxed_type == errtype
|
||||
|
||||
else:
|
||||
# the root task will also error on the `Portal.result()`
|
||||
# call so we expect an error from there AND the child.
|
||||
# |_ tho seems like on new `trio` this doesn't always
|
||||
# happen?
|
||||
with pytest.raises((
|
||||
BaseExceptionGroup,
|
||||
tractor.RemoteActorError,
|
||||
)) as excinfo:
|
||||
trio.run(main)
|
||||
|
||||
# ensure boxed errors are `errtype`
|
||||
err: BaseException = excinfo.value
|
||||
if isinstance(err, BaseExceptionGroup):
|
||||
suberrs: list[BaseException] = err.exceptions
|
||||
else:
|
||||
suberrs: list[BaseException] = [err]
|
||||
|
||||
for exc in suberrs:
|
||||
assert exc.boxed_type == errtype
|
||||
# ensure boxed error is correct
|
||||
assert excinfo.value.type == errtype
|
||||
|
||||
|
||||
def test_multierror(
|
||||
reg_addr: tuple[str, int],
|
||||
):
|
||||
'''
|
||||
Verify we raise a ``BaseExceptionGroup`` out of a nursery where
|
||||
def test_multierror(arb_addr):
|
||||
"""Verify we raise a ``trio.MultiError`` out of a nursery where
|
||||
more than one actor errors.
|
||||
|
||||
'''
|
||||
"""
|
||||
async def main():
|
||||
async with tractor.open_nursery(
|
||||
registry_addrs=[reg_addr],
|
||||
arbiter_addr=arb_addr,
|
||||
) as nursery:
|
||||
|
||||
await nursery.run_in_actor(assert_err, name='errorer1')
|
||||
|
@ -130,14 +86,14 @@ def test_multierror(
|
|||
try:
|
||||
await portal2.result()
|
||||
except tractor.RemoteActorError as err:
|
||||
assert err.boxed_type is AssertionError
|
||||
assert err.type == AssertionError
|
||||
print("Look Maa that first actor failed hard, hehh")
|
||||
raise
|
||||
|
||||
# here we should get a ``BaseExceptionGroup`` containing exceptions
|
||||
# here we should get a `trio.MultiError` containing exceptions
|
||||
# from both subactors
|
||||
|
||||
with pytest.raises(BaseExceptionGroup):
|
||||
with pytest.raises(trio.MultiError):
|
||||
trio.run(main)
|
||||
|
||||
|
||||
|
@ -145,14 +101,14 @@ def test_multierror(
|
|||
@pytest.mark.parametrize(
|
||||
'num_subactors', range(25, 26),
|
||||
)
|
||||
def test_multierror_fast_nursery(reg_addr, start_method, num_subactors, delay):
|
||||
"""Verify we raise a ``BaseExceptionGroup`` out of a nursery where
|
||||
def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay):
|
||||
"""Verify we raise a ``trio.MultiError`` out of a nursery where
|
||||
more than one actor errors and also with a delay before failure
|
||||
to test failure during an ongoing spawning.
|
||||
"""
|
||||
async def main():
|
||||
async with tractor.open_nursery(
|
||||
registry_addrs=[reg_addr],
|
||||
arbiter_addr=arb_addr,
|
||||
) as nursery:
|
||||
|
||||
for i in range(num_subactors):
|
||||
|
@ -162,27 +118,22 @@ def test_multierror_fast_nursery(reg_addr, start_method, num_subactors, delay):
|
|||
delay=delay
|
||||
)
|
||||
|
||||
# with pytest.raises(trio.MultiError) as exc_info:
|
||||
with pytest.raises(BaseExceptionGroup) as exc_info:
|
||||
with pytest.raises(trio.MultiError) as exc_info:
|
||||
trio.run(main)
|
||||
|
||||
assert exc_info.type == ExceptionGroup
|
||||
assert exc_info.type == tractor.MultiError
|
||||
err = exc_info.value
|
||||
exceptions = err.exceptions
|
||||
|
||||
if len(exceptions) == 2:
|
||||
# sometimes oddly now there's an embedded BrokenResourceError ?
|
||||
for exc in exceptions:
|
||||
excs = getattr(exc, 'exceptions', None)
|
||||
if excs:
|
||||
exceptions = excs
|
||||
break
|
||||
exceptions = exceptions[1].exceptions
|
||||
|
||||
assert len(exceptions) == num_subactors
|
||||
|
||||
for exc in exceptions:
|
||||
assert isinstance(exc, tractor.RemoteActorError)
|
||||
assert exc.boxed_type is AssertionError
|
||||
assert exc.type == AssertionError
|
||||
|
||||
|
||||
async def do_nothing():
|
||||
|
@ -190,20 +141,15 @@ async def do_nothing():
|
|||
|
||||
|
||||
@pytest.mark.parametrize('mechanism', ['nursery_cancel', KeyboardInterrupt])
|
||||
def test_cancel_single_subactor(reg_addr, mechanism):
|
||||
'''
|
||||
Ensure a ``ActorNursery.start_actor()`` spawned subactor
|
||||
def test_cancel_single_subactor(arb_addr, mechanism):
|
||||
"""Ensure a ``ActorNursery.start_actor()`` spawned subactor
|
||||
cancels when the nursery is cancelled.
|
||||
|
||||
'''
|
||||
"""
|
||||
async def spawn_actor():
|
||||
'''
|
||||
Spawn an actor that blocks indefinitely then cancel via
|
||||
either `ActorNursery.cancel()` or an exception raise.
|
||||
|
||||
'''
|
||||
"""Spawn an actor that blocks indefinitely.
|
||||
"""
|
||||
async with tractor.open_nursery(
|
||||
registry_addrs=[reg_addr],
|
||||
arbiter_addr=arb_addr,
|
||||
) as nursery:
|
||||
|
||||
portal = await nursery.start_actor(
|
||||
|
@ -259,8 +205,8 @@ async def test_cancel_infinite_streamer(start_method):
|
|||
[
|
||||
# daemon actors sit idle while single task actors error out
|
||||
(1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None),
|
||||
(2, BaseExceptionGroup, AssertionError, (assert_err, {}), None),
|
||||
(3, BaseExceptionGroup, AssertionError, (assert_err, {}), None),
|
||||
(2, tractor.MultiError, AssertionError, (assert_err, {}), None),
|
||||
(3, tractor.MultiError, AssertionError, (assert_err, {}), None),
|
||||
|
||||
# 1 daemon actor errors out while single task actors sleep forever
|
||||
(3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}),
|
||||
|
@ -271,7 +217,7 @@ async def test_cancel_infinite_streamer(start_method):
|
|||
(do_nuthin, {}), (assert_err, {'delay': 1}, True)),
|
||||
# daemons complete quickly while single task
|
||||
# actors error after a brief delay
|
||||
(3, BaseExceptionGroup, AssertionError,
|
||||
(3, tractor.MultiError, AssertionError,
|
||||
(assert_err, {'delay': 1}), (do_nuthin, {}, False)),
|
||||
],
|
||||
ids=[
|
||||
|
@ -323,7 +269,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
|
|||
await portal.run(func, **kwargs)
|
||||
|
||||
except tractor.RemoteActorError as err:
|
||||
assert err.boxed_type == err_type
|
||||
assert err.type == err_type
|
||||
# we only expect this first error to propagate
|
||||
# (all other daemons are cancelled before they
|
||||
# can be scheduled)
|
||||
|
@ -338,15 +284,15 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel):
|
|||
# should error here with a ``RemoteActorError`` or ``MultiError``
|
||||
|
||||
except first_err as err:
|
||||
if isinstance(err, BaseExceptionGroup):
|
||||
if isinstance(err, tractor.MultiError):
|
||||
assert len(err.exceptions) == num_actors
|
||||
for exc in err.exceptions:
|
||||
if isinstance(exc, tractor.RemoteActorError):
|
||||
assert exc.boxed_type == err_type
|
||||
assert exc.type == err_type
|
||||
else:
|
||||
assert isinstance(exc, trio.Cancelled)
|
||||
elif isinstance(err, tractor.RemoteActorError):
|
||||
assert err.boxed_type == err_type
|
||||
assert err.type == err_type
|
||||
|
||||
assert n.cancelled is True
|
||||
assert not n._children
|
||||
|
@ -381,12 +327,10 @@ async def spawn_and_error(breadth, depth) -> None:
|
|||
|
||||
@tractor_test
|
||||
async def test_nested_multierrors(loglevel, start_method):
|
||||
'''
|
||||
Test that failed actor sets are wrapped in `BaseExceptionGroup`s. This
|
||||
test goes only 2 nurseries deep but we should eventually have tests
|
||||
"""Test that failed actor sets are wrapped in `trio.MultiError`s.
|
||||
This test goes only 2 nurseries deep but we should eventually have tests
|
||||
for arbitrary n-depth actor trees.
|
||||
|
||||
'''
|
||||
"""
|
||||
if start_method == 'trio':
|
||||
depth = 3
|
||||
subactor_breadth = 2
|
||||
|
@ -410,37 +354,25 @@ async def test_nested_multierrors(loglevel, start_method):
|
|||
breadth=subactor_breadth,
|
||||
depth=depth,
|
||||
)
|
||||
except BaseExceptionGroup as err:
|
||||
except trio.MultiError as err:
|
||||
assert len(err.exceptions) == subactor_breadth
|
||||
for subexc in err.exceptions:
|
||||
|
||||
# verify first level actor errors are wrapped as remote
|
||||
if is_win():
|
||||
if platform.system() == 'Windows':
|
||||
|
||||
# windows is often too slow and cancellation seems
|
||||
# to happen before an actor is spawned
|
||||
if isinstance(subexc, trio.Cancelled):
|
||||
continue
|
||||
|
||||
elif isinstance(subexc, tractor.RemoteActorError):
|
||||
else:
|
||||
# on windows it seems we can't exactly be sure wtf
|
||||
# will happen..
|
||||
assert subexc.boxed_type in (
|
||||
assert subexc.type in (
|
||||
tractor.RemoteActorError,
|
||||
trio.Cancelled,
|
||||
BaseExceptionGroup,
|
||||
trio.MultiError
|
||||
)
|
||||
|
||||
elif isinstance(subexc, BaseExceptionGroup):
|
||||
for subsub in subexc.exceptions:
|
||||
|
||||
if subsub in (tractor.RemoteActorError,):
|
||||
subsub = subsub.boxed_type
|
||||
|
||||
assert type(subsub) in (
|
||||
trio.Cancelled,
|
||||
BaseExceptionGroup,
|
||||
)
|
||||
else:
|
||||
assert isinstance(subexc, tractor.RemoteActorError)
|
||||
|
||||
|
@ -448,21 +380,14 @@ async def test_nested_multierrors(loglevel, start_method):
|
|||
# XXX not sure what's up with this..
|
||||
# on windows sometimes spawning is just too slow and
|
||||
# we get back the (sent) cancel signal instead
|
||||
if is_win():
|
||||
if isinstance(subexc, tractor.RemoteActorError):
|
||||
assert subexc.boxed_type in (
|
||||
BaseExceptionGroup,
|
||||
tractor.RemoteActorError
|
||||
)
|
||||
else:
|
||||
assert isinstance(subexc, BaseExceptionGroup)
|
||||
if platform.system() == 'Windows':
|
||||
assert (subexc.type is trio.MultiError) or (
|
||||
subexc.type is tractor.RemoteActorError)
|
||||
else:
|
||||
assert subexc.boxed_type is ExceptionGroup
|
||||
assert subexc.type is trio.MultiError
|
||||
else:
|
||||
assert subexc.boxed_type in (
|
||||
tractor.RemoteActorError,
|
||||
trio.Cancelled
|
||||
)
|
||||
assert (subexc.type is tractor.RemoteActorError) or (
|
||||
subexc.type is trio.Cancelled)
|
||||
|
||||
|
||||
@no_windows
|
||||
|
@ -480,7 +405,7 @@ def test_cancel_via_SIGINT(
|
|||
with trio.fail_after(2):
|
||||
async with tractor.open_nursery() as tn:
|
||||
await tn.start_actor('sucka')
|
||||
if 'mp' in spawn_backend:
|
||||
if spawn_backend == 'mp':
|
||||
time.sleep(0.1)
|
||||
os.kill(pid, signal.SIGINT)
|
||||
await trio.sleep_forever()
|
||||
|
@ -500,13 +425,8 @@ def test_cancel_via_SIGINT_other_task(
|
|||
from a separate ``trio`` child task.
|
||||
"""
|
||||
pid = os.getpid()
|
||||
timeout: float = 2
|
||||
if is_win(): # smh
|
||||
timeout += 1
|
||||
|
||||
async def spawn_and_sleep_forever(
|
||||
task_status=trio.TASK_STATUS_IGNORED
|
||||
):
|
||||
async def spawn_and_sleep_forever(task_status=trio.TASK_STATUS_IGNORED):
|
||||
async with tractor.open_nursery() as tn:
|
||||
for i in range(3):
|
||||
await tn.run_in_actor(
|
||||
|
@ -518,19 +438,16 @@ def test_cancel_via_SIGINT_other_task(
|
|||
|
||||
async def main():
|
||||
# should never timeout since SIGINT should cancel the current program
|
||||
with trio.fail_after(timeout):
|
||||
async with trio.open_nursery(
|
||||
strict_exception_groups=False,
|
||||
) as n:
|
||||
with trio.fail_after(2):
|
||||
async with trio.open_nursery() as n:
|
||||
await n.start(spawn_and_sleep_forever)
|
||||
if 'mp' in spawn_backend:
|
||||
if spawn_backend == 'mp':
|
||||
time.sleep(0.1)
|
||||
os.kill(pid, signal.SIGINT)
|
||||
|
||||
with pytest.raises(KeyboardInterrupt):
|
||||
trio.run(main)
|
||||
|
||||
|
||||
async def spin_for(period=3):
|
||||
"Sync sleep."
|
||||
time.sleep(period)
|
||||
|
@ -569,63 +486,3 @@ def test_cancel_while_childs_child_in_sync_sleep(
|
|||
|
||||
with pytest.raises(AssertionError):
|
||||
trio.run(main)
|
||||
|
||||
|
||||
def test_fast_graceful_cancel_when_spawn_task_in_soft_proc_wait_for_daemon(
|
||||
start_method,
|
||||
):
|
||||
'''
|
||||
This is a very subtle test which demonstrates how cancellation
|
||||
during process collection can result in non-optimal teardown
|
||||
performance on daemon actors. The fix for this test was to handle
|
||||
``trio.Cancelled`` specially in the spawn task waiting in
|
||||
`proc.wait()` such that ``Portal.cancel_actor()`` is called before
|
||||
executing the "hard reap" sequence (which has an up to 3 second
|
||||
delay currently).
|
||||
|
||||
In other words, if we can cancel the actor using a graceful remote
|
||||
cancellation, and it's faster, we might as well do it.
|
||||
|
||||
'''
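# A rough sketch (hypothetical `proc`/`portal` args, not the real
# spawn machinery) of the fix described above: on cancellation of
# the soft `proc.wait()`, attempt a bounded graceful remote cancel
# before the (slow) hard-reap sequence runs.
async def _graceful_reap_sketch(proc, portal):
    try:
        await proc.wait()  # "soft" wait on the subproc
    except trio.Cancelled:
        # graceful: ask the actor to exit over IPC first, bounded
        # so a hung peer can't stall teardown indefinitely.
        with trio.move_on_after(0.5):
            await portal.cancel_actor()
        raise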
|
||||
kbi_delay = 0.5
|
||||
timeout: float = 2.9
|
||||
|
||||
if is_win(): # smh
|
||||
timeout += 1
|
||||
|
||||
async def main():
|
||||
start = time.time()
|
||||
try:
|
||||
async with trio.open_nursery() as nurse:
|
||||
async with tractor.open_nursery() as tn:
|
||||
p = await tn.start_actor(
|
||||
'fast_boi',
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
|
||||
async def delayed_kbi():
|
||||
await trio.sleep(kbi_delay)
|
||||
print(f'RAISING KBI after {kbi_delay} s')
|
||||
raise KeyboardInterrupt
|
||||
|
||||
# start task which raises a kbi **after**
|
||||
# the actor nursery ``__aexit__()`` has
|
||||
# been run.
|
||||
nurse.start_soon(delayed_kbi)
|
||||
|
||||
await p.run(do_nuthin)
|
||||
|
||||
# need to explicitly re-raise the lone kbi..now
|
||||
except* KeyboardInterrupt as kbi_eg:
|
||||
assert (len(excs := kbi_eg.exceptions) == 1)
|
||||
raise excs[0]
|
||||
|
||||
finally:
|
||||
duration = time.time() - start
|
||||
if duration > timeout:
|
||||
raise trio.TooSlowError(
|
||||
'daemon cancel was slower than necessary..'
|
||||
)
|
||||
|
||||
with pytest.raises(KeyboardInterrupt):
|
||||
trio.run(main)
|
||||
|
|
|
@ -1,176 +0,0 @@
|
|||
'''
|
||||
Test a service style daemon that maintains a nursery for spawning
|
||||
"remote async tasks" including both spawning other long living
|
||||
sub-sub-actor daemons.
|
||||
|
||||
'''
|
||||
from typing import Optional
|
||||
import asyncio
|
||||
from contextlib import (
|
||||
asynccontextmanager as acm,
|
||||
aclosing,
|
||||
)
|
||||
|
||||
import pytest
|
||||
import trio
|
||||
import tractor
|
||||
from tractor import RemoteActorError
|
||||
|
||||
|
||||
async def aio_streamer(
|
||||
from_trio: asyncio.Queue,
|
||||
to_trio: trio.abc.SendChannel,
|
||||
) -> trio.abc.ReceiveChannel:
|
||||
|
||||
# required first msg to sync caller
|
||||
to_trio.send_nowait(None)
|
||||
|
||||
from itertools import cycle
|
||||
for i in cycle(range(10)):
|
||||
to_trio.send_nowait(i)
|
||||
await asyncio.sleep(0.01)
|
||||
|
||||
|
||||
async def trio_streamer():
|
||||
from itertools import cycle
|
||||
for i in cycle(range(10)):
|
||||
yield i
|
||||
await trio.sleep(0.01)
|
||||
|
||||
|
||||
async def trio_sleep_and_err(delay: float = 0.5):
|
||||
await trio.sleep(delay)
|
||||
# name error
|
||||
doggy() # noqa
|
||||
|
||||
|
||||
_cached_stream: Optional[
|
||||
trio.abc.ReceiveChannel
|
||||
] = None
|
||||
|
||||
|
||||
@acm
|
||||
async def wrapper_mngr(
|
||||
):
|
||||
from tractor.trionics import broadcast_receiver
|
||||
global _cached_stream
|
||||
in_aio = tractor.current_actor().is_infected_aio()
|
||||
|
||||
if in_aio:
|
||||
if _cached_stream:
|
||||
|
||||
from_aio = _cached_stream
|
||||
|
||||
# if we already have a cached feed deliver a rx side clone
|
||||
# to consumer
|
||||
async with broadcast_receiver(from_aio, 6) as from_aio:
|
||||
yield from_aio
|
||||
return
|
||||
else:
|
||||
async with tractor.to_asyncio.open_channel_from(
|
||||
aio_streamer,
|
||||
) as (first, from_aio):
|
||||
assert not first
|
||||
|
||||
# cache it so next task uses broadcast receiver
|
||||
_cached_stream = from_aio
|
||||
|
||||
yield from_aio
|
||||
else:
|
||||
async with aclosing(trio_streamer()) as stream:
|
||||
# cache it so next task uses broadcast receiver
|
||||
_cached_stream = stream
|
||||
yield stream
|
||||
|
||||
|
||||
_nursery: trio.Nursery = None
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def trio_main(
|
||||
ctx: tractor.Context,
|
||||
):
|
||||
# sync
|
||||
await ctx.started()
|
||||
|
||||
# stash a "service nursery" as "actor local" (aka a Python global)
|
||||
global _nursery
|
||||
tn = _nursery
|
||||
assert tn
|
||||
|
||||
async def consume_stream():
|
||||
async with wrapper_mngr() as stream:
|
||||
async for msg in stream:
|
||||
print(msg)
|
||||
|
||||
# run 2 tasks to ensure broadcaster chan use
|
||||
tn.start_soon(consume_stream)
|
||||
tn.start_soon(consume_stream)
|
||||
|
||||
tn.start_soon(trio_sleep_and_err)
|
||||
|
||||
await trio.sleep_forever()
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def open_actor_local_nursery(
|
||||
ctx: tractor.Context,
|
||||
):
|
||||
global _nursery
|
||||
async with trio.open_nursery(
|
||||
strict_exception_groups=False,
|
||||
) as tn:
|
||||
_nursery = tn
|
||||
await ctx.started()
|
||||
await trio.sleep(10)
|
||||
# await trio.sleep(1)
|
||||
|
||||
# XXX: this causes the hang since
|
||||
# the caller does not unblock from its own
|
||||
# ``trio.sleep_forever()``.
|
||||
|
||||
# TODO: we need to test a simple ctx task starting remote tasks
|
||||
# that error and then blocking on a ``Nursery.start()`` which
|
||||
# never yields back.. aka a scenario where the
|
||||
# ``tractor.context`` task IS NOT in the service n's cancel
|
||||
# scope.
|
||||
tn.cancel_scope.cancel()
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'asyncio_mode',
|
||||
[True, False],
|
||||
ids='asyncio_mode={}'.format,
|
||||
)
|
||||
def test_actor_managed_trio_nursery_task_error_cancels_aio(
|
||||
asyncio_mode: bool,
|
||||
reg_addr: tuple,
|
||||
):
|
||||
'''
|
||||
Verify that a ``trio`` nursery created managed in a child actor
|
||||
correctly relays errors to the parent actor when one of its spawned
|
||||
tasks errors even when running in infected asyncio mode and using
|
||||
broadcast receivers for multi-task-per-actor subscription.
|
||||
|
||||
'''
|
||||
async def main():
|
||||
|
||||
# cancel the nursery shortly after boot
|
||||
async with tractor.open_nursery() as n:
|
||||
p = await n.start_actor(
|
||||
'nursery_mngr',
|
||||
infect_asyncio=asyncio_mode, # TODO, is this enabling debug mode?
|
||||
enable_modules=[__name__],
|
||||
)
|
||||
async with (
|
||||
p.open_context(open_actor_local_nursery) as (ctx, first),
|
||||
p.open_context(trio_main) as (ctx, first),
|
||||
):
|
||||
await trio.sleep_forever()
|
||||
|
||||
with pytest.raises(RemoteActorError) as excinfo:
|
||||
trio.run(main)
|
||||
|
||||
# verify boxed error
|
||||
err = excinfo.value
|
||||
assert err.boxed_type is NameError
|
|
@ -1,82 +0,0 @@
|
|||
import itertools
|
||||
|
||||
import pytest
|
||||
import trio
|
||||
import tractor
|
||||
from tractor import open_actor_cluster
|
||||
from tractor.trionics import gather_contexts
|
||||
from tractor._testing import tractor_test
|
||||
|
||||
MESSAGE = 'tractoring at full speed'
|
||||
|
||||
|
||||
def test_empty_mngrs_input_raises() -> None:
|
||||
|
||||
async def main():
|
||||
with trio.fail_after(1):
|
||||
async with (
|
||||
open_actor_cluster(
|
||||
modules=[__name__],
|
||||
|
||||
# NOTE: ensure we can passthrough runtime opts
|
||||
loglevel='info',
|
||||
# debug_mode=True,
|
||||
|
||||
) as portals,
|
||||
|
||||
gather_contexts(
|
||||
# NOTE: it's the use of inline-generator syntax
|
||||
# here that causes the empty input.
|
||||
mngrs=(
|
||||
p.open_context(worker) for p in portals.values()
|
||||
),
|
||||
),
|
||||
):
|
||||
assert 0
|
||||
|
||||
with pytest.raises(ValueError):
|
||||
trio.run(main)
|
||||
|
||||
|
||||
@tractor.context
|
||||
async def worker(
|
||||
ctx: tractor.Context,
|
||||
|
||||
) -> None:
|
||||
|
||||
await ctx.started()
|
||||
|
||||
async with ctx.open_stream(
|
||||
allow_overruns=True,
|
||||
) as stream:
|
||||
|
||||
# TODO: this with the below assert causes a hang bug?
|
||||
# with trio.move_on_after(1):
|
||||
|
||||
async for msg in stream:
|
||||
# do something with msg
|
||||
print(msg)
|
||||
assert msg == MESSAGE
|
||||
|
||||
# TODO: does this ever cause a hang
|
||||
# assert 0
|
||||
|
||||
|
||||
@tractor_test
|
||||
async def test_streaming_to_actor_cluster() -> None:
|
||||
|
||||
async with (
|
||||
open_actor_cluster(modules=[__name__]) as portals,
|
||||
|
||||
gather_contexts(
|
||||
mngrs=[p.open_context(worker) for p in portals.values()],
|
||||
) as contexts,
|
||||
|
||||
gather_contexts(
|
||||
mngrs=[ctx[0].open_stream() for ctx in contexts],
|
||||
) as streams,
|
||||
|
||||
):
|
||||
with trio.move_on_after(1):
|
||||
for stream in itertools.cycle(streams):
|
||||
await stream.send(MESSAGE)
|
File diff suppressed because it is too large
|
@ -0,0 +1,501 @@
|
|||
"""
|
||||
That native debug better work!
|
||||
|
||||
All these tests can be understood (somewhat) by running the equivalent
|
||||
`examples/debugging/` scripts manually.
|
||||
|
||||
TODO: None of these tests have been run successfully on windows yet.
|
||||
"""
|
||||
import time
|
||||
from os import path
|
||||
|
||||
import pytest
|
||||
import pexpect
|
||||
|
||||
from conftest import repodir
|
||||
|
||||
|
||||
# TODO: The next great debugger audit could be done by you!
|
||||
# - recurrent entry to breakpoint() from single actor *after* and an
|
||||
# error in another task?
|
||||
# - root error before child errors
|
||||
# - root error after child errors
|
||||
# - root error before child breakpoint
|
||||
# - root error after child breakpoint
|
||||
# - recurrent root errors
|
||||
|
||||
|
||||
def examples_dir():
|
||||
"""Return the abspath to the examples directory.
|
||||
"""
|
||||
return path.join(repodir(), 'examples', 'debugging/')
|
||||
|
||||
|
||||
def mk_cmd(ex_name: str) -> str:
|
||||
"""Generate a command suitable to pass to ``pexpect.spawn()``.
|
||||
"""
|
||||
return ' '.join(
|
||||
['python',
|
||||
path.join(examples_dir(), f'{ex_name}.py')]
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def spawn(
|
||||
start_method,
|
||||
testdir,
|
||||
arb_addr,
|
||||
) -> 'pexpect.spawn':
|
||||
|
||||
if start_method != 'trio':
|
||||
pytest.skip(
|
||||
"Debugger tests are only supported on the trio backend"
|
||||
)
|
||||
|
||||
def _spawn(cmd):
|
||||
return testdir.spawn(
|
||||
cmd=mk_cmd(cmd),
|
||||
expect_timeout=3,
|
||||
)
|
||||
|
||||
return _spawn
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'user_in_out',
|
||||
[
|
||||
('c', 'AssertionError'),
|
||||
('q', 'AssertionError'),
|
||||
],
|
||||
ids=lambda item: f'{item[0]} -> {item[1]}',
|
||||
)
|
||||
def test_root_actor_error(spawn, user_in_out):
|
||||
"""Demonstrate crash handler entering pdbpp from basic error in root actor.
|
||||
"""
|
||||
user_input, expect_err_str = user_in_out
|
||||
|
||||
child = spawn('root_actor_error')
|
||||
|
||||
# scan for the pdbpp prompt
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
before = str(child.before.decode())
|
||||
|
||||
# make sure expected logging and error arrives
|
||||
assert "Attaching to pdb in crashed actor: ('root'" in before
|
||||
assert 'AssertionError' in before
|
||||
|
||||
# send user command
|
||||
child.sendline(user_input)
|
||||
|
||||
# process should exit
|
||||
child.expect(pexpect.EOF)
|
||||
assert expect_err_str in str(child.before)
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'user_in_out',
|
||||
[
|
||||
('c', None),
|
||||
('q', 'bdb.BdbQuit'),
|
||||
],
|
||||
ids=lambda item: f'{item[0]} -> {item[1]}',
|
||||
)
|
||||
def test_root_actor_bp(spawn, user_in_out):
|
||||
"""Demonstrate breakpoint from in root actor.
|
||||
"""
|
||||
user_input, expect_err_str = user_in_out
|
||||
child = spawn('root_actor_breakpoint')
|
||||
|
||||
# scan for the pdbpp prompt
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
assert 'Error' not in str(child.before)
|
||||
|
||||
# send user command
|
||||
child.sendline(user_input)
|
||||
child.expect('\r\n')
|
||||
|
||||
# process should exit
|
||||
child.expect(pexpect.EOF)
|
||||
|
||||
if expect_err_str is None:
|
||||
assert 'Error' not in str(child.before)
|
||||
else:
|
||||
assert expect_err_str in str(child.before)
|
||||
|
||||
|
||||
def test_root_actor_bp_forever(spawn):
|
||||
"Re-enter a breakpoint from the root actor-task."
|
||||
child = spawn('root_actor_breakpoint_forever')
|
||||
|
||||
# do some "next" commands to demonstrate recurrent breakpoint
|
||||
# entries
|
||||
for _ in range(10):
|
||||
child.sendline('next')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
# do one continue which should trigger a new task to lock the tty
|
||||
child.sendline('continue')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
# XXX: this previously caused a bug!
|
||||
child.sendline('n')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
child.sendline('n')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
|
||||
def test_subactor_error(spawn):
|
||||
"Single subactor raising an error"
|
||||
|
||||
child = spawn('subactor_error')
|
||||
|
||||
# scan for the pdbpp prompt
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching to pdb in crashed actor: ('name_error'" in before
|
||||
|
||||
# send user command
|
||||
# (in this case it's the same for 'continue' vs. 'quit')
|
||||
child.sendline('continue')
|
||||
|
||||
# the debugger should enter a second time in the nursery
|
||||
# creating actor
|
||||
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
before = str(child.before.decode())
|
||||
|
||||
# root actor gets debugger engaged
|
||||
assert "Attaching to pdb in crashed actor: ('root'" in before
|
||||
|
||||
# error is a remote error propagated from the subactor
|
||||
assert "RemoteActorError: ('name_error'" in before
|
||||
|
||||
child.sendline('c')
|
||||
child.expect('\r\n')
|
||||
|
||||
# process should exit
|
||||
child.expect(pexpect.EOF)
|
||||
|
||||
|
||||
def test_subactor_breakpoint(spawn):
|
||||
"Single subactor with an infinite breakpoint loop"
|
||||
|
||||
child = spawn('subactor_breakpoint')
|
||||
|
||||
# scan for the pdbpp prompt
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
|
||||
|
||||
# do some "next" commands to demonstrate recurrent breakpoint
|
||||
# entries
|
||||
for _ in range(10):
|
||||
child.sendline('next')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
# now run some "continues" to show re-entries
|
||||
for _ in range(5):
|
||||
child.sendline('continue')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
|
||||
|
||||
# finally quit the loop
|
||||
child.sendline('q')
|
||||
|
||||
# child process should exit but parent will capture pdb.BdbQuit
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "RemoteActorError: ('breakpoint_forever'" in before
|
||||
assert 'bdb.BdbQuit' in before
|
||||
|
||||
# quit the parent
|
||||
child.sendline('c')
|
||||
|
||||
# process should exit
|
||||
child.expect(pexpect.EOF)
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "RemoteActorError: ('breakpoint_forever'" in before
|
||||
assert 'bdb.BdbQuit' in before
|
||||
|
||||
|
||||
def test_multi_subactors(spawn):
|
||||
"""Multiple subactors, both erroring and breakpointing as well as
|
||||
a nested subactor erroring.
|
||||
"""
|
||||
child = spawn(r'multi_subactors')
|
||||
|
||||
# scan for the pdbpp prompt
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
|
||||
|
||||
# do some "next" commands to demonstrate recurrent breakpoint
|
||||
# entries
|
||||
for _ in range(10):
|
||||
child.sendline('next')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
# continue to next error
|
||||
child.sendline('c')
|
||||
|
||||
# first name_error failure
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
before = str(child.before.decode())
|
||||
assert "NameError" in before
|
||||
|
||||
# continue again
|
||||
child.sendline('c')
|
||||
|
||||
# 2nd name_error failure
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
before = str(child.before.decode())
|
||||
assert "NameError" in before
|
||||
|
||||
# breakpoint loop should re-engage
|
||||
child.sendline('c')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('breakpoint_forever'" in before
|
||||
|
||||
# now run some "continues" to show re-entries
|
||||
for _ in range(5):
|
||||
child.sendline('c')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
# quit the loop and expect parent to attach
|
||||
child.sendline('q')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching to pdb in crashed actor: ('root'" in before
|
||||
assert "RemoteActorError: ('breakpoint_forever'" in before
|
||||
assert 'bdb.BdbQuit' in before
|
||||
|
||||
# process should exit
|
||||
child.sendline('c')
|
||||
child.expect(pexpect.EOF)
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "RemoteActorError: ('breakpoint_forever'" in before
|
||||
assert 'bdb.BdbQuit' in before
|
||||
|
||||
|
||||
def test_multi_daemon_subactors(spawn, loglevel):
|
||||
"""Multiple daemon subactors, both erroring and breakpointing within a
|
||||
stream.
|
||||
"""
|
||||
child = spawn('multi_daemon_subactors')
|
||||
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('bp_forever'" in before
|
||||
|
||||
child.sendline('c')
|
||||
|
||||
# first name_error failure
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
before = str(child.before.decode())
|
||||
assert "NameError" in before
|
||||
|
||||
# XXX: hooray, the root clobbering the child here was fixed!
|
||||
|
||||
# now the root actor won't clobber the bp_forever child
|
||||
# during its first access to the debug lock, but will instead
|
||||
# wait for the lock to release, via the edge-triggered
|
||||
# ``_debug._no_remote_has_tty`` event before sending cancel messages
|
||||
# (via portals) to its underlings B)
|
||||
|
||||
# IMO, this demonstrates the true power of SC system design.
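# A minimal sketch (assumed names, not tractor's real internals) of
# that edge-triggered wait: the root parks on an event which the
# child sets once it exits its pdb REPL, and only then cancels.
import trio

async def _wait_for_tty_release_sketch(no_remote_has_tty, portal):
    await no_remote_has_tty.wait()  # a `trio.Event` set on pdb exit
    await portal.cancel_actor()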
|
||||
child.sendline('c')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
before = str(child.before.decode())
|
||||
assert "Attaching pdb to actor: ('bp_forever'," in before
|
||||
|
||||
|
||||
child.sendline('c')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
before = str(child.before.decode())
|
||||
|
||||
try:
|
||||
# final error in root
|
||||
assert "tractor._exceptions.RemoteActorError: ('name_error'" in before
|
||||
|
||||
except AssertionError:
|
||||
# except pexpect.exceptions.TIMEOUT:
|
||||
|
||||
# one last entry in the root
|
||||
child.sendline('c')
|
||||
child.expect(r"\(Pdb\+\+\)")
|
||||
before = str(child.before.decode())
|
||||
assert "tractor._exceptions.RemoteActorError: ('name_error'" in before
|
||||
|
||||
# theory there should have been some msg like this from
|
||||
# root announcing it avoided a clobber of the child's lock,
|
||||
# but it seems unreliable in testing here to gnab it.
|
||||
# assert "in use by child ('bp_forever'," in before
|
||||
|
||||
child.sendline('c')
|
||||
# final error in root
|
||||
assert "tractor._exceptions.RemoteActorError: ('name_error'" in before
|
||||
child.expect(pexpect.EOF)
|
||||
|
||||
|
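The "no clobber" behavior asserted above hinges on the root waiting for an
edge-triggered event before firing cancels at its children. A minimal sketch
of that idea using a plain `trio.Event` (the names here are assumptions for
illustration, not tractor's actual `_debug` internals):

    import trio

    no_remote_has_tty = trio.Event()

    async def child_in_debugger():
        await trio.sleep(0.1)    # stand-in for an interactive pdb session
        no_remote_has_tty.set()  # the "edge": tty lock released, safe to cancel

    async def root_cancels_children(cancel_scope: trio.CancelScope):
        await no_remote_has_tty.wait()  # block until the release edge fires
        cancel_scope.cancel()           # only now send cancels to underlings

    async def main():
        async with trio.open_nursery() as n:
            n.start_soon(child_in_debugger)
            n.start_soon(root_cancels_children, n.cancel_scope)

    trio.run(main)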
def test_multi_subactors_root_errors(spawn):
    """Multiple subactors, both erroring and breakpointing as well as
    a nested subactor erroring.
    """
    child = spawn('multi_subactor_root_errors')

    # scan for the pdbpp prompt
    child.expect(r"\(Pdb\+\+\)")

    # at most one subactor should attach before the root is cancelled
    before = str(child.before.decode())
    assert "NameError: name 'doggypants' is not defined" in before

    # continue again
    child.sendline('c')
    child.expect(r"\(Pdb\+\+\)")

    # should now get attached in root with assert error
    before = str(child.before.decode())

    # should have come just after the prior prompt
    assert "Attaching to pdb in crashed actor: ('root'" in before
    assert "AssertionError" in before

    # warning asserts we probably don't need
    # assert "Cancelling nursery in ('spawn_error'," in before

    # continue again
    child.sendline('c')
    child.expect(pexpect.EOF)

    before = str(child.before.decode())
    assert "AssertionError" in before


def test_multi_nested_subactors_error_through_nurseries(spawn):
    """Verify deeply nested actors that error trigger debugger entries
    at each actor nursery (level) all the way up the tree.
    """
    # NOTE: previously, inside this script was a bug where if the
    # parent errors before a 2-levels-lower actor has released the lock,
    # the parent tries to cancel it but it's stuck in the debugger?
    # A test (below) has now been added to explicitly verify this is
    # fixed.

    child = spawn('multi_nested_subactors_error_up_through_nurseries')

    # startup time can be iffy
    # time.sleep(1)

    timed_out_early = False
    for i in range(12):
        try:
            child.expect(r"\(Pdb\+\+\)")
            child.sendline('c')
            time.sleep(0.1)

        except (
            pexpect.exceptions.EOF,
            pexpect.exceptions.TIMEOUT,
        ):
            # races all over..

            print(f"Failed early on {i}?")
            before = str(child.before.decode())

            timed_out_early = True

            # race conditions on how fast the continue is sent?
            break

    child.expect(pexpect.EOF)

    if not timed_out_early:
        before = str(child.before.decode())
        assert "NameError" in before


def test_root_nursery_cancels_before_child_releases_tty_lock(
    spawn,
    start_method
):
    """Test that when the root sends a cancel message before a nested
    child has unblocked (which can happen when it has the tty lock and
    is engaged in pdb) it is indeed cancelled after exiting the debugger.
    """
    timed_out_early = False

    child = spawn('root_cancelled_but_child_is_in_tty_lock')

    child.expect(r"\(Pdb\+\+\)")

    before = str(child.before.decode())
    assert "NameError: name 'doggypants' is not defined" in before
    assert "tractor._exceptions.RemoteActorError: ('name_error'" not in before
    time.sleep(0.5)

    child.sendline('c')

    for i in range(4):
        time.sleep(0.5)
        try:
            child.expect(r"\(Pdb\+\+\)")

        except (
            pexpect.exceptions.EOF,
            pexpect.exceptions.TIMEOUT,
        ):
            # races all over..

            print(f"Failed early on {i}?")
            before = str(child.before.decode())

            timed_out_early = True

            # race conditions on how fast the continue is sent?
            break

        before = str(child.before.decode())
        assert "NameError: name 'doggypants' is not defined" in before

        child.sendline('c')

    child.expect(pexpect.EOF)

    if not timed_out_early:

        before = str(child.before.decode())
        assert "tractor._exceptions.RemoteActorError: ('spawner0'" in before
        assert "tractor._exceptions.RemoteActorError: ('name_error'" in before
        assert "NameError: name 'doggypants' is not defined" in before


def test_root_cancels_child_context_during_startup(
    spawn,
):
    '''Verify a fast fail in the root doesn't lock up the child reaping
    and all while using the new context api.

    '''
    child = spawn('fast_error_in_root_after_spawn')

    child.expect(r"\(Pdb\+\+\)")

    before = str(child.before.decode())
    assert "AssertionError" in before

    child.sendline('c')
    child.expect(pexpect.EOF)
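All of the debugger tests above share one driver pattern: spawn the example
script in a pty via `pexpect`, block on the `pdb++` prompt regex, inspect the
captured output, then drive the REPL with `sendline()`. A minimal sketch (the
script path here is hypothetical):

    import pexpect

    child = pexpect.spawn('python examples/debugging/some_script.py')
    child.expect(r"\(Pdb\+\+\)")         # block until the prompt appears
    before = str(child.before.decode())  # output captured before the prompt
    child.sendline('c')                  # issue a debugger "continue"
    child.expect(pexpect.EOF)            # then wait for the process to exit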
@@ -9,24 +9,25 @@ import itertools

import pytest
import tractor
from tractor._testing import tractor_test
import trio

from conftest import tractor_test


@tractor_test
async def test_reg_then_unreg(reg_addr):
async def test_reg_then_unreg(arb_addr):
    actor = tractor.current_actor()
    assert actor.is_arbiter
    assert len(actor._registry) == 1  # only self is registered

    async with tractor.open_nursery(
        registry_addrs=[reg_addr],
        arbiter_addr=arb_addr,
    ) as n:

        portal = await n.start_actor('actor', enable_modules=[__name__])
        uid = portal.channel.uid

        async with tractor.get_registry(*reg_addr) as aportal:
        async with tractor.get_arbiter(*arb_addr) as aportal:
            # this local actor should be the arbiter
            assert actor is aportal.actor


@@ -41,7 +42,7 @@ async def test_reg_then_unreg(reg_addr):

    await trio.sleep(0.1)
    assert uid not in aportal.actor._registry
    sockaddrs = actor._registry.get(uid)
    sockaddrs = actor._registry[uid]
    assert not sockaddrs


@@ -52,27 +53,15 @@ async def hi():
    return the_line.format(tractor.current_actor().name)


async def say_hello(
    other_actor: str,
    reg_addr: tuple[str, int],
):
async def say_hello(other_actor):
    await trio.sleep(1)  # wait for other actor to spawn
    async with tractor.find_actor(
        other_actor,
        registry_addrs=[reg_addr],
    ) as portal:
    async with tractor.find_actor(other_actor) as portal:
        assert portal is not None
        return await portal.run(__name__, 'hi')


async def say_hello_use_wait(
    other_actor: str,
    reg_addr: tuple[str, int],
):
    async with tractor.wait_for_actor(
        other_actor,
        registry_addr=reg_addr,
    ) as portal:
async def say_hello_use_wait(other_actor):
    async with tractor.wait_for_actor(other_actor) as portal:
        assert portal is not None
        result = await portal.run(__name__, 'hi')
        return result

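The two helpers above exercise the discovery API's two lookup modes: as the
`assert portal is not None` checks suggest, `find_actor()` can yield `None`
when no matching name is registered while `wait_for_actor()` blocks until one
appears. A hedged sketch of the difference (exact semantics live in tractor's
discovery module):

    import tractor

    async def discover(name: str):
        async with tractor.find_actor(name) as maybe_portal:
            if maybe_portal is None:
                print(f'{name} not (yet) registered')
        async with tractor.wait_for_actor(name) as portal:
            # guaranteed a live portal once we get here
            return await portal.run(__name__, 'hi')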
@@ -80,29 +69,21 @@ async def say_hello_use_wait(

@tractor_test
@pytest.mark.parametrize('func', [say_hello, say_hello_use_wait])
async def test_trynamic_trio(
    func,
    start_method,
    reg_addr,
):
    '''
    Root actor acting as the "director" and running one-shot-task-actors
    for the directed subs.

    '''
async def test_trynamic_trio(func, start_method, arb_addr):
    """Main tractor entry point, the "master" process (for now
    acts as the "director").
    """
    async with tractor.open_nursery() as n:
        print("Alright... Action!")

        donny = await n.run_in_actor(
            func,
            other_actor='gretchen',
            reg_addr=reg_addr,
            name='donny',
        )
        gretchen = await n.run_in_actor(
            func,
            other_actor='donny',
            reg_addr=reg_addr,
            name='gretchen',
        )
        print(await gretchen.result())

@@ -135,32 +116,17 @@ async def stream_from(portal):
    print(value)


async def unpack_reg(actor_or_portal):
    '''
    Get and unpack a "registry" RPC request from the "arbiter" registry
    system.

    '''
    if getattr(actor_or_portal, 'get_registry', None):
        msg = await actor_or_portal.get_registry()
    else:
        msg = await actor_or_portal.run_from_ns('self', 'get_registry')

    return {tuple(key.split('.')): val for key, val in msg.items()}

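The comprehension at the end of `unpack_reg()` turns the wire-format registry
(string keys of the form 'name.uuid') back into uid-tuple keys. A tiny worked
example of just that transform (sample values invented):

    msg = {'actor.1234abcd': ('127.0.0.1', 1616)}
    registry = {tuple(key.split('.')): val for key, val in msg.items()}
    assert registry[('actor', '1234abcd')] == ('127.0.0.1', 1616)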
async def spawn_and_check_registry(
    reg_addr: tuple,
    arb_addr: tuple,
    use_signal: bool,
    remote_arbiter: bool = False,
    with_streaming: bool = False,

) -> None:

    async with tractor.open_root_actor(
        registry_addrs=[reg_addr],
        arbiter_addr=arb_addr,
    ):
        async with tractor.get_registry(*reg_addr) as portal:
        async with tractor.get_arbiter(*arb_addr) as portal:
            # runtime needs to be up to call this
            actor = tractor.current_actor()

@@ -168,11 +134,13 @@ async def spawn_and_check_registry(
            assert not actor.is_arbiter

            if actor.is_arbiter:
                extra = 1  # arbiter is local root actor
                get_reg = partial(unpack_reg, actor)

                async def get_reg():
                    return actor._registry

                extra = 1  # arbiter is local root actor
            else:
                get_reg = partial(unpack_reg, portal)
                get_reg = partial(portal.run_from_ns, 'self', 'get_registry')
                extra = 2  # local root actor + remote arbiter

            # ensure current actor is registered

@@ -181,9 +149,7 @@ async def spawn_and_check_registry(

            try:
                async with tractor.open_nursery() as n:
                    async with trio.open_nursery(
                        strict_exception_groups=False,
                    ) as trion:
                    async with trio.open_nursery() as trion:

                        portals = {}
                        for i in range(3):

@@ -221,12 +187,13 @@ async def spawn_and_check_registry(
                await cancel(use_signal)

            finally:
                await trio.sleep(0.5)
                with trio.CancelScope(shield=True):
                    await trio.sleep(0.5)

                    # all subactors should have de-registered
                    registry = await get_reg()
                    assert len(registry) == extra
                    assert actor.uid in registry
                # all subactors should have de-registered
                registry = await get_reg()
                assert len(registry) == extra
                assert actor.uid in registry


@pytest.mark.parametrize('use_signal', [False, True])

@@ -234,19 +201,17 @@ async def spawn_and_check_registry(
def test_subactors_unregister_on_cancel(
    start_method,
    use_signal,
    reg_addr,
    arb_addr,
    with_streaming,
):
    '''
    Verify that cancelling a nursery results in all subactors
    """Verify that cancelling a nursery results in all subactors
    deregistering themselves with the arbiter.

    '''
    """
    with pytest.raises(KeyboardInterrupt):
        trio.run(
            partial(
                spawn_and_check_registry,
                reg_addr,
                arb_addr,
                use_signal,
                remote_arbiter=False,
                with_streaming=with_streaming,

@@ -260,7 +225,7 @@ def test_subactors_unregister_on_cancel_remote_daemon(
    daemon,
    start_method,
    use_signal,
    reg_addr,
    arb_addr,
    with_streaming,
):
    """Verify that cancelling a nursery results in all subactors

@@ -271,7 +236,7 @@ def test_subactors_unregister_on_cancel_remote_daemon(
        trio.run(
            partial(
                spawn_and_check_registry,
                reg_addr,
                arb_addr,
                use_signal,
                remote_arbiter=True,
                with_streaming=with_streaming,

@@ -285,7 +250,7 @@ async def streamer(agen):


async def close_chans_before_nursery(
    reg_addr: tuple,
    arb_addr: tuple,
    use_signal: bool,
    remote_arbiter: bool = False,
) -> None:

@@ -298,11 +263,11 @@ async def close_chans_before_nursery(
        entries_at_end = 1

    async with tractor.open_root_actor(
        registry_addrs=[reg_addr],
        arbiter_addr=arb_addr,
    ):
        async with tractor.get_registry(*reg_addr) as aportal:
        async with tractor.get_arbiter(*arb_addr) as aportal:
            try:
                get_reg = partial(unpack_reg, aportal)
                get_reg = partial(aportal.run_from_ns, 'self', 'get_registry')

                async with tractor.open_nursery() as tn:
                    portal1 = await tn.start_actor(

@@ -312,15 +277,11 @@ async def close_chans_before_nursery(

                    # TODO: compact this back as was in last commit once
                    # 3.9+, see https://github.com/goodboy/tractor/issues/207
                    async with portal1.open_stream_from(
                        stream_forever
                    ) as agen1:
                    async with portal1.open_stream_from(stream_forever) as agen1:
                        async with portal2.open_stream_from(
                            stream_forever
                        ) as agen2:
                            async with trio.open_nursery(
                                strict_exception_groups=False,
                            ) as n:
                            async with trio.open_nursery() as n:
                                n.start_soon(streamer, agen1)
                                n.start_soon(cancel, use_signal, .5)
                                try:

@@ -332,9 +293,8 @@ async def close_chans_before_nursery(
                                    # reliably triggered by an external SIGINT.
                                    # tractor.current_actor()._root_nursery.cancel_scope.cancel()

                                    # XXX: THIS IS THE KEY THING that
                                    # happens **before** exiting the
                                    # actor nursery block
                                    # XXX: THIS IS THE KEY THING that happens
                                    # **before** exiting the actor nursery block

                                    # also kill off channels cuz why not
                                    await agen1.aclose()

@@ -354,7 +314,7 @@ async def close_chans_before_nursery(
def test_close_channel_explicit(
    start_method,
    use_signal,
    reg_addr,
    arb_addr,
):
    """Verify that closing a stream explicitly and killing the actor's
    "root nursery" **before** the containing nursery tears down also

@@ -364,7 +324,7 @@ def test_close_channel_explicit(
    trio.run(
        partial(
            close_chans_before_nursery,
            reg_addr,
            arb_addr,
            use_signal,
            remote_arbiter=False,
        ),

@@ -376,7 +336,7 @@ def test_close_channel_explicit_remote_arbiter(
    daemon,
    start_method,
    use_signal,
    reg_addr,
    arb_addr,
):
    """Verify that closing a stream explicitly and killing the actor's
    "root nursery" **before** the containing nursery tears down also

@@ -386,7 +346,7 @@ def test_close_channel_explicit_remote_arbiter(
    trio.run(
        partial(
            close_chans_before_nursery,
            reg_addr,
            arb_addr,
            use_signal,
            remote_arbiter=True,
        ),
@@ -1,7 +1,6 @@
'''
"""
Let's make sure them docs work yah?

'''
"""
from contextlib import contextmanager
import itertools
import os

@@ -11,17 +10,18 @@ import platform
import shutil

import pytest
from tractor._testing import (
    examples_dir,
)

from conftest import repodir


def examples_dir():
    """Return the abspath to the examples directory.
    """
    return os.path.join(repodir(), 'examples')


@pytest.fixture
def run_example_in_subproc(
    loglevel: str,
    testdir: pytest.Pytester,
    reg_addr: tuple[str, int],
):
def run_example_in_subproc(loglevel, testdir, arb_addr):

    @contextmanager
    def run(script_code):

@@ -31,8 +31,8 @@ def run_example_in_subproc(
        # on windows we need to create a special __main__.py which will
        # be executed with ``python -m <modulename>``..
        shutil.copyfile(
            examples_dir() / '__main__.py',
            str(testdir / '__main__.py'),
            os.path.join(examples_dir(), '__main__.py'),
            os.path.join(str(testdir), '__main__.py')
        )

        # drop the ``if __name__ == '__main__'`` guard onwards from

@@ -80,41 +80,24 @@ def run_example_in_subproc(
    'example_script',

    # walk yields: (dirpath, dirnames, filenames)
    [
        (p[0], f)
        for p in os.walk(examples_dir())
        for f in p[2]
    [(p[0], f) for p in os.walk(examples_dir()) for f in p[2]

        if (
            '__' not in f
            and f[0] != '_'
            and 'debugging' not in p[0]
            and 'integration' not in p[0]
            and 'advanced_faults' not in p[0]
            and 'multihost' not in p[0]
        )
        if '__' not in f
        and f[0] != '_'
        and 'debugging' not in p[0]
    ],
    ids=lambda t: t[1],
)
def test_example(
    run_example_in_subproc,
    example_script,
):
    '''
    Load and run scripts from this repo's ``examples/`` dir as a user
def test_example(run_example_in_subproc, example_script):
    """Load and run scripts from this repo's ``examples/`` dir as a user
    would copy and paste them into their editor.

    On windows a little more "finessing" is done to make
    ``multiprocessing`` play nice: we copy the ``__main__.py`` into the
    test directory and invoke the script as a module with ``python -m
    test_example``.

    '''
    ex_file: str = os.path.join(*example_script)

    if 'rpc_bidir_streaming' in ex_file and sys.version_info < (3, 9):
        pytest.skip("2-way streaming example requires py3.9 async with syntax")

    """
    ex_file = os.path.join(*example_script)
    with open(ex_file, 'r') as ex:
        code = ex.read()

@@ -125,20 +108,9 @@ def test_example(
    # print(f'STDOUT: {out}')

    # if we get some gnarly output let's aggregate and raise
    if err:
        errmsg = err.decode()
        errlines = errmsg.splitlines()
        last_error = errlines[-1]
        if (
            'Error' in last_error

            # XXX: currently we print this to console, but maybe
            # shouldn't eventually once we figure out what's
            # a better way to be explicit about aio side
            # cancels?
            and
            'asyncio.exceptions.CancelledError' not in last_error
        ):
            raise Exception(errmsg)
    errmsg = err.decode()
    errlines = errmsg.splitlines()
    if err and 'Error' in errlines[-1]:
        raise Exception(errmsg)

    assert proc.returncode == 0

@@ -1,946 +0,0 @@
'''
Low-level functional audits for our
"capability based messaging"-spec feats.

B~)

'''
from contextlib import (
    contextmanager as cm,
    # nullcontext,
)
import importlib
from typing import (
    Any,
    Type,
    Union,
)

from msgspec import (
    # structs,
    # msgpack,
    Raw,
    # Struct,
    ValidationError,
)
import pytest
import trio

import tractor
from tractor import (
    Actor,
    # _state,
    MsgTypeError,
    Context,
)
from tractor.msg import (
    _codec,
    _ctxvar_MsgCodec,
    _exts,

    NamespacePath,
    MsgCodec,
    MsgDec,
    mk_codec,
    mk_dec,
    apply_codec,
    current_codec,
)
from tractor.msg.types import (
    log,
    Started,
    # _payload_msgs,
    # PayloadMsg,
    # mk_msg_spec,
)
from tractor.msg._ops import (
    limit_plds,
)


def enc_nsp(obj: Any) -> Any:
    actor: Actor = tractor.current_actor(
        err_on_no_runtime=False,
    )
    uid: tuple[str, str]|None = None if not actor else actor.uid
    print(f'{uid} ENC HOOK')

    match obj:
        # case NamespacePath()|str():
        case NamespacePath():
            encoded: str = str(obj)
            print(
                f'----- ENCODING `NamespacePath` as `str` ------\n'
                f'|_obj:{type(obj)!r} = {obj!r}\n'
                f'|_encoded: str = {encoded!r}\n'
            )
            # if type(obj) != NamespacePath:
            #     breakpoint()
            return encoded
        case _:
            logmsg: str = (
                f'{uid}\n'
                'FAILED ENCODE\n'
                f'obj-> `{obj}: {type(obj)}`\n'
            )
            raise NotImplementedError(logmsg)


def dec_nsp(
    obj_type: Type,
    obj: Any,

) -> Any:
    # breakpoint()
    actor: Actor = tractor.current_actor(
        err_on_no_runtime=False,
    )
    uid: tuple[str, str]|None = None if not actor else actor.uid
    print(
        f'{uid}\n'
        'CUSTOM DECODE\n'
        f'type-arg-> {obj_type}\n'
        f'obj-arg-> `{obj}`: {type(obj)}\n'
    )
    nsp = None
    # XXX, never happens right?
    if obj_type is Raw:
        breakpoint()

    if (
        obj_type is NamespacePath
        and isinstance(obj, str)
        and ':' in obj
    ):
        nsp = NamespacePath(obj)
        # TODO: we could build a generic handler using
        # JUST matching the obj_type part?
        # nsp = obj_type(obj)

    if nsp:
        print(f'Returning NSP instance: {nsp}')
        return nsp

    logmsg: str = (
        f'{uid}\n'
        'FAILED DECODE\n'
        f'type-> {obj_type}\n'
        f'obj-arg-> `{obj}`: {type(obj)}\n\n'
        f'current codec:\n'
        f'{current_codec()}\n'
    )
    # TODO: figure out the ignore subsys for this!
    # -[ ] option whether to defense-relay back the msg
    #     inside an `Invalid`/`Ignore`
    # -[ ] how to make this handling pluggable such that a
    #     `Channel`/`MsgTransport` can intercept and process
    #     back msgs either via exception handling or some other
    #     signal?
    log.warning(logmsg)
    # NOTE: this delivers the invalid
    # value up to `msgspec`'s decoding
    # machinery for error raising.
    return obj
    # raise NotImplementedError(logmsg)

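The `enc_nsp()`/`dec_nsp()` pair above implement `msgspec`'s standard
extension-type protocol: the enc-hook maps a non-native object to a
wire-native value and the dec-hook rebuilds it on the way out. A
self-contained sketch of that protocol with a hypothetical `Point` type
(independent of tractor):

    from typing import Any, Type
    import msgspec

    class Point:
        def __init__(self, x: float, y: float):
            self.x, self.y = x, y
        def __eq__(self, other):
            return (self.x, self.y) == (other.x, other.y)

    def enc_hook(obj: Any) -> Any:
        if isinstance(obj, Point):
            return [obj.x, obj.y]  # encode as a plain 2-list
        raise NotImplementedError(f'cannot encode {type(obj)}')

    def dec_hook(typ: Type, obj: Any) -> Any:
        if typ is Point:
            return Point(*obj)  # rebuild from the 2-list form
        raise NotImplementedError(f'cannot decode {typ}')

    wire = msgspec.msgpack.encode(Point(1.0, 2.0), enc_hook=enc_hook)
    rt = msgspec.msgpack.decode(wire, type=Point, dec_hook=dec_hook)
    assert rt == Point(1.0, 2.0)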
def ex_func(*args):
    '''
    A mod level func we can ref and load via our `NamespacePath`
    python-object pointer `str` subtype.

    '''
    print(f'ex_func({args})')

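`ex_func` exists purely as a target for the `NamespacePath` round trips used
throughout this file; as the decode hook above implies, the nsp form is a
'<module>:<name>'-style `str` subtype. A short usage sketch:

    from tractor.msg import NamespacePath

    nsp = NamespacePath.from_ref(ex_func)
    assert ':' in str(nsp)            # e.g. 'some.module:ex_func'
    assert nsp.load_ref() is ex_func  # resolves back to the object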
@pytest.mark.parametrize(
    'add_codec_hooks',
    [
        True,
        False,
    ],
    ids=['use_codec_hooks', 'no_codec_hooks'],
)
def test_custom_extension_types(
    debug_mode: bool,
    add_codec_hooks: bool
):
    '''
    Verify that a `MsgCodec` (used for encoding all outbound IPC msgs
    and decoding all inbound `PayloadMsg`s) and a paired `MsgDec`
    (used for decoding the `PayloadMsg.pld: Raw` received within a given
    task's ipc `Context` scope) can both send and receive "extension types"
    as supported via custom converter hooks passed to `msgspec`.

    '''
    nsp_pld_dec: MsgDec = mk_dec(
        spec=None,  # ONLY support the ext type
        dec_hook=dec_nsp if add_codec_hooks else None,
        ext_types=[NamespacePath],
    )
    nsp_codec: MsgCodec = mk_codec(
        # ipc_pld_spec=Raw,  # default!

        # NOTE XXX: the encode hook MUST be used no matter what since
        # our `NamespacePath` is not an `Any`-native type nor
        # a `msgspec.Struct` subtype - so `msgspec` has no way to know
        # how to encode it unless we provide the custom hook.
        #
        # AGAIN that is, regardless of whether we spec an
        # `Any`-decoded-pld the enc has no knowledge (by default)
        # how to enc `NamespacePath` (nsp), so we add a custom
        # hook to do that ALWAYS.
        enc_hook=enc_nsp if add_codec_hooks else None,

        # XXX NOTE: pretty sure this is mutex with the `type=` to
        # `Decoder`? so it won't work in tandem with the
        # `ipc_pld_spec` passed above?
        ext_types=[NamespacePath],

        # TODO? is it useful to have the `.pld` decoded *prior* to
        # the `PldRx`?? like perf or mem related?
        # ext_dec=nsp_pld_dec,
    )
    if add_codec_hooks:
        assert nsp_codec.dec.dec_hook is None

        # TODO? if we pass `ext_dec` above?
        # assert nsp_codec.dec.dec_hook is dec_nsp

        assert nsp_codec.enc.enc_hook is enc_nsp

    nsp = NamespacePath.from_ref(ex_func)

    try:
        nsp_bytes: bytes = nsp_codec.encode(nsp)
        nsp_rt_sin_msg = nsp_pld_dec.decode(nsp_bytes)
        assert nsp_rt_sin_msg.load_ref() is ex_func
    except TypeError:
        if not add_codec_hooks:
            pass

    try:
        msg_bytes: bytes = nsp_codec.encode(
            Started(
                cid='cid',
                pld=nsp,
            )
        )
        # since the ext-type obj should also be set as the msg.pld
        assert nsp_bytes in msg_bytes
        started_rt: Started = nsp_codec.decode(msg_bytes)
        pld: Raw = started_rt.pld
        assert isinstance(pld, Raw)
        nsp_rt: NamespacePath = nsp_pld_dec.decode(pld)
        assert isinstance(nsp_rt, NamespacePath)
        # in obj comparison terms they should be the same
        assert nsp_rt == nsp
        # ensure we've decoded to ext type!
        assert nsp_rt.load_ref() is ex_func

    except TypeError:
        if not add_codec_hooks:
            pass

@tractor.context
async def sleep_forever_in_sub(
    ctx: Context,
) -> None:
    await trio.sleep_forever()


def mk_custom_codec(
    add_hooks: bool,

) -> tuple[
    MsgCodec,  # encode to send
    MsgDec,  # pld receive-n-decode
]:
    '''
    Create custom `msgpack` enc/dec-hooks and set a `Decoder`
    which only loads `pld_spec` (like `NamespacePath`) types.

    '''
    # XXX NOTE XXX: despite defining `NamespacePath` as a type
    # field on our `PayloadMsg.pld`, we still need an enc/dec_hook() pair
    # to cast to/from that type on the wire. See the docs:
    # https://jcristharif.com/msgspec/extending.html#mapping-to-from-native-types

    # if pld_spec is Any:
    #     pld_spec = Raw

    nsp_codec: MsgCodec = mk_codec(
        # ipc_pld_spec=Raw,  # default!

        # NOTE XXX: the encode hook MUST be used no matter what since
        # our `NamespacePath` is not an `Any`-native type nor
        # a `msgspec.Struct` subtype - so `msgspec` has no way to know
        # how to encode it unless we provide the custom hook.
        #
        # AGAIN that is, regardless of whether we spec an
        # `Any`-decoded-pld the enc has no knowledge (by default)
        # how to enc `NamespacePath` (nsp), so we add a custom
        # hook to do that ALWAYS.
        enc_hook=enc_nsp if add_hooks else None,

        # XXX NOTE: pretty sure this is mutex with the `type=` to
        # `Decoder`? so it won't work in tandem with the
        # `ipc_pld_spec` passed above?
        ext_types=[NamespacePath],
    )
    # dec_hook=dec_nsp if add_hooks else None,
    return nsp_codec


@pytest.mark.parametrize(
    'limit_plds_args',
    [
        (
            {'dec_hook': None, 'ext_types': None},
            None,
        ),
        (
            {'dec_hook': dec_nsp, 'ext_types': None},
            TypeError,
        ),
        (
            {'dec_hook': dec_nsp, 'ext_types': [NamespacePath]},
            None,
        ),
        (
            {'dec_hook': dec_nsp, 'ext_types': [NamespacePath|None]},
            None,
        ),
    ],
    ids=[
        'no_hook_no_ext_types',
        'only_hook',
        'hook_and_ext_types',
        'hook_and_ext_types_w_null',
    ]
)
def test_pld_limiting_usage(
    limit_plds_args: tuple[dict, Exception|None],
):
    '''
    Verify `dec_hook()` and `ext_types` need to either both be
    provided or we raise an explanatory type-error.

    '''
    kwargs, maybe_err = limit_plds_args
    async def main():
        async with tractor.open_nursery() as an:  # just to open runtime

            # XXX SHOULD NEVER WORK outside an ipc ctx scope!
            try:
                with limit_plds(**kwargs):
                    pass
            except RuntimeError:
                pass

            p: tractor.Portal = await an.start_actor(
                'sub',
                enable_modules=[__name__],
            )
            async with (
                p.open_context(
                    sleep_forever_in_sub
                ) as (ctx, first),
            ):
                try:
                    with limit_plds(**kwargs):
                        pass
                except maybe_err as exc:
                    assert type(exc) is maybe_err
                    pass

    trio.run(main)


def chk_codec_applied(
    expect_codec: MsgCodec|None,
    enter_value: MsgCodec|None = None,

) -> MsgCodec:
    '''
    buncha sanity checks ensuring that the IPC channel's
    context-vars are set to the expected codec and that our
    ctx-var wrapper APIs match the same.

    '''
    # TODO: play with tricycle again, bc this is supposed to work
    # the way we want?
    #
    # TreeVar
    # task: trio.Task = trio.lowlevel.current_task()
    # curr_codec = _ctxvar_MsgCodec.get_in(task)

    # ContextVar
    # task_ctx: Context = task.context
    # assert _ctxvar_MsgCodec in task_ctx
    # curr_codec: MsgCodec = task.context[_ctxvar_MsgCodec]
    if expect_codec is None:
        assert enter_value is None
        return

    # NOTE: currently we use this!
    # RunVar
    curr_codec: MsgCodec = current_codec()
    last_read_codec = _ctxvar_MsgCodec.get()
    # assert curr_codec is last_read_codec

    assert (
        (same_codec := expect_codec) is
        # returned from `mk_codec()`

        # yielded value from `apply_codec()`

        # read from current task's `contextvars.Context`
        curr_codec is
        last_read_codec

        # the default `msgspec` settings
        is not _codec._def_msgspec_codec
        is not _codec._def_tractor_codec
    )

    if enter_value:
        assert enter_value is same_codec


@tractor.context
async def send_back_values(
    ctx: Context,
    rent_pld_spec_type_strs: list[str],
    add_hooks: bool,

) -> None:
    '''
    Set up a custom codec to load instances of `NamespacePath`
    and ensure we can round trip a func ref with our parent.

    '''
    uid: tuple = tractor.current_actor().uid

    # init state in sub-actor should be default
    chk_codec_applied(
        expect_codec=_codec._def_tractor_codec,
    )

    # load pld spec from input str
    rent_pld_spec = _exts.dec_type_union(
        rent_pld_spec_type_strs,
        mods=[
            importlib.import_module(__name__),
        ],
    )
    rent_pld_spec_types: set[Type] = _codec.unpack_spec_types(
        rent_pld_spec,
    )

    # ONLY add ext-hooks if the rent specified a non-std type!
    add_hooks: bool = (
        NamespacePath in rent_pld_spec_types
        and
        add_hooks
    )

    # same as on parent side config.
    nsp_codec: MsgCodec|None = None
    if add_hooks:
        nsp_codec = mk_codec(
            enc_hook=enc_nsp,
            ext_types=[NamespacePath],
        )

    with (
        maybe_apply_codec(nsp_codec) as codec,
        limit_plds(
            rent_pld_spec,
            dec_hook=dec_nsp if add_hooks else None,
            ext_types=[NamespacePath] if add_hooks else None,
        ) as pld_dec,
    ):
        # ?XXX? SHOULD WE NOT be swapping the global codec since it
        # breaks `Context.started()` roundtripping checks??
        chk_codec_applied(
            expect_codec=nsp_codec,
            enter_value=codec,
        )

        # ?TODO, mismatch case(s)?
        #
        # ensure pld spec matches on both sides
        ctx_pld_dec: MsgDec = ctx._pld_rx._pld_dec
        assert pld_dec is ctx_pld_dec
        child_pld_spec: Type = pld_dec.spec
        child_pld_spec_types: set[Type] = _codec.unpack_spec_types(
            child_pld_spec,
        )
        assert (
            child_pld_spec_types.issuperset(
                rent_pld_spec_types
            )
        )

        # ?TODO, try loop for each of the types in pld-superset?
        #
        # for send_value in [
        #     nsp,
        #     str(nsp),
        #     None,
        # ]:
        nsp = NamespacePath.from_ref(ex_func)
        try:
            print(
                f'{uid}: attempting to `.started({nsp})`\n'
                f'\n'
                f'rent_pld_spec: {rent_pld_spec}\n'
                f'child_pld_spec: {child_pld_spec}\n'
                f'codec: {codec}\n'
            )
            # await tractor.pause()
            await ctx.started(nsp)

        except tractor.MsgTypeError as _mte:
            mte = _mte

            # false -ve case
            if add_hooks:
                raise RuntimeError(
                    f'EXPECTED to `.started()` value given spec ??\n\n'
                    f'child_pld_spec -> {child_pld_spec}\n'
                    f'value = {nsp}: {type(nsp)}\n'
                )

            # true -ve case
            raise mte

        # TODO: maybe we should add our own wrapper error so as to
        # be interchange-lib agnostic?
        # -[ ] the error type is wtv is raised from the hook so we
        #     could also require a type-class of errors for
        #     indicating whether the hook-failure can be handled by
        #     a nasty-dialog-unprot sub-sys?
        except TypeError as typerr:
            # false -ve
            if add_hooks:
                raise RuntimeError('Should have been able to send `nsp`??')

            # true -ve
            print('Failed to send `nsp` due to no ext hooks set!')
            raise typerr

        # now try sending a set of valid and invalid plds to ensure
        # the pld spec is respected.
        sent: list[Any] = []
        async with ctx.open_stream() as ipc:
            print(
                f'{uid}: streaming all pld types to rent..'
            )

            # for send_value, expect_send in iter_send_val_items:
            for send_value in [
                nsp,
                str(nsp),
                None,
            ]:
                send_type: Type = type(send_value)
                print(
                    f'{uid}: SENDING NEXT pld\n'
                    f'send_type: {send_type}\n'
                    f'send_value: {send_value}\n'
                )
                try:
                    await ipc.send(send_value)
                    sent.append(send_value)

                except ValidationError as valerr:
                    print(f'{uid} FAILED TO SEND {send_value}!')

                    # false -ve
                    if add_hooks:
                        raise RuntimeError(
                            f'EXPECTED to roundtrip value given spec:\n'
                            f'rent_pld_spec -> {rent_pld_spec}\n'
                            f'child_pld_spec -> {child_pld_spec}\n'
                            f'value = {send_value}: {send_type}\n'
                        )

                    # true -ve
                    raise valerr
                    # continue

            else:
                print(
                    f'{uid}: finished sending all values\n'
                    'Should be exiting stream block!\n'
                )

        print(f'{uid}: exited streaming block!')


@cm
def maybe_apply_codec(codec: MsgCodec|None) -> MsgCodec|None:
    if codec is None:
        yield None
        return

    with apply_codec(codec) as codec:
        yield codec

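A quick usage note on `maybe_apply_codec()`: it lets the parametrized tests
below treat the "no custom codec" and "custom codec" cases uniformly, e.g.:

    with maybe_apply_codec(None) as codec:
        assert codec is None  # default codec left applied

    with maybe_apply_codec(
        mk_codec(enc_hook=enc_nsp, ext_types=[NamespacePath]),
    ) as codec:
        assert codec is not None  # custom codec applied for the block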
@pytest.mark.parametrize(
    'pld_spec',
    [
        Any,
        NamespacePath,
        NamespacePath|None,  # the "maybe" spec Bo
    ],
    ids=[
        'any_type',
        'only_nsp_ext',
        'maybe_nsp_ext',
    ]
)
@pytest.mark.parametrize(
    'add_hooks',
    [
        True,
        False,
    ],
    ids=[
        'use_codec_hooks',
        'no_codec_hooks',
    ],
)
def test_ext_types_over_ipc(
    debug_mode: bool,
    pld_spec: Union[Type],
    add_hooks: bool,
):
    '''
    Ensure we can support extension types converted using
    `enc/dec_hook()`s passed to the `.msg.limit_plds()` API
    and that sane errors happen when we try to do the same without
    the codec hooks.

    '''
    pld_types: set[Type] = _codec.unpack_spec_types(pld_spec)

    async def main():

        # sanity check the default pld-spec beforehand
        chk_codec_applied(
            expect_codec=_codec._def_tractor_codec,
        )

        # extension type we want to send as msg payload
        nsp = NamespacePath.from_ref(ex_func)

        # ^NOTE, 2 cases:
        # - codec hooks not added -> decode nsp as `str`
        # - codec with hooks -> decode nsp as `NamespacePath`
        nsp_codec: MsgCodec|None = None
        if (
            NamespacePath in pld_types
            and
            add_hooks
        ):
            nsp_codec = mk_codec(
                enc_hook=enc_nsp,
                ext_types=[NamespacePath],
            )

        async with tractor.open_nursery(
            debug_mode=debug_mode,
        ) as an:
            p: tractor.Portal = await an.start_actor(
                'sub',
                enable_modules=[__name__],
            )
            with (
                maybe_apply_codec(nsp_codec) as codec,
            ):
                chk_codec_applied(
                    expect_codec=nsp_codec,
                    enter_value=codec,
                )
                rent_pld_spec_type_strs: list[str] = _exts.enc_type_union(pld_spec)

                # XXX should raise an mte (`MsgTypeError`)
                # when `add_hooks == False` bc the input
                # `expect_ipc_send` kwarg has a nsp which can't be
                # serialized!
                #
                # TODO: can we ensure this happens from the
                # `Return`-side (aka the sub) as well?
                try:
                    ctx: tractor.Context
                    ipc: tractor.MsgStream
                    async with (

                        # XXX should raise an mte (`MsgTypeError`)
                        # when `add_hooks == False`..
                        p.open_context(
                            send_back_values,
                            # expect_debug=debug_mode,
                            rent_pld_spec_type_strs=rent_pld_spec_type_strs,
                            add_hooks=add_hooks,
                            # expect_ipc_send=expect_ipc_send,
                        ) as (ctx, first),

                        ctx.open_stream() as ipc,
                    ):
                        with (
                            limit_plds(
                                pld_spec,
                                dec_hook=dec_nsp if add_hooks else None,
                                ext_types=[NamespacePath] if add_hooks else None,
                            ) as pld_dec,
                        ):
                            ctx_pld_dec: MsgDec = ctx._pld_rx._pld_dec
                            assert pld_dec is ctx_pld_dec

                            # if (
                            #     not add_hooks
                            #     and
                            #     NamespacePath in
                            # ):
                            #     pytest.fail('ctx should fail to open without custom enc_hook!?')

                            await ipc.send(nsp)
                            nsp_rt = await ipc.receive()

                            assert nsp_rt == nsp
                            assert nsp_rt.load_ref() is ex_func

                # this test passes bc we can go no further!
                except MsgTypeError as mte:
                    # if not add_hooks:
                    #     # teardown nursery
                    #     await p.cancel_actor()
                    #     return

                    raise mte

            await p.cancel_actor()

    if (
        NamespacePath in pld_types
        and
        add_hooks
    ):
        trio.run(main)

    else:
        with pytest.raises(
            expected_exception=tractor.RemoteActorError,
        ) as excinfo:
            trio.run(main)

        exc = excinfo.value
        # bc `.started(nsp: NamespacePath)` will raise
        assert exc.boxed_type is TypeError

# def chk_pld_type(
#     payload_spec: Type[Struct]|Any,
#     pld: Any,

#     expect_roundtrip: bool|None = None,

# ) -> bool:

#     pld_val_type: Type = type(pld)

#     # TODO: verify that the overridden subtypes
#     # DO NOT have modified type-annots from original!
#     # 'Start',  .pld: FuncSpec
#     # 'StartAck',  .pld: IpcCtxSpec
#     # 'Stop',  .pld: UNSET
#     # 'Error',  .pld: ErrorData

#     codec: MsgCodec = mk_codec(
#         # NOTE: this ONLY accepts `PayloadMsg.pld` fields of a specified
#         # type union.
#         ipc_pld_spec=payload_spec,
#     )

#     # make a one-off dec to compare with our `MsgCodec` instance
#     # which does the below `mk_msg_spec()` call internally
#     ipc_msg_spec: Union[Type[Struct]]
#     msg_types: list[PayloadMsg[payload_spec]]
#     (
#         ipc_msg_spec,
#         msg_types,
#     ) = mk_msg_spec(
#         payload_type_union=payload_spec,
#     )
#     _enc = msgpack.Encoder()
#     _dec = msgpack.Decoder(
#         type=ipc_msg_spec or Any,  # like `PayloadMsg[Any]`
#     )

#     assert (
#         payload_spec
#         ==
#         codec.pld_spec
#     )

#     # assert codec.dec == dec
#     #
#     # ^-XXX-^ not sure why these aren't "equal" but when cast
#     # to `str` they seem to match ?? .. kk

#     assert (
#         str(ipc_msg_spec)
#         ==
#         str(codec.msg_spec)
#         ==
#         str(_dec.type)
#         ==
#         str(codec.dec.type)
#     )

#     # verify the boxed-type for all variable payload-type msgs.
#     if not msg_types:
#         breakpoint()

#     roundtrip: bool|None = None
#     pld_spec_msg_names: list[str] = [
#         td.__name__ for td in _payload_msgs
#     ]
#     for typedef in msg_types:

#         skip_runtime_msg: bool = typedef.__name__ not in pld_spec_msg_names
#         if skip_runtime_msg:
#             continue

#         pld_field = structs.fields(typedef)[1]
#         assert pld_field.type is payload_spec  # TODO-^ does this need to work to get all subtypes to adhere?

#         kwargs: dict[str, Any] = {
#             'cid': '666',
#             'pld': pld,
#         }
#         enc_msg: PayloadMsg = typedef(**kwargs)

#         _wire_bytes: bytes = _enc.encode(enc_msg)
#         wire_bytes: bytes = codec.enc.encode(enc_msg)
#         assert _wire_bytes == wire_bytes

#         ve: ValidationError|None = None
#         try:
#             dec_msg = codec.dec.decode(wire_bytes)
#             _dec_msg = _dec.decode(wire_bytes)

#             # decoded msg and thus payload should be exactly same!
#             assert (roundtrip := (
#                 _dec_msg
#                 ==
#                 dec_msg
#                 ==
#                 enc_msg
#             ))

#             if (
#                 expect_roundtrip is not None
#                 and expect_roundtrip != roundtrip
#             ):
#                 breakpoint()

#             assert (
#                 pld
#                 ==
#                 dec_msg.pld
#                 ==
#                 enc_msg.pld
#             )
#             # assert (roundtrip := (_dec_msg == enc_msg))

#         except ValidationError as _ve:
#             ve = _ve
#             roundtrip: bool = False
#             if pld_val_type is payload_spec:
#                 raise ValueError(
#                     'Got `ValidationError` despite type-var match!?\n'
#                     f'pld_val_type: {pld_val_type}\n'
#                     f'payload_type: {payload_spec}\n'
#                 ) from ve

#             else:
#                 # ow we good cuz the pld spec mismatched.
#                 print(
#                     'Got expected `ValidationError` since,\n'
#                     f'{pld_val_type} is not {payload_spec}\n'
#                 )
#         else:
#             if (
#                 payload_spec is not Any
#                 and
#                 pld_val_type is not payload_spec
#             ):
#                 raise ValueError(
#                     'DID NOT `ValidationError` despite expected type match!?\n'
#                     f'pld_val_type: {pld_val_type}\n'
#                     f'payload_type: {payload_spec}\n'
#                 )

#     # full code decode should always be attempted!
#     if roundtrip is None:
#         breakpoint()

#     return roundtrip


# ?TODO? maybe remove since covered in the newer `test_pldrx_limiting`
# via end-2-end testing of all this?
# -[ ] IOW do we really NEED this lowlevel unit testing?
#
# def test_limit_msgspec(
#     debug_mode: bool,
# ):
#     '''
#     Internals unit testing to verify that type-limiting an IPC ctx's
#     msg spec with `Pldrx.limit_plds()` results in various
#     encapsulated `msgspec` object settings and state.

#     '''
#     async def main():
#         async with tractor.open_root_actor(
#             debug_mode=debug_mode,
#         ):
#             # ensure we can round-trip a boxing `PayloadMsg`
#             assert chk_pld_type(
#                 payload_spec=Any,
#                 pld=None,
#                 expect_roundtrip=True,
#             )

#             # verify that a mis-typed payload value won't decode
#             assert not chk_pld_type(
#                 payload_spec=int,
#                 pld='doggy',
#             )

#             # parametrize the boxed `.pld` type as a custom-struct
#             # and ensure that parametrization propagates
#             # to all payload-msg-spec-able subtypes!
#             class CustomPayload(Struct):
#                 name: str
#                 value: Any

#             assert not chk_pld_type(
#                 payload_spec=CustomPayload,
#                 pld='doggy',
#             )

#             assert chk_pld_type(
#                 payload_spec=CustomPayload,
#                 pld=CustomPayload(name='doggy', value='urmom')
#             )

#             # yah, we can `.pause_from_sync()` now!
#             # breakpoint()

#     trio.run(main)

Two file diffs suppressed because they are too large.
@@ -7,24 +7,31 @@ import pytest

import trio
import tractor

from tractor._testing import tractor_test
from conftest import tractor_test


@pytest.mark.trio
async def test_no_runtime():
async def test_no_arbitter():
    """An arbiter must be established before any nurseries
    can be created.

    (In other words ``tractor.open_root_actor()`` must be engaged at
    some point?)
    """
    with pytest.raises(RuntimeError):
        async with tractor.find_actor('doggy'):
    with pytest.raises(RuntimeError):
        with tractor.open_nursery():
            pass


def test_no_main():
    """An async function **must** be passed to ``tractor.run()``.
    """
    with pytest.raises(TypeError):
        tractor.run(None)


@tractor_test
async def test_self_is_registered(reg_addr):
async def test_self_is_registered(arb_addr):
    "Verify waiting on the arbiter to register itself using the standard api."
    actor = tractor.current_actor()
    assert actor.is_arbiter

@@ -34,20 +41,20 @@ async def test_self_is_registered(reg_addr):


@tractor_test
async def test_self_is_registered_localportal(reg_addr):
async def test_self_is_registered_localportal(arb_addr):
    "Verify waiting on the arbiter to register itself using a local portal."
    actor = tractor.current_actor()
    assert actor.is_arbiter
    async with tractor.get_registry(*reg_addr) as portal:
    async with tractor.get_arbiter(*arb_addr) as portal:
        assert isinstance(portal, tractor._portal.LocalPortal)

        with trio.fail_after(0.2):
            sockaddr = await portal.run_from_ns(
                'self', 'wait_for_actor', name='root')
            assert sockaddr[0] == reg_addr
            assert sockaddr[0] == arb_addr


def test_local_actor_async_func(reg_addr):
def test_local_actor_async_func(arb_addr):
    """Verify a simple async function in-process.
    """
    nums = []

@@ -55,7 +62,7 @@ def test_local_actor_async_func(reg_addr):
    async def print_loop():

        async with tractor.open_root_actor(
            registry_addrs=[reg_addr],
            arbiter_addr=arb_addr,
        ):
            # arbiter is started in-proc if dne
            assert tractor.current_actor().is_arbiter

@@ -7,10 +7,8 @@ import time

import pytest
import trio
import tractor
from tractor._testing import (
from conftest import (
    tractor_test,
)
from .conftest import (
    sig_prog,
    _INT_SIGNAL,
    _INT_RETURN_CODE,

@@ -30,9 +28,9 @@ def test_abort_on_sigint(daemon):


@tractor_test
async def test_cancel_remote_arbiter(daemon, reg_addr):
async def test_cancel_remote_arbiter(daemon, arb_addr):
    assert not tractor.current_actor().is_arbiter
    async with tractor.get_registry(*reg_addr) as portal:
    async with tractor.get_arbiter(*arb_addr) as portal:
        await portal.cancel_actor()

    time.sleep(0.1)

@@ -41,16 +39,16 @@ async def test_cancel_remote_arbiter(daemon, reg_addr):

    # no arbiter socket should exist
    with pytest.raises(OSError):
        async with tractor.get_registry(*reg_addr) as portal:
        async with tractor.get_arbiter(*arb_addr) as portal:
            pass


def test_register_duplicate_name(daemon, reg_addr):
def test_register_duplicate_name(daemon, arb_addr):

    async def main():

        async with tractor.open_nursery(
            registry_addrs=[reg_addr],
            arbiter_addr=arb_addr,
        ) as n:

            assert not tractor.current_actor().is_arbiter

@@ -1,364 +0,0 @@
'''
Audit sub-sys APIs from `.msg._ops`
mostly for ensuring correct `contextvars`
related settings around IPC contexts.

'''
from contextlib import (
    asynccontextmanager as acm,
)

from msgspec import (
    Struct,
)
import pytest
import trio

import tractor
from tractor import (
    Context,
    MsgTypeError,
    current_ipc_ctx,
    Portal,
)
from tractor.msg import (
    _ops as msgops,
    Return,
)
from tractor.msg import (
    _codec,
)
from tractor.msg.types import (
    log,
)


class PldMsg(
    Struct,

    # TODO: with multiple structs in-spec we need to tag them!
    # -[ ] offer a built-in `PldMsg` type to inherit from which takes
    #     care of these details?
    #
    # https://jcristharif.com/msgspec/structs.html#tagged-unions
    # tag=True,
    # tag_field='msg_type',
):
    field: str


maybe_msg_spec = PldMsg|None

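A small standalone sketch (plain `msgspec`, no tractor runtime) of what a
`PldMsg|None` style payload-spec means at decode time: values in the union
pass validation, anything else raises (the `PldMsg2` struct here is a
hypothetical stand-in for the real `PldMsg` above):

    import msgspec

    class PldMsg2(msgspec.Struct):
        field: str

    spec = PldMsg2 | None
    wire = msgspec.msgpack.encode(PldMsg2(field='yo'))
    assert msgspec.msgpack.decode(wire, type=spec).field == 'yo'
    assert msgspec.msgpack.decode(msgspec.msgpack.encode(None), type=spec) is None
    try:
        msgspec.msgpack.decode(msgspec.msgpack.encode(10), type=spec)
    except msgspec.ValidationError:
        pass  # an `int` is not in the spec, so validation fails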
@acm
async def maybe_expect_raises(
    raises: BaseException|None = None,
    ensure_in_message: list[str]|None = None,
    post_mortem: bool = False,
    timeout: int = 3,
) -> None:
    '''
    Async wrapper for ensuring errors propagate from the inner scope.

    '''
    if tractor._state.debug_mode():
        timeout += 999

    with trio.fail_after(timeout):
        try:
            yield
        except BaseException as _inner_err:
            inner_err = _inner_err
            # wasn't-expected to error..
            if raises is None:
                raise

            else:
                assert type(inner_err) is raises

                # maybe check for error txt content
                if ensure_in_message:
                    part: str
                    err_repr: str = repr(inner_err)
                    for part in ensure_in_message:
                        for i, arg in enumerate(inner_err.args):
                            if part in err_repr:
                                break
                        # if part never matches an arg, then we're
                        # missing a match.
                        else:
                            raise ValueError(
                                'Failed to find error message content?\n\n'
                                f'expected: {ensure_in_message!r}\n'
                                f'part: {part!r}\n\n'
                                f'{inner_err.args}'
                            )

                if post_mortem:
                    await tractor.post_mortem()

        else:
            if raises:
                raise RuntimeError(
                    f'Expected a {raises.__name__!r} to be raised?'
                )

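How `maybe_expect_raises()` reads at a call site: `raises=None` means the
block must complete cleanly, otherwise the block must raise exactly that type
and its repr must contain each given message fragment. A sketch:

    async def demo():
        async with maybe_expect_raises(
            raises=ValueError,
            ensure_in_message=['bad value'],
        ):
            raise ValueError('bad value given')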
||||
@tractor.context(
|
||||
pld_spec=maybe_msg_spec,
|
||||
)
|
||||
async def child(
|
||||
ctx: Context,
|
||||
started_value: int|PldMsg|None,
|
||||
return_value: str|None,
|
||||
validate_pld_spec: bool,
|
||||
raise_on_started_mte: bool = True,
|
||||
|
||||
) -> None:
|
||||
'''
|
||||
Call ``Context.started()`` more then once (an error).
|
||||
|
||||
'''
|
||||
expect_started_mte: bool = started_value == 10
|
||||
|
||||
# sanaity check that child RPC context is the current one
|
||||
curr_ctx: Context = current_ipc_ctx()
|
||||
assert ctx is curr_ctx
|
||||
|
||||
rx: msgops.PldRx = ctx._pld_rx
|
||||
curr_pldec: _codec.MsgDec = rx.pld_dec
|
||||
|
||||
ctx_meta: dict = getattr(
|
||||
child,
|
||||
'_tractor_context_meta',
|
||||
None,
|
||||
)
|
||||
if ctx_meta:
|
||||
assert (
|
||||
ctx_meta['pld_spec']
|
||||
is curr_pldec.spec
|
||||
is curr_pldec.pld_spec
|
||||
)
|
||||
|
||||
# 2 cases: hdndle send-side and recv-only validation
|
||||
# - when `raise_on_started_mte == True`, send validate
|
||||
# - else, parent-recv-side only validation
|
||||
mte: MsgTypeError|None = None
|
||||
try:
|
||||
await ctx.started(
|
||||
value=started_value,
|
||||
validate_pld_spec=validate_pld_spec,
|
||||
)
|
||||
|
||||
except MsgTypeError as _mte:
|
||||
mte = _mte
|
||||
        log.exception('started()` raised an MTE!\n')
        if not expect_started_mte:
            raise RuntimeError(
                'Child-ctx-task SHOULD NOT HAVE raised an MTE for\n\n'
                f'{started_value!r}\n'
            )

        boxed_div: str = '------ - ------'
        assert boxed_div not in mte._message
        assert boxed_div not in mte.tb_str
        assert boxed_div not in repr(mte)
        assert boxed_div not in str(mte)
        mte_repr: str = repr(mte)
        for line in mte.message.splitlines():
            assert line in mte_repr

        # since this is a *local error* there should be no
        # boxed traceback content!
        assert not mte.tb_str

        # propagate to parent?
        if raise_on_started_mte:
            raise

    # no-send-side-error fallthrough
    if (
        validate_pld_spec
        and
        expect_started_mte
    ):
        raise RuntimeError(
            'Child-ctx-task SHOULD HAVE raised an MTE for\n\n'
            f'{started_value!r}\n'
        )

    assert (
        not expect_started_mte
        or
        not validate_pld_spec
    )

    # if wait_for_parent_to_cancel:
    #     ...
    #
    # ^-TODO-^ logic for diff validation policies on each side:
    #
    # -[ ] ensure that if we don't validate on the send
    #   side, that we are eventually error-cancelled by our
    #   parent due to the bad `Started` payload!
    # -[ ] the boxed error should be srced from the parent's
    #   runtime NOT ours!
    # -[ ] we should still error on bad `return_value`s
    #   despite the parent not yet error-cancelling us?
    #   |_ how do we want the parent side to look in that
    #     case?
    #     -[ ] maybe the equiv of "during handling of the
    #       above error another occurred" for the case where
    #       the parent sends a MTE to this child and while
    #       waiting for the child to terminate it gets back
    #       the MTE for this case?
    #

    # XXX should always fail on recv side since we can't
    # really do much else beside terminate and relay the
    # msg-type-error from this RPC task ;)
    return return_value


@pytest.mark.parametrize(
    'return_value',
    [
        'yo',
        None,
    ],
    ids=[
        'return[invalid-"yo"]',
        'return[valid-None]',
    ],
)
@pytest.mark.parametrize(
    'started_value',
    [
        10,
        PldMsg(field='yo'),
    ],
    ids=[
        'Started[invalid-10]',
        'Started[valid-PldMsg]',
    ],
)
@pytest.mark.parametrize(
    'pld_check_started_value',
    [
        True,
        False,
    ],
    ids=[
        'check-started-pld',
        'no-started-pld-validate',
    ],
)
def test_basic_payload_spec(
    debug_mode: bool,
    loglevel: str,
    return_value: str|None,
    started_value: int|PldMsg,
    pld_check_started_value: bool,
):
    '''
    Validate the most basic `PldRx` msg-type-spec semantics around
    an IPC `Context` endpoint start, started-sync, and final return
    value depending on set payload types and the currently applied
    pld-spec.

    '''
    invalid_return: bool = return_value == 'yo'
    invalid_started: bool = started_value == 10

    async def main():
        async with tractor.open_nursery(
            debug_mode=debug_mode,
            loglevel=loglevel,
        ) as an:
            p: Portal = await an.start_actor(
                'child',
                enable_modules=[__name__],
            )

            # since not opened yet.
            assert current_ipc_ctx() is None

            if invalid_started:
                msg_type_str: str = 'Started'
                bad_value: int = 10
            elif invalid_return:
                msg_type_str: str = 'Return'
                bad_value: str = 'yo'
            else:
                # XXX but should never be used below then..
                msg_type_str: str = ''
                bad_value: str = ''

            maybe_mte: MsgTypeError|None = None
            should_raise: Exception|None = (
                MsgTypeError if (
                    invalid_return
                    or
                    invalid_started
                ) else None
            )
            async with (
                maybe_expect_raises(
                    raises=should_raise,
                    ensure_in_message=[
                        f"invalid `{msg_type_str}` msg payload",
                        f'{bad_value}',
                        f'has type {type(bad_value)!r}',
                        'not match type-spec',
                        f'`{msg_type_str}.pld: PldMsg|NoneType`',
                    ],
                    # only for debug
                    # post_mortem=True,
                ),
                p.open_context(
                    child,
                    return_value=return_value,
                    started_value=started_value,
                    validate_pld_spec=pld_check_started_value,
                ) as (ctx, first),
            ):
                # now opened with 'child' sub
                assert current_ipc_ctx() is ctx

                assert type(first) is PldMsg
                assert first.field == 'yo'

                try:
                    res: None|PldMsg = await ctx.result(hide_tb=False)
                    assert res is None
                except MsgTypeError as mte:
                    maybe_mte = mte
                    if not invalid_return:
                        raise

                    # expected this invalid `Return.pld` so audit
                    # the error state + meta-data
                    assert mte.expected_msg_type is Return
                    assert mte.cid == ctx.cid
                    mte_repr: str = repr(mte)
                    for line in mte.message.splitlines():
                        assert line in mte_repr

                    assert mte.tb_str
                    # await tractor.pause(shield=True)

                    # verify expected remote mte details
                    assert ctx._local_error is None
                    assert (
                        mte is
                        ctx._remote_error is
                        ctx.maybe_error is
                        ctx.outcome
                    )

            if should_raise is None:
                assert maybe_mte is None

            await p.cancel_actor()

    trio.run(main)
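For orientation, the test above drives payload-type validation on both the `Started` and `Return` msgs of a single context. A minimal, hedged sketch of the endpoint shape it exercises (this is not the library's exact spec API; `PldMsg` is assumed to be the `msgspec` struct defined earlier in this test module):

# Hedged sketch: a typed-`started()` context endpoint mirroring the
# parent/child pairing in the test above.
import tractor

@tractor.context
async def typed_child(
    ctx: tractor.Context,
) -> None:
    # the first value synced to the parent; its type is what an
    # applied pld-spec checks against.
    await ctx.started(PldMsg(field='yo'))
    # the final `Return.pld`, also subject to the spec.
    return None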
@@ -4,21 +4,20 @@ from itertools import cycle
import pytest
import trio
import tractor
from tractor.experimental import msgpub
from tractor._testing import tractor_test
from tractor.testing import tractor_test


def test_type_checks():

    with pytest.raises(TypeError) as err:
        @msgpub
        @tractor.msg.pub
        async def no_get_topics(yo):
            yield

    assert "must define a `get_topics`" in str(err.value)

    with pytest.raises(TypeError) as err:
        @msgpub
        @tractor.msg.pub
        def not_async_gen(yo):
            pass


@@ -33,7 +32,7 @@ def is_even(i):
_get_topics = None


@msgpub
@tractor.msg.pub
async def pubber(get_topics, seed=10):

    # ensure topic subscriptions are as expected

@@ -104,7 +103,7 @@ async def subs(
    await stream.aclose()


@msgpub(tasks=['one', 'two'])
@tractor.msg.pub(tasks=['one', 'two'])
async def multilock_pubber(get_topics):
    yield {'doggy': 10}


@@ -159,7 +158,7 @@ async def test_required_args(callwith_expecterror):
)
def test_multi_actor_subs_arbiter_pub(
    loglevel,
    reg_addr,
    arb_addr,
    pub_actor,
):
    """Try out the neato @pub decorator system.

@@ -169,7 +168,7 @@ def test_multi_actor_subs_arbiter_pub(
    async def main():

        async with tractor.open_nursery(
            registry_addrs=[reg_addr],
            arbiter_addr=arb_addr,
            enable_modules=[__name__],
        ) as n:


@@ -181,7 +180,6 @@ def test_multi_actor_subs_arbiter_pub(
            'streamer',
            enable_modules=[__name__],
        )
        name = 'streamer'

        even_portal = await n.run_in_actor(
            subs,

@@ -254,12 +252,12 @@ def test_multi_actor_subs_arbiter_pub(

def test_single_subactor_pub_multitask_subs(
    loglevel,
    reg_addr,
    arb_addr,
):
    async def main():

        async with tractor.open_nursery(
            registry_addrs=[reg_addr],
            arbiter_addr=arb_addr,
            enable_modules=[__name__],
        ) as n:

@@ -1,181 +0,0 @@
'''
Async context manager cache api testing: ``trionics.maybe_open_context()``

'''
from contextlib import asynccontextmanager as acm
import platform
from typing import Awaitable

import pytest
import trio
import tractor


_resource: int = 0


@acm
async def maybe_increment_counter(task_name: str):
    global _resource

    _resource += 1
    await trio.lowlevel.checkpoint()
    yield _resource
    await trio.lowlevel.checkpoint()
    _resource -= 1


@pytest.mark.parametrize(
    'key_on',
    ['key_value', 'kwargs'],
    ids="key_on={}".format,
)
def test_resource_only_entered_once(key_on):
    global _resource
    _resource = 0

    key = None
    if key_on == 'key_value':
        key = 'some_common_key'

    async def main():
        cache_active: bool = False

        async def enter_cached_mngr(name: str):
            nonlocal cache_active

            if key_on == 'kwargs':
                # make a common kwargs input to key on it
                kwargs = {'task_name': 'same_task_name'}
                assert key is None
            else:
                # different task names per task will be used
                kwargs = {'task_name': name}

            async with tractor.trionics.maybe_open_context(
                maybe_increment_counter,
                kwargs=kwargs,
                key=key,

            ) as (cache_hit, resource):
                if cache_hit:
                    try:
                        cache_active = True
                        assert resource == 1
                        await trio.sleep_forever()
                    finally:
                        cache_active = False
                else:
                    assert resource == 1
                    await trio.sleep_forever()

        with trio.move_on_after(0.5):
            async with (
                tractor.open_root_actor(),
                trio.open_nursery() as n,
            ):

                for i in range(10):
                    n.start_soon(enter_cached_mngr, f'task_{i}')
                    await trio.sleep(0.001)

    trio.run(main)
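The cache-hit semantics exercised above boil down to: the first task to enter `maybe_open_context()` actually runs the wrapped acm, and every later task keyed to the same input gets `(cache_hit=True, <the same value>)`. A hedged usage sketch of the same pattern (the `open_db_pool` acm and its kwargs are hypothetical stand-ins, not part of tractor):

# Hedged sketch of the keyed-caching pattern from the test above.
from contextlib import asynccontextmanager as acm

import tractor
import trio

@acm
async def open_db_pool(url: str):
    pool = {'url': url}  # stand-in for a real connection pool
    yield pool

async def worker(name: str):
    async with tractor.trionics.maybe_open_context(
        acm_func=open_db_pool,
        kwargs={'url': 'db://localhost'},
    ) as (cache_hit, pool):
        # every task after the first should see `cache_hit == True`
        # and the exact same `pool` instance.
        print(f'{name}: cache_hit={cache_hit}, pool={pool!r}')
        await trio.sleep(0.1)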


@tractor.context
async def streamer(
    ctx: tractor.Context,
    seq: list[int] = list(range(1000)),
) -> None:

    await ctx.started()
    async with ctx.open_stream() as stream:
        for val in seq:
            await stream.send(val)
            await trio.sleep(0.001)

    print('producer finished')


@acm
async def open_stream() -> Awaitable[tractor.MsgStream]:

    async with tractor.open_nursery() as tn:
        portal = await tn.start_actor('streamer', enable_modules=[__name__])
        async with (
            portal.open_context(streamer) as (ctx, first),
            ctx.open_stream() as stream,
        ):
            yield stream

        await portal.cancel_actor()
    print('CANCELLED STREAMER')


@acm
async def maybe_open_stream(taskname: str):
    async with tractor.trionics.maybe_open_context(
        # NOTE: all secondary tasks should cache hit on the same key
        acm_func=open_stream,
    ) as (cache_hit, stream):

        if cache_hit:
            print(f'{taskname} loaded from cache')

            # add a new broadcast subscription for the quote stream
            # if this feed is already allocated by the first
            # task that entered
            async with stream.subscribe() as bstream:
                yield bstream
        else:
            # yield the actual stream
            yield stream


def test_open_local_sub_to_stream():
    '''
    Verify a single inter-actor stream can be fanned-out and shared
    to N local tasks using ``trionics.maybe_open_context()``.

    '''
    timeout: float = 3.6 if platform.system() != "Windows" else 10

    async def main():

        full = list(range(1000))

        async def get_sub_and_pull(taskname: str):
            async with (
                maybe_open_stream(taskname) as stream,
            ):
                if '0' in taskname:
                    assert isinstance(stream, tractor.MsgStream)
                else:
                    assert isinstance(
                        stream,
                        tractor.trionics.BroadcastReceiver
                    )

                first = await stream.receive()
                print(f'{taskname} started with value {first}')
                seq = []
                async for msg in stream:
                    seq.append(msg)

                assert set(seq).issubset(set(full))
            print(f'{taskname} finished')

        with trio.fail_after(timeout):
            # TODO: turns out this isn't multi-task entrant XD
            # We probably need an idempotent entry semantic?
            async with tractor.open_root_actor():
                async with (
                    trio.open_nursery() as nurse,
                ):
                    for i in range(10):
                        nurse.start_soon(get_sub_and_pull, f'task_{i}')
                        await trio.sleep(0.001)

                print('all consumer tasks finished')

    trio.run(main)
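In the fan-out case above, every task past the first wraps the one cached IPC stream in a broadcast subscription. A condensed, hedged sketch of just that consumer branch (assuming `stream` is an already-opened `tractor.MsgStream`):

import tractor

async def consume(stream: tractor.MsgStream, taskname: str):
    # each subscriber gets an independent receive position on the
    # same underlying inter-actor stream
    async with stream.subscribe() as bstream:
        async for msg in bstream:
            print(f'{taskname} got: {msg}')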
@@ -1,248 +0,0 @@
'''
Special attention cases for using "infect `asyncio`" mode from a root
actor; i.e. not using a std `trio.run()` bootstrap.

'''
import asyncio
from functools import partial

import pytest
import trio
import tractor
from tractor import (
    to_asyncio,
)
from tests.test_infected_asyncio import (
    aio_echo_server,
)


@pytest.mark.parametrize(
    'raise_error_mid_stream',
    [
        False,
        Exception,
        KeyboardInterrupt,
    ],
    ids='raise_error={}'.format,
)
def test_infected_root_actor(
    raise_error_mid_stream: bool|Exception,

    # conftest wide
    loglevel: str,
    debug_mode: bool,
):
    '''
    Verify you can run the `tractor` runtime with `Actor.is_infected_aio() == True`
    in the root actor.

    '''
    async def _trio_main():
        with trio.fail_after(2 if not debug_mode else 999):
            first: str
            chan: to_asyncio.LinkedTaskChannel
            async with (
                tractor.open_root_actor(
                    debug_mode=debug_mode,
                    loglevel=loglevel,
                ),
                to_asyncio.open_channel_from(
                    aio_echo_server,
                ) as (first, chan),
            ):
                assert first == 'start'

                for i in range(1000):
                    await chan.send(i)
                    out = await chan.receive()
                    assert out == i
                    print(f'asyncio echoing {i}')

                    if (
                        raise_error_mid_stream
                        and
                        i == 500
                    ):
                        raise raise_error_mid_stream

                    if out is None:
                        try:
                            out = await chan.receive()
                        except trio.EndOfChannel:
                            break
                        else:
                            raise RuntimeError(
                                'aio channel never stopped?'
                            )

    if raise_error_mid_stream:
        with pytest.raises(raise_error_mid_stream):
            tractor.to_asyncio.run_as_asyncio_guest(
                trio_main=_trio_main,
            )
    else:
        tractor.to_asyncio.run_as_asyncio_guest(
            trio_main=_trio_main,
        )


async def sync_and_err(
    # just signature placeholders for compat with
    # ``to_asyncio.open_channel_from()``
    to_trio: trio.MemorySendChannel,
    from_trio: asyncio.Queue,
    ev: asyncio.Event,

):
    if to_trio:
        to_trio.send_nowait('start')

    await ev.wait()
    raise RuntimeError('asyncio-side')


@pytest.mark.parametrize(
    'aio_err_trigger',
    [
        'before_start_point',
        'after_trio_task_starts',
        'after_start_point',
    ],
    ids='aio_err_triggered={}'.format
)
def test_trio_prestarted_task_bubbles(
    aio_err_trigger: str,

    # conftest wide
    loglevel: str,
    debug_mode: bool,
):
    async def pre_started_err(
        raise_err: bool = False,
        pre_sleep: float|None = None,
        aio_trigger: asyncio.Event|None = None,
        task_status=trio.TASK_STATUS_IGNORED,
    ):
        '''
        Maybe pre-started error then sleep.

        '''
        if pre_sleep is not None:
            print(f'Sleeping from trio for {pre_sleep!r}s !')
            await trio.sleep(pre_sleep)

        # signal aio-task to raise JUST AFTER this task
        # starts but has not yet `.started()`
        if aio_trigger:
            print('Signalling aio-task to raise from `trio`!!')
            aio_trigger.set()

        if raise_err:
            print('Raising from trio!')
            raise TypeError('trio-side')

        task_status.started()
        await trio.sleep_forever()

    async def _trio_main():
        # with trio.fail_after(2):
        with trio.fail_after(999):
            first: str
            chan: to_asyncio.LinkedTaskChannel
            aio_ev = asyncio.Event()

            async with (
                tractor.open_root_actor(
                    debug_mode=False,
                    loglevel=loglevel,
                ),
            ):
                # TODO, tests for this with 3.13 egs?
                # from tractor.devx import open_crash_handler
                # with open_crash_handler():
                async with (
                    # where we'll start a sub-task that errors BEFORE
                    # calling `.started()` such that the error should
                    # bubble before the guest run terminates!
                    trio.open_nursery() as tn,

                    # THEN start an infect task which should error just
                    # after the trio-side's task does.
                    to_asyncio.open_channel_from(
                        partial(
                            sync_and_err,
                            ev=aio_ev,
                        )
                    ) as (first, chan),
                ):

                    for i in range(5):
                        pre_sleep: float|None = None
                        last_iter: bool = (i == 4)

                        # TODO, missing cases?
                        # -[ ] error as well on
                        #    'after_start_point' case as well for
                        #    another case?
                        raise_err: bool = False

                        if last_iter:
                            raise_err: bool = True

                        # trigger aio task to error on next loop
                        # tick/checkpoint
                        if aio_err_trigger == 'before_start_point':
                            aio_ev.set()

                            pre_sleep: float = 0

                        await tn.start(
                            pre_started_err,
                            raise_err,
                            pre_sleep,
                            (aio_ev if (
                                    aio_err_trigger == 'after_trio_task_starts'
                                    and
                                    last_iter
                                ) else None
                            ),
                        )

                        if (
                            aio_err_trigger == 'after_start_point'
                            and
                            last_iter
                        ):
                            aio_ev.set()

    with pytest.raises(
        expected_exception=ExceptionGroup,
    ) as excinfo:
        tractor.to_asyncio.run_as_asyncio_guest(
            trio_main=_trio_main,
        )

    eg = excinfo.value
    rte_eg, rest_eg = eg.split(RuntimeError)

    # ensure the trio-task's error bubbled despite the aio-side
    # having (maybe) errored first.
    if aio_err_trigger in (
        'after_trio_task_starts',
        'after_start_point',
    ):
        assert len(errs := rest_eg.exceptions) == 1
        typerr = errs[0]
        assert (
            type(typerr) is TypeError
            and
            'trio-side' in typerr.args
        )

    # when aio errors BEFORE (last) trio task is scheduled, we should
    # never see anything but the aio-side.
    else:
        assert len(rtes := rte_eg.exceptions) == 1
        assert 'asyncio-side' in rtes[0].args[0]
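Both tests above drive the runtime in "guest mode": trio runs on top of an asyncio loop via `run_as_asyncio_guest()` instead of `trio.run()`. A hedged, minimal sketch of the channel pairing itself, using only the calls shown above (`aio_echo` is a hypothetical asyncio-side function with the same `(to_trio, from_trio)` signature as `sync_and_err`, not the imported `aio_echo_server` fixture):

# Hedged sketch: pairing a trio task with an asyncio task through
# `to_asyncio.open_channel_from()`, driven in guest mode as above.
import asyncio

import tractor
from tractor import to_asyncio
import trio

async def aio_echo(to_trio, from_trio: asyncio.Queue):
    to_trio.send_nowait('start')  # first value synced to trio
    while True:
        msg = await from_trio.get()
        to_trio.send_nowait(msg)

async def trio_main():
    async with (
        tractor.open_root_actor(),
        to_asyncio.open_channel_from(aio_echo) as (first, chan),
    ):
        assert first == 'start'
        await chan.send(42)
        assert await chan.receive() == 42

tractor.to_asyncio.run_as_asyncio_guest(trio_main=trio_main)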
@@ -1,8 +1,6 @@
'''
RPC (or maybe better labelled as "RTS: remote task scheduling"?)
related API and error checks.

'''
"""
RPC related
"""
import itertools

import pytest

@@ -15,19 +13,9 @@ async def sleep_back_actor(
    func_name,
    func_defined,
    exposed_mods,
    *,
    reg_addr: tuple,
):
    if actor_name:
        async with tractor.find_actor(
            actor_name,
            # NOTE: must be set manually since
            # the subactor doesn't have the reg_addr
            # fixture code run in it!
            # TODO: maybe we should just set this once in the
            # _state mod and derive to all children?
            registry_addrs=[reg_addr],
        ) as portal:
        async with tractor.find_actor(actor_name) as portal:
            try:
                await portal.run(__name__, func_name)
            except tractor.RemoteActorError as err:

@@ -36,7 +24,7 @@ async def sleep_back_actor(
                if not exposed_mods:
                    expect = tractor.ModuleNotExposed

                assert err.boxed_type is expect
                assert err.type is expect
                raise
    else:
        await trio.sleep(float('inf'))

@@ -54,25 +42,14 @@ async def short_sleep():
        (['tmp_mod'], 'import doggy', ModuleNotFoundError),
        (['tmp_mod'], '4doggy', SyntaxError),
    ],
    ids=[
        'no_mods',
        'this_mod',
        'this_mod_bad_func',
        'fail_to_import',
        'fail_on_syntax',
    ],
    ids=['no_mods', 'this_mod', 'this_mod_bad_func', 'fail_to_import',
         'fail_on_syntax'],
)
def test_rpc_errors(
    reg_addr,
    to_call,
    testdir,
):
    '''
    Test errors when making various RPC requests to an actor
def test_rpc_errors(arb_addr, to_call, testdir):
    """Test errors when making various RPC requests to an actor
    that either doesn't have the requested module exposed or doesn't define
    the named function.

    '''
    """
    exposed_mods, funcname, inside_err = to_call
    subactor_exposed_mods = []
    func_defined = globals().get(funcname, False)

@@ -100,13 +77,8 @@ def test_rpc_errors(

    # spawn a subactor which calls us back
    async with tractor.open_nursery(
        registry_addrs=[reg_addr],
        arbiter_addr=arb_addr,
        enable_modules=exposed_mods.copy(),

        # NOTE: will halt test in REPL if uncommented, so only
        # do that if actually debugging subactor but keep it
        # disabled for the test.
        # debug_mode=True,
    ) as n:

        actor = tractor.current_actor()

@@ -123,7 +95,6 @@ def test_rpc_errors(
            exposed_mods=exposed_mods,
            func_defined=True if func_defined else False,
            enable_modules=subactor_exposed_mods,
            reg_addr=reg_addr,
        )

    def run():

@@ -134,20 +105,18 @@ def test_rpc_errors(
        run()
    else:
        # underlying errors aren't propagated upwards (yet)
        with pytest.raises(
            expected_exception=(remote_err, ExceptionGroup),
        ) as err:
        with pytest.raises(remote_err) as err:
            run()

        # get raw instance from pytest wrapper
        value = err.value

        # might get multiple `trio.Cancelled`s as well inside an inception
        if isinstance(value, ExceptionGroup):
        if isinstance(value, trio.MultiError):
            value = next(itertools.dropwhile(
                lambda exc: not isinstance(exc, tractor.RemoteActorError),
                value.exceptions
            ))

        if getattr(value, 'type', None):
            assert value.boxed_type is inside_err
            assert value.type is inside_err

@@ -1,74 +0,0 @@
"""
Verifying internal runtime state and undocumented extras.

"""
import os

import pytest
import trio
import tractor

from tractor._testing import tractor_test


_file_path: str = ''


def unlink_file():
    print('Removing tmp file!')
    os.remove(_file_path)


async def crash_and_clean_tmpdir(
    tmp_file_path: str,
    error: bool = True,
):
    global _file_path
    _file_path = tmp_file_path

    actor = tractor.current_actor()
    actor.lifetime_stack.callback(unlink_file)

    assert os.path.isfile(tmp_file_path)
    await trio.sleep(0.1)
    if error:
        assert 0
    else:
        actor.cancel_soon()


@pytest.mark.parametrize(
    'error_in_child',
    [True, False],
)
@tractor_test
async def test_lifetime_stack_wipes_tmpfile(
    tmp_path,
    error_in_child: bool,
):
    child_tmp_file = tmp_path / "child.txt"
    child_tmp_file.touch()
    assert child_tmp_file.exists()
    path = str(child_tmp_file)

    try:
        with trio.move_on_after(0.5):
            async with tractor.open_nursery() as n:
                await (  # inlined portal
                    await n.run_in_actor(
                        crash_and_clean_tmpdir,
                        tmp_file_path=path,
                        error=error_in_child,
                    )
                ).result()

    except (
        tractor.RemoteActorError,
        # tractor.BaseExceptionGroup,
        BaseExceptionGroup,
    ):
        pass

    # tmp file should have been wiped by
    # teardown stack.
    assert not child_tmp_file.exists()
@@ -1,38 +1,33 @@
"""
Spawning basics

"""
from typing import (
    Any,
)

import pytest
import trio
import tractor

from tractor._testing import tractor_test
from conftest import tractor_test

data_to_pass_down = {'doggy': 10, 'kitty': 4}


async def spawn(
    is_arbiter: bool,
    data: dict,
    reg_addr: tuple[str, int],
):
async def spawn(is_arbiter, data, arb_addr):
    namespaces = [__name__]

    await trio.sleep(0.1)

    async with tractor.open_root_actor(
        arbiter_addr=reg_addr,
        arbiter_addr=arb_addr,
    ):

        actor = tractor.current_actor()
        assert actor.is_arbiter == is_arbiter
        data = data_to_pass_down

        if actor.is_arbiter:
            async with tractor.open_nursery() as nursery:

            async with tractor.open_nursery(
            ) as nursery:

                # forks here
                portal = await nursery.run_in_actor(

@@ -40,7 +35,7 @@ async def spawn(
                    is_arbiter=False,
                    name='sub-actor',
                    data=data,
                    reg_addr=reg_addr,
                    arb_addr=arb_addr,
                    enable_modules=namespaces,
                )

@@ -54,14 +49,12 @@ async def spawn(
    return 10


def test_local_arbiter_subactor_global_state(
    reg_addr,
):
def test_local_arbiter_subactor_global_state(arb_addr):
    result = trio.run(
        spawn,
        True,
        data_to_pass_down,
        reg_addr,
        arb_addr,
    )
    assert result == 10


@@ -95,42 +88,24 @@ async def test_movie_theatre_convo(start_method):
    await portal.cancel_actor()


async def cellar_door(
    return_value: str|None,
):
    return return_value
async def cellar_door():
    return "Dang that's beautiful"


@pytest.mark.parametrize(
    'return_value', ["Dang that's beautiful", None],
    ids=['return_str', 'return_None'],
)
@tractor_test
async def test_most_beautiful_word(
    start_method: str,
    return_value: Any,
    debug_mode: bool,
):
    '''
    The main ``tractor`` routine.
async def test_most_beautiful_word(start_method):
    """The main ``tractor`` routine.
    """
    async with tractor.open_nursery() as n:

    '''
    with trio.fail_after(1):
        async with tractor.open_nursery(
            debug_mode=debug_mode,
        ) as n:
            portal = await n.run_in_actor(
                cellar_door,
                return_value=return_value,
                name='some_linguist',
            )
        portal = await n.run_in_actor(
            cellar_door,
            name='some_linguist',
        )

    print(await portal.result())
    # The ``async with`` will unblock here since the 'some_linguist'
    # actor has completed its main task ``cellar_door``.

    # this should pull the cached final result already captured during
    # the nursery block exit.
    print(await portal.result())


@@ -145,9 +120,9 @@ async def check_loglevel(level):
def test_loglevel_propagated_to_subactor(
    start_method,
    capfd,
    reg_addr,
    arb_addr,
):
    if start_method == 'mp_forkserver':
    if start_method == 'forkserver':
        pytest.skip(
            "a bug with `capfd` seems to make forkserver capture not work?")

@@ -156,13 +131,13 @@ def test_loglevel_propagated_to_subactor(
    async def main():
        async with tractor.open_nursery(
            name='arbiter',
            loglevel=level,
            start_method=start_method,
            arbiter_addr=reg_addr,
            arbiter_addr=arb_addr,

        ) as tn:
            await tn.run_in_actor(
                check_loglevel,
                loglevel=level,
                level=level,
            )


@@ -7,10 +7,9 @@ import platform

import trio
import tractor
from tractor.testing import tractor_test
import pytest

from tractor._testing import tractor_test


def test_must_define_ctx():


@@ -38,13 +37,10 @@ async def async_gen_stream(sequence):
    assert cs.cancelled_caught


# TODO: deprecated either remove entirely
# or re-impl in terms of `MsgStream` one-sides
# wrapper, but at least remove `Portal.open_stream_from()`
@tractor.stream
async def context_stream(
    ctx: tractor.Context,
    sequence: list[int],
    sequence
):
    for i in sequence:
        await ctx.send_yield(i)

@@ -58,7 +54,7 @@ async def context_stream(


async def stream_from_single_subactor(
    reg_addr,
    arb_addr,
    start_method,
    stream_func,
):

@@ -67,7 +63,7 @@ async def stream_from_single_subactor(
    # only one per host address, spawns an actor if None

    async with tractor.open_nursery(
        registry_addrs=[reg_addr],
        arbiter_addr=arb_addr,
        start_method=start_method,
    ) as nursery:


@@ -83,48 +79,45 @@ async def stream_from_single_subactor(

        seq = range(10)

        with trio.fail_after(5):
            async with portal.open_stream_from(
                stream_func,
                sequence=list(seq),  # has to be msgpack serializable
            ) as stream:
        async with portal.open_stream_from(
            stream_func,
            sequence=list(seq),  # has to be msgpack serializable
        ) as stream:

                # it'd sure be nice to have an asyncitertools here...
                iseq = iter(seq)
                ival = next(iseq)
            # it'd sure be nice to have an asyncitertools here...
            iseq = iter(seq)
            ival = next(iseq)

                async for val in stream:
                    assert val == ival
            async for val in stream:
                assert val == ival

                try:
                    ival = next(iseq)
                except StopIteration:
                    # should cancel far end task which will be
                    # caught and no error is raised
                    await stream.aclose()

                await trio.sleep(0.3)

                # ensure EOC signalled-state translates
                # XXX: not really sure this is correct,
                # shouldn't it be a `ClosedResourceError`?
                try:
                    await stream.__anext__()
                except StopAsyncIteration:
                    # stop all spawned subactors
                    await portal.cancel_actor()
            try:
                ival = next(iseq)
            except StopIteration:
                # should cancel far end task which will be
                # caught and no error is raised
                await stream.aclose()

            await trio.sleep(0.3)

            try:
                await stream.__anext__()
            except StopAsyncIteration:
                # stop all spawned subactors
                await portal.cancel_actor()
            # await nursery.cancel()


@pytest.mark.parametrize(
    'stream_func', [async_gen_stream, context_stream]
)
def test_stream_from_single_subactor(reg_addr, start_method, stream_func):
def test_stream_from_single_subactor(arb_addr, start_method, stream_func):
    """Verify streaming from a spawned async generator.
    """
    trio.run(
        partial(
            stream_from_single_subactor,
            reg_addr,
            arb_addr,
            start_method,
            stream_func=stream_func,
        ),

@@ -139,7 +132,7 @@ async def stream_data(seed):
        yield i

        # trigger scheduler to simulate practical usage
        await trio.sleep(0.0001)
        await trio.sleep(0)


# this is the third actor; the aggregator

@@ -228,14 +221,14 @@ async def a_quadruple_example():
    return result_stream


async def cancel_after(wait, reg_addr):
    async with tractor.open_root_actor(registry_addrs=[reg_addr]):
async def cancel_after(wait, arb_addr):
    async with tractor.open_root_actor(arbiter_addr=arb_addr):
        with trio.move_on_after(wait):
            return await a_quadruple_example()


@pytest.fixture(scope='module')
def time_quad_ex(reg_addr, ci_env, spawn_backend):
def time_quad_ex(arb_addr, ci_env, spawn_backend):
    if spawn_backend == 'mp':
        """no idea but the mp *nix runs are flaking out here often...
        """

@@ -243,7 +236,7 @@ def time_quad_ex(reg_addr, ci_env, spawn_backend):

    timeout = 7 if platform.system() in ('Windows', 'Darwin') else 4
    start = time.time()
    results = trio.run(cancel_after, timeout, reg_addr)
    results = trio.run(cancel_after, timeout, arb_addr)
    diff = time.time() - start
    assert results
    return results, diff

@@ -254,7 +247,7 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend):

    results, diff = time_quad_ex
    assert results
    this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 3
    this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 2.5
    assert diff < this_fast


@@ -263,14 +256,14 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend):
    list(map(lambda i: i/10, range(3, 9)))
)
def test_not_fast_enough_quad(
    reg_addr, time_quad_ex, cancel_delay, ci_env, spawn_backend
    arb_addr, time_quad_ex, cancel_delay, ci_env, spawn_backend
):
    """Verify we can cancel midway through the quad example and all actors
    cancel gracefully.
    """
    results, diff = time_quad_ex
    delay = max(diff - cancel_delay, 0)
    results = trio.run(cancel_after, delay, reg_addr)
    results = trio.run(cancel_after, delay, arb_addr)
    system = platform.system()
    if system in ('Windows', 'Darwin') and results is not None:
        # In CI environments it seems later runs are quicker than the first

@@ -283,7 +276,7 @@ def test_not_fast_enough_quad(

@tractor_test
async def test_respawn_consumer_task(
    reg_addr,
    arb_addr,
    spawn_backend,
    loglevel,
):

@@ -320,12 +313,12 @@ async def test_respawn_consumer_task(
        task_status.started(cs)

        # shield stream's underlying channel from cancellation
        # with stream.shield():
        with stream.shield():

            async for v in stream:
                print(f'from stream: {v}')
                expect.remove(v)
                received.append(v)
        async for v in stream:
            print(f'from stream: {v}')
            expect.remove(v)
            received.append(v)

        print('exited consume')
@@ -1,521 +0,0 @@
"""
Broadcast channels for fan-out to local tasks.

"""
from contextlib import (
    asynccontextmanager as acm,
)
from functools import partial
from itertools import cycle
import time
from typing import Optional

import pytest
import trio
from trio.lowlevel import current_task
import tractor
from tractor.trionics import (
    broadcast_receiver,
    Lagged,
    collapse_eg,
)


@tractor.context
async def echo_sequences(

    ctx: tractor.Context,

) -> None:
    '''Bidir streaming endpoint which will stream
    back any sequence it is sent item-wise.

    '''
    await ctx.started()

    async with ctx.open_stream() as stream:
        async for sequence in stream:
            seq = list(sequence)
            for value in seq:
                await stream.send(value)
                print(f'producer sent {value}')


async def ensure_sequence(

    stream: tractor.MsgStream,
    sequence: list,
    delay: Optional[float] = None,

) -> None:

    name = current_task().name
    async with stream.subscribe() as bcaster:
        assert not isinstance(bcaster, type(stream))
        async for value in bcaster:
            print(f'{name} rx: {value}')
            assert value == sequence[0]
            sequence.remove(value)

            if delay:
                await trio.sleep(delay)

            if not sequence:
                # fully consumed
                break


@acm
async def open_sequence_streamer(

    sequence: list[int],
    reg_addr: tuple[str, int],
    start_method: str,

) -> tractor.MsgStream:

    async with tractor.open_nursery(
        arbiter_addr=reg_addr,
        start_method=start_method,
    ) as an:

        portal = await an.start_actor(
            'sequence_echoer',
            enable_modules=[__name__],
        )

        async with portal.open_context(
            echo_sequences,
        ) as (ctx, first):

            assert first is None
            async with ctx.open_stream(allow_overruns=True) as stream:
                yield stream

        await portal.cancel_actor()


def test_stream_fan_out_to_local_subscriptions(
    reg_addr,
    start_method,
):

    sequence = list(range(1000))

    async def main():

        async with open_sequence_streamer(
            sequence,
            reg_addr,
            start_method,
        ) as stream:

            async with trio.open_nursery() as n:
                for i in range(10):
                    n.start_soon(
                        ensure_sequence,
                        stream,
                        sequence.copy(),
                        name=f'consumer_{i}',
                    )

                await stream.send(tuple(sequence))

                async for value in stream:
                    print(f'source stream rx: {value}')
                    assert value == sequence[0]
                    sequence.remove(value)

                    if not sequence:
                        # fully consumed
                        break

    trio.run(main)


@pytest.mark.parametrize(
    'task_delays',
    [
        (0.01, 0.001),
        (0.001, 0.01),
    ]
)
def test_consumer_and_parent_maybe_lag(
    reg_addr,
    start_method,
    task_delays,
):

    async def main():

        sequence = list(range(300))
        parent_delay, sub_delay = task_delays

        async with open_sequence_streamer(
            sequence,
            reg_addr,
            start_method,
        ) as stream:

            try:
                async with (
                    collapse_eg(),
                    trio.open_nursery() as tn,
                ):

                    tn.start_soon(
                        ensure_sequence,
                        stream,
                        sequence.copy(),
                        sub_delay,
                        name='consumer_task',
                    )

                    await stream.send(tuple(sequence))

                    # async for value in stream:
                    lagged = False
                    lag_count = 0

                    while True:
                        try:
                            value = await stream.receive()
                            print(f'source stream rx: {value}')

                            if lagged:
                                # re set the sequence starting at our last
                                # value
                                sequence = sequence[sequence.index(value) + 1:]
                            else:
                                assert value == sequence[0]
                                sequence.remove(value)

                            lagged = False

                        except Lagged:
                            lagged = True
                            print(f'source stream lagged after {value}')
                            lag_count += 1
                            continue

                        # lag the parent
                        await trio.sleep(parent_delay)

                        if not sequence:
                            # fully consumed
                            break
                    print(f'parent + source stream lagged: {lag_count}')

                    if parent_delay > sub_delay:
                        assert lag_count > 0

            except Lagged:
                # child was lagged
                assert parent_delay < sub_delay

    trio.run(main)


def test_faster_task_to_recv_is_cancelled_by_slower(
    reg_addr,
    start_method,
):
    '''
    Ensure that if a faster task consuming from a stream is cancelled
    the slower task can continue to receive all expected values.

    '''
    async def main():

        sequence = list(range(1000))

        async with open_sequence_streamer(
            sequence,
            reg_addr,
            start_method,

        ) as stream:

            async with trio.open_nursery() as tn:
                tn.start_soon(
                    ensure_sequence,
                    stream,
                    sequence.copy(),
                    0,
                    name='consumer_task',
                )

                await stream.send(tuple(sequence))

                # pull 3 values, cancel the subtask, then
                # expect to be able to pull all values still
                for i in range(20):
                    try:
                        value = await stream.receive()
                        print(f'source stream rx: {value}')
                        await trio.sleep(0.01)
                    except Lagged:
                        print(f'parent overrun after {value}')
                        continue

                print('cancelling faster subtask')
                tn.cancel_scope.cancel()

                try:
                    value = await stream.receive()
                    print(f'source stream after cancel: {value}')
                except Lagged:
                    print(f'parent overrun after {value}')

                # expect to see all remaining values
                with trio.fail_after(0.5):
                    async for value in stream:
                        assert stream._broadcaster._state.recv_ready is None
                        print(f'source stream rx: {value}')
                        if value == 999:
                            # fully consumed and we missed no values once
                            # the faster subtask was cancelled
                            break

                    # await tractor.pause()
                    # await stream.receive()
                    print(f'final value: {value}')

    trio.run(main)


def test_subscribe_errors_after_close():

    async def main():

        size = 1
        tx, rx = trio.open_memory_channel(size)
        async with broadcast_receiver(rx, size) as brx:
            pass

        try:
            # open and close
            async with brx.subscribe():
                pass

        except trio.ClosedResourceError:
            assert brx.key not in brx._state.subs

        else:
            assert 0

    trio.run(main)


def test_ensure_slow_consumers_lag_out(
    reg_addr,
    start_method,
):
    '''This is a pure local task test; no tractor
    machinery is really required.

    '''
    async def main():

        # make sure it all works within the runtime
        async with tractor.open_root_actor():

            num_laggers = 4
            laggers: dict[str, int] = {}
            retries = 3
            size = 100
            tx, rx = trio.open_memory_channel(size)
            brx = broadcast_receiver(rx, size)

            async def sub_and_print(
                delay: float,
            ) -> None:

                task = current_task()
                start = time.time()

                async with brx.subscribe() as lbrx:
                    while True:
                        print(f'{task.name}: starting consume loop')
                        try:
                            async for value in lbrx:
                                print(f'{task.name}: {value}')
                                await trio.sleep(delay)

                            if task.name == 'sub_1':
                                # trigger checkpoint to clean out other subs
                                await trio.sleep(0.01)

                                # the non-lagger got
                                # a ``trio.EndOfChannel``
                                # because the ``tx`` below was closed
                                assert len(lbrx._state.subs) == 1

                                await lbrx.aclose()

                                assert len(lbrx._state.subs) == 0

                        except trio.ClosedResourceError:
                            # only the fast sub will try to re-enter
                            # iteration on the now closed bcaster
                            assert task.name == 'sub_1'
                            return

                        except Lagged:
                            lag_time = time.time() - start
                            lags = laggers[task.name]
                            print(
                                f'restarting slow task {task.name} '
                                f'that bailed out on {lags}:{value} '
                                f'after {lag_time:.3f}')
                            if lags <= retries:
                                laggers[task.name] += 1
                                continue
                            else:
                                print(
                                    f'{task.name} was too slow and terminated '
                                    f'on {lags}:{value}')
                                return

            async with trio.open_nursery() as tn:

                for i in range(1, num_laggers):

                    task_name = f'sub_{i}'
                    laggers[task_name] = 0
                    tn.start_soon(
                        partial(
                            sub_and_print,
                            delay=i*0.001,
                        ),
                        name=task_name,
                    )

                # allow subs to sched
                await trio.sleep(0.1)

                async with tx:
                    for i in cycle(range(size)):
                        await tx.send(i)
                        if len(brx._state.subs) == 2:
                            # only one, the non lagger, sub is left
                            break

                # the non-lagger
                assert laggers.pop('sub_1') == 0

                for n, v in laggers.items():
                    assert v == 4

                assert tx._closed
                assert not tx._state.open_send_channels

                # check that "first" bcaster that we created
                # above, never was iterated and is thus overrun
                try:
                    await brx.receive()
                except Lagged:
                    # expect tokio style index truncation
                    seq = brx._state.subs[brx.key]
                    assert seq == len(brx._state.queue) - 1

                # all no_overruns entries in the underlying
                # channel should have been copied into the bcaster
                # queue trailing-window
                async for i in rx:
                    print(f'bped: {i}')
                    assert i in brx._state.queue

                # should be noop
                await brx.aclose()

    trio.run(main)


def test_first_recver_is_cancelled():

    async def main():

        # make sure it all works within the runtime
        async with tractor.open_root_actor():

            tx, rx = trio.open_memory_channel(1)
            brx = broadcast_receiver(rx, 1)
            cs = trio.CancelScope()

            async def sub_and_recv():
                with cs:
                    async with brx.subscribe() as bc:
                        async for value in bc:
                            print(value)

            async def cancel_and_send():
                await trio.sleep(0.2)
                cs.cancel()
                await tx.send(1)

            async with trio.open_nursery() as n:

                n.start_soon(sub_and_recv)
                await trio.sleep(0.1)
                assert brx._state.recv_ready

                n.start_soon(cancel_and_send)

                # ensure that we don't hang because no-task is now
                # waiting on the underlying receive..
                with trio.fail_after(0.5):
                    value = await brx.receive()
                    print(f'parent: {value}')
                    assert value == 1

    trio.run(main)


def test_no_raise_on_lag():
    '''
    Run a simple 2-task broadcast where one task is slow but configured
    so that it does not raise `Lagged` on overruns using
    `raise_on_lag=False` and verify that the task does not raise.

    '''
    size = 100
    tx, rx = trio.open_memory_channel(size)
    brx = broadcast_receiver(rx, size)

    async def slow():
        async with brx.subscribe(
            raise_on_lag=False,
        ) as br:
            async for msg in br:
                print(f'slow task got: {msg}')
                await trio.sleep(0.1)

    async def fast():
        async with brx.subscribe() as br:
            async for msg in br:
                print(f'fast task got: {msg}')

    async def main():
        async with (
            tractor.open_root_actor(
                # NOTE: so we see the warning msg emitted by the bcaster
                # internals when the no raise flag is set.
                loglevel='warning',
            ),
            collapse_eg(),
            trio.open_nursery() as n,
        ):
            n.start_soon(slow)
            n.start_soon(fast)

            for i in range(1000):
                await tx.send(i)

            # simulate user nailing ctl-c after realizing
            # there's a lag in the slow task.
            await trio.sleep(1)
            raise KeyboardInterrupt

    with pytest.raises(KeyboardInterrupt):
        trio.run(main)
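As the last two tests demonstrate, `broadcast_receiver()` also works as a purely local fan-out primitive with no IPC involved. A hedged, minimal sketch wrapping a memory channel, where the slow subscriber uses `raise_on_lag=False` to silently skip ahead on overruns instead of raising `Lagged` (uses only APIs exercised in the file above):

# Hedged sketch: local-only broadcast fan-out with a lag-tolerant
# slow subscriber alongside a fast one.
import trio
from tractor.trionics import broadcast_receiver

async def main():
    tx, rx = trio.open_memory_channel(100)
    brx = broadcast_receiver(rx, 100)

    async def slow():
        # never raises `Lagged`; on overrun it skips to the newest value
        async with brx.subscribe(raise_on_lag=False) as sub:
            async for msg in sub:
                await trio.sleep(0.01)

    async def fast():
        async with brx.subscribe() as sub:
            async for msg in sub:
                print(f'fast task got: {msg}')

    async with trio.open_nursery() as n:
        n.start_soon(slow)
        n.start_soon(fast)
        for i in range(1000):
            await tx.send(i)
        await trio.sleep(0.5)
        n.cancel_scope.cancel()

trio.run(main)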
@@ -1,203 +0,0 @@
'''
Reminders for oddities in `trio` that we need to stay aware of and/or
want to see changed.

'''
from contextlib import (
    asynccontextmanager as acm,
)

import pytest
import trio
from trio import TaskStatus


@pytest.mark.parametrize(
    'use_start_soon', [
        pytest.param(
            True,
            marks=pytest.mark.xfail(reason="see python-trio/trio#2258")
        ),
        False,
    ]
)
def test_stashed_child_nursery(use_start_soon):

    _child_nursery = None

    async def waits_on_signal(
        ev: trio.Event,
        task_status: TaskStatus[trio.Nursery] = trio.TASK_STATUS_IGNORED,
    ):
        '''
        Do some stuff, then signal other tasks, then yield back to "starter".

        '''
        await ev.wait()
        task_status.started()

    async def mk_child_nursery(
        task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
    ):
        '''
        Allocate a child sub-nursery and stash it as a global.

        '''
        nonlocal _child_nursery

        async with trio.open_nursery() as cn:
            _child_nursery = cn
            task_status.started(cn)

            # block until cancelled by parent.
            await trio.sleep_forever()

    async def sleep_and_err(
        ev: trio.Event,
        task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
    ):
        await trio.sleep(0.5)
        doggy()  # noqa
        ev.set()
        task_status.started()

    async def main():

        async with (
            trio.open_nursery(
                strict_exception_groups=False,
            ) as pn,
        ):
            cn = await pn.start(mk_child_nursery)
            assert cn

            ev = trio.Event()

            if use_start_soon:
                # this causes inf hang
                cn.start_soon(sleep_and_err, ev)

            else:
                # this does not.
                await cn.start(sleep_and_err, ev)

            with trio.fail_after(1):
                await cn.start(waits_on_signal, ev)

    with pytest.raises(NameError):
        trio.run(main)


@pytest.mark.parametrize(
    ('unmask_from_canc', 'canc_from_finally'),
    [
        (True, False),
        (True, True),
        pytest.param(False, True,
                     marks=pytest.mark.xfail(reason="never raises!")
        ),
    ],
    # TODO, ask ronny how to impl this .. XD
    # ids='unmask_from_canc={0}, canc_from_finally={1}',#.format,
)
def test_acm_embedded_nursery_propagates_enter_err(
    canc_from_finally: bool,
    unmask_from_canc: bool,
    debug_mode: bool,
):
    '''
    Demo how a masking `trio.Cancelled` could be handled by unmasking from the
    `.__context__` field when a user (by accident) re-raises from a `finally:`.

    '''
    import tractor

    @acm
    async def maybe_raise_from_masking_exc(
        tn: trio.Nursery,
        unmask_from: BaseException|None = trio.Cancelled

        # TODO, maybe offer a collection?
        # unmask_from: set[BaseException] = {
        #     trio.Cancelled,
        # },
    ):
        if not unmask_from:
            yield
            return

        try:
            yield
        except* unmask_from as be_eg:

            # TODO, if we offer `unmask_from: set`
            # for masker_exc_type in unmask_from:

            matches, rest = be_eg.split(unmask_from)
            if not matches:
                raise

            for exc_match in be_eg.exceptions:
                if (
                    (exc_ctx := exc_match.__context__)
                    and
                    type(exc_ctx) not in {
                        # trio.Cancelled,  # always by default?
                        unmask_from,
                    }
                ):
                    exc_ctx.add_note(
                        f'\n'
                        f'WARNING: the above error was masked by a {unmask_from!r} !?!\n'
                        f'Are you always cancelling? Say from a `finally:` ?\n\n'

                        f'{tn!r}'
                    )
                    raise exc_ctx from exc_match

    @acm
    async def wraps_tn_that_always_cancels():
        async with (
            trio.open_nursery() as tn,
            maybe_raise_from_masking_exc(
                tn=tn,
                unmask_from=(
                    trio.Cancelled
                    if unmask_from_canc
                    else None
                ),
            )
        ):
            try:
                yield tn
            finally:
                if canc_from_finally:
                    tn.cancel_scope.cancel()
                    await trio.lowlevel.checkpoint()

    async def _main():
        with tractor.devx.maybe_open_crash_handler(
            pdb=debug_mode,
        ) as bxerr:
            assert not bxerr.value

            async with (
                wraps_tn_that_always_cancels() as tn,
            ):
                assert not tn.cancel_scope.cancel_called
                assert 0

        assert (
            (err := bxerr.value)
            and
            type(err) is AssertionError
        )

    with pytest.raises(ExceptionGroup) as excinfo:
        trio.run(_main)

    eg: ExceptionGroup = excinfo.value
    assert_eg, rest_eg = eg.split(AssertionError)

    assert len(assert_eg.exceptions) == 1
@@ -1,70 +1,52 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
tractor: structured concurrent ``trio``-"actors".

tractor: An actor model micro-framework built on
``trio`` and ``multiprocessing``.
"""
from trio import MultiError

from ._clustering import (
    open_actor_cluster as open_actor_cluster,
)
from ._context import (
    Context as Context,  # the type
    context as context,  # a func-decorator
)
from ._ipc import Channel
from ._streaming import (
    MsgStream as MsgStream,
    stream as stream,
)
from ._discovery import (
    get_registry as get_registry,
    find_actor as find_actor,
    wait_for_actor as wait_for_actor,
    query_actor as query_actor,
)
from ._supervise import (
    open_nursery as open_nursery,
    ActorNursery as ActorNursery,
)
from ._state import (
    current_actor as current_actor,
    is_root_process as is_root_process,
    current_ipc_ctx as current_ipc_ctx,
    debug_mode as debug_mode
    Context,
    ReceiveMsgStream,
    MsgStream,
    stream,
    context,
)
from ._discovery import get_arbiter, find_actor, wait_for_actor
from ._trionics import open_nursery
from ._state import current_actor, is_root_process
from ._exceptions import (
    ContextCancelled as ContextCancelled,
    ModuleNotExposed as ModuleNotExposed,
    MsgTypeError as MsgTypeError,
    RemoteActorError as RemoteActorError,
    TransportClosed as TransportClosed,
    RemoteActorError,
    ModuleNotExposed,
    ContextCancelled,
)
from .devx import (
    breakpoint as breakpoint,
    pause as pause,
    pause_from_sync as pause_from_sync,
    post_mortem as post_mortem,
)
from . import msg as msg
from ._root import (
    run_daemon as run_daemon,
    open_root_actor as open_root_actor,
)
from ._ipc import Channel as Channel
from ._portal import Portal as Portal
from ._runtime import Actor as Actor
# from . import hilevel as hilevel
from ._debug import breakpoint, post_mortem
from . import msg
from ._root import run, run_daemon, open_root_actor


__all__ = [
    'Channel',
    'Context',
    'ModuleNotExposed',
    'MultiError',
    'RemoteActorError',
    'ContextCancelled',
    'breakpoint',
    'current_actor',
    'find_actor',
    'get_arbiter',
    'is_root_process',
    'msg',
    'open_nursery',
    'open_root_actor',
    'post_mortem',
    'run',
    'run_daemon',
    'stream',
    'context',
    'ReceiveMsgStream',
    'MsgStream',
    'to_asyncio',
    'wait_for_actor',
]
File diff suppressed because it is too large
@@ -1,28 +1,12 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
This is the "bootloader" for actors started using the native trio backend.

"""This is the "bootloader" for actors started using the native trio backend.
"""
import sys
import trio
import argparse

from ast import literal_eval

from ._runtime import Actor
from ._actor import Actor
from ._entry import _trio_main


@@ -36,13 +20,11 @@ def parse_ipaddr(arg):


if __name__ == "__main__":
    __tracebackhide__: bool = True

    parser = argparse.ArgumentParser()
    parser.add_argument("--uid", type=parse_uid)
    parser.add_argument("--loglevel", type=str)
    parser.add_argument("--parent_addr", type=parse_ipaddr)
    parser.add_argument("--asyncio", action='store_true')
    args = parser.parse_args()

    subactor = Actor(

@@ -54,6 +36,5 @@ if __name__ == "__main__":

    _trio_main(
        subactor,
        parent_addr=args.parent_addr,
        infect_asyncio=args.asyncio,
    )
        parent_addr=args.parent_addr
    )
@@ -1,77 +0,0 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

'''
Actor cluster helpers.

'''
from __future__ import annotations
from contextlib import (
    asynccontextmanager as acm,
)
from multiprocessing import cpu_count
from typing import (
    AsyncGenerator,
)

import trio
import tractor


@acm
async def open_actor_cluster(
    modules: list[str],
    count: int = cpu_count(),
    names: list[str] | None = None,
    hard_kill: bool = False,

    # passed through verbatim to ``open_root_actor()``
    **runtime_kwargs,

) -> AsyncGenerator[
    dict[str, tractor.Portal],
    None,
]:

    portals: dict[str, tractor.Portal] = {}

    if not names:
        names = [f'worker_{i}' for i in range(count)]
    if not len(names) == count:
        raise ValueError(
            f'Number of names is {len(names)} but count is {count}')

    async with tractor.open_nursery(
        **runtime_kwargs,
    ) as an:
        async with trio.open_nursery() as n:
            uid = tractor.current_actor().uid

            async def _start(name: str) -> None:
                name = f'{uid[0]}.{name}'
                portals[name] = await an.start_actor(
                    enable_modules=modules,
                    name=name,
                )

            for name in names:
                n.start_soon(_start, name)

        assert len(portals) == count
        yield portals

        await an.cancel(hard_kill=hard_kill)
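As an aside, a minimal usage sketch of the helper above, assuming it is exported at package level as `tractor.open_actor_cluster`; the `mymod` module and its async `work()` function are hypothetical stand-ins for real worker code:

    import trio
    import tractor

    import mymod  # hypothetical module exposing an async `work()` fn

    async def main():
        # spawn a cpu_count()-sized cluster with `mymod` enabled in
        # every worker, then fan a call out to each portal
        async with tractor.open_actor_cluster(
            modules=['mymod'],
        ) as portals:
            async with trio.open_nursery() as n:
                for name, portal in portals.items():
                    n.start_soon(portal.run, mymod.work)

    trio.run(main)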
|
2618
tractor/_context.py
2618
tractor/_context.py
File diff suppressed because it is too large
Load Diff
|
@@ -0,0 +1,500 @@
"""
Multi-core debugging for da peeps!

"""
import bdb
import sys
from functools import partial
from contextlib import asynccontextmanager
from typing import Tuple, Optional, Callable, AsyncIterator

import tractor
import trio

from .log import get_logger
from . import _state
from ._discovery import get_root
from ._state import is_root_process
from ._exceptions import is_multi_cancelled

try:
    # wtf: only exported when installed in dev mode?
    import pdbpp
except ImportError:
    # pdbpp is installed in regular mode...it monkey patches stuff
    import pdb
    assert pdb.xpm, "pdbpp is not installed?"  # type: ignore
    pdbpp = pdb

log = get_logger(__name__)


__all__ = ['breakpoint', 'post_mortem']


# TODO: wrap all these in a static global class: ``DebugLock`` maybe?

# placeholder for function to set a ``trio.Event`` on debugger exit
_pdb_release_hook: Optional[Callable] = None

# actor-wide variable pointing to current task name using debugger
_local_task_in_debug: Optional[str] = None

# actor tree-wide actor uid that supposedly has the tty lock
_global_actor_in_debug: Optional[Tuple[str, str]] = None

# lock in root actor preventing multi-access to local tty
_debug_lock: trio.StrictFIFOLock = trio.StrictFIFOLock()
_local_pdb_complete: Optional[trio.Event] = None
_no_remote_has_tty: Optional[trio.Event] = None

# XXX: set by the current task waiting on the root tty lock
# and must be cancelled if this actor is cancelled via message
# otherwise deadlocks with the parent actor may ensue
_debugger_request_cs: Optional[trio.CancelScope] = None
class TractorConfig(pdbpp.DefaultConfig):
    """Custom ``pdbpp`` goodness.
    """
    # sticky_by_default = True


class PdbwTeardown(pdbpp.Pdb):
    """Add teardown hooks to the regular ``pdbpp.Pdb``.
    """
    # override the pdbpp config with our coolio one
    DefaultConfig = TractorConfig

    # TODO: figure out how to disallow recursive .set_trace() entry
    # since that'll cause deadlock for us.
    def set_continue(self):
        try:
            super().set_continue()
        finally:
            global _local_task_in_debug
            _local_task_in_debug = None
            _pdb_release_hook()

    def set_quit(self):
        try:
            super().set_quit()
        finally:
            global _local_task_in_debug
            _local_task_in_debug = None
            _pdb_release_hook()
# TODO: will be needed whenever we get to true remote debugging.
# XXX see https://github.com/goodboy/tractor/issues/130

# # TODO: is there some way to determine this programmatically?
# _pdb_exit_patterns = tuple(
#     str.encode(patt + "\n") for patt in (
#         'c', 'cont', 'continue', 'q', 'quit')
# )

# def subactoruid2proc(
#     actor: 'Actor',  # noqa
#     uid: Tuple[str, str]
# ) -> trio.Process:
#     n = actor._actoruid2nursery[uid]
#     _, proc, _ = n._children[uid]
#     return proc

# async def hijack_stdin():
#     log.info(f"Hijacking stdin from {actor.uid}")

#     # trap std in and relay to subproc
#     async_stdin = trio.wrap_file(sys.stdin)

#     async with aclosing(async_stdin):
#         async for msg in async_stdin:
#             log.runtime(f"Stdin input:\n{msg}")
#             # encode to bytes
#             bmsg = str.encode(msg)

#             # relay bytes to subproc over pipe
#             # await proc.stdin.send_all(bmsg)

#             if bmsg in _pdb_exit_patterns:
#                 log.info("Closing stdin hijack")
#                 break
@asynccontextmanager
async def _acquire_debug_lock(uid: Tuple[str, str]) -> AsyncIterator[None]:
    '''Acquire an actor-local FIFO lock meant to mutex entry to a local
    debugger entry point to avoid tty clobbering a global root process.

    '''
    global _debug_lock, _global_actor_in_debug, _no_remote_has_tty

    task_name = trio.lowlevel.current_task().name

    log.pdb(
        f"Attempting to acquire TTY lock, remote task: {task_name}:{uid}"
    )

    we_acquired = False

    if _no_remote_has_tty is None:
        # mark the tty lock as being in use so that the runtime
        # can try to avoid clobbering any connection from a child
        # that's currently relying on it.
        _no_remote_has_tty = trio.Event()

    try:
        log.debug(
            f"entering lock checkpoint, remote task: {task_name}:{uid}"
        )
        we_acquired = True
        await _debug_lock.acquire()

        # we_acquired = True

        _global_actor_in_debug = uid
        log.debug(f"TTY lock acquired, remote task: {task_name}:{uid}")

        # NOTE: critical section!
        # this yield is unshielded.
        # IF we received a cancel during the shielded lock
        # entry of some next-in-queue requesting task,
        # then the resumption here will result in that
        # Cancelled being raised to our caller below!

        # in this case the finally below should trigger
        # and the surrounding caller side context should cancel
        # normally relaying back to the caller.

        yield _debug_lock

    finally:
        # if _global_actor_in_debug == uid:
        if we_acquired and _debug_lock.locked():
            _debug_lock.release()

        # IFF there are no more requesting tasks queued up, fire the
        # "tty-unlocked" event thereby alerting any monitors of the lock that
        # we are now back in the "tty unlocked" state. This is basically
        # an edge-triggered signal around an empty queue of sub-actor
        # tasks that may have tried to acquire the lock.
        stats = _debug_lock.statistics()
        if (
            not stats.owner
        ):
            log.pdb(f"No more tasks waiting on tty lock! says {uid}")
            _no_remote_has_tty.set()
            _no_remote_has_tty = None

        _global_actor_in_debug = None

        log.debug(f"TTY lock released, remote task: {task_name}:{uid}")
# @contextmanager
# def _disable_sigint():
#     try:
#         # disable sigint handling while in debug
#         prior_handler = signal.signal(signal.SIGINT, handler)
#         yield
#     finally:
#         # restore SIGINT handling
#         signal.signal(signal.SIGINT, prior_handler)


@tractor.context
async def _hijack_stdin_relay_to_child(

    ctx: tractor.Context,
    subactor_uid: Tuple[str, str]

) -> str:
    '''Hijack the tty in the root process of an actor tree such that
    the pdbpp debugger console can be allocated to a sub-actor for repl
    bossing.

    '''
    task_name = trio.lowlevel.current_task().name

    # TODO: when we get to true remote debugging
    # this will deliver stdin data?

    log.debug(
        "Attempting to acquire TTY lock\n"
        f"remote task: {task_name}:{subactor_uid}"
    )

    log.debug(f"Actor {subactor_uid} is WAITING on stdin hijack lock")

    with trio.CancelScope(shield=True):

        async with _acquire_debug_lock(subactor_uid):

            # indicate to child that we've locked stdio
            await ctx.started('Locked')
            log.pdb(  # type: ignore
                f"Actor {subactor_uid} ACQUIRED stdin hijack lock")

            # wait for unlock pdb by child
            async with ctx.open_stream() as stream:
                try:
                    assert await stream.receive() == 'pdb_unlock'

                except trio.BrokenResourceError:
                    # XXX: there may be a race with the portal teardown
                    # with the calling actor which we can safely ignore
                    # the alternative would be sending an ack message
                    # and allowing the client to wait for us to teardown
                    # first?
                    pass

    log.debug(
        f"TTY lock released, remote task: {task_name}:{subactor_uid}")

    return "pdb_unlock_complete"
async def _breakpoint(

    debug_func,

    # TODO:
    # shield: bool = False

) -> None:
    '''``tractor`` breakpoint entry for engaging pdb machinery
    in the root or a subactor.

    '''
    # TODO: is it possible to debug a trio.Cancelled except block?
    # right now it seems like we can kinda do it by shielding
    # around ``tractor.breakpoint()`` but not if we move the shielded
    # scope here???
    # with trio.CancelScope(shield=shield):

    actor = tractor.current_actor()
    task_name = trio.lowlevel.current_task().name

    global _local_pdb_complete, _pdb_release_hook
    global _local_task_in_debug, _global_actor_in_debug

    await trio.lowlevel.checkpoint()

    async def wait_for_parent_stdin_hijack(
        task_status=trio.TASK_STATUS_IGNORED
    ):
        global _debugger_request_cs

        with trio.CancelScope(shield=True) as cs:
            _debugger_request_cs = cs

            try:
                async with get_root() as portal:

                    log.error('got portal')

                    # this syncs to child's ``Context.started()`` call.
                    async with portal.open_context(

                        tractor._debug._hijack_stdin_relay_to_child,
                        subactor_uid=actor.uid,

                    ) as (ctx, val):

                        log.error('locked context')
                        assert val == 'Locked'

                        async with ctx.open_stream() as stream:

                            log.error('opened stream')
                            # unblock local caller
                            task_status.started()

                            try:
                                await _local_pdb_complete.wait()

                            finally:
                                # TODO: shielding currently can cause hangs...
                                with trio.CancelScope(shield=True):
                                    await stream.send('pdb_unlock')

                        # sync with callee termination
                        assert await ctx.result() == "pdb_unlock_complete"

            except tractor.ContextCancelled:
                log.warning('Root actor cancelled debug lock')

            finally:
                log.debug(f"Exiting debugger for actor {actor}")
                global _local_task_in_debug
                _local_task_in_debug = None
                log.debug(f"Child {actor} released parent stdio lock")

    if not _local_pdb_complete or _local_pdb_complete.is_set():
        _local_pdb_complete = trio.Event()

    # TODO: need a more robust check for the "root" actor
    if actor._parent_chan and not is_root_process():

        if _local_task_in_debug:
            if _local_task_in_debug == task_name:
                # this task already has the lock and is
                # likely recurrently entering a breakpoint
                return

            # if **this** actor is already in debug mode block here
            # waiting for the control to be released - this allows
            # support for recursive entries to `tractor.breakpoint()`
            log.warning(f"{actor.uid} already has a debug lock, waiting...")

            await _local_pdb_complete.wait()
            await trio.sleep(0.1)

        # mark local actor as "in debug mode" to avoid recurrent
        # entries/requests to the root process
        _local_task_in_debug = task_name

        # assign unlock callback for debugger teardown hooks
        _pdb_release_hook = _local_pdb_complete.set

        # this **must** be awaited by the caller and is done using the
        # root nursery so that the debugger can continue to run without
        # being restricted by the scope of a new task nursery.

        # NOTE: if we want to debug a trio.Cancelled triggered exception
        # we have to figure out how to avoid having the service nursery
        # cancel on this task start? I *think* this works below?
        # actor._service_n.cancel_scope.shield = shield
        with trio.CancelScope(shield=True):
            await actor._service_n.start(wait_for_parent_stdin_hijack)

    elif is_root_process():

        # we also wait in the root-parent for any child that
        # may have the tty locked prior
        global _debug_lock

        # TODO: wait, what about multiple root tasks acquiring
        # it though.. shrug?
        # root process (us) already has it; ignore
        if _global_actor_in_debug == actor.uid:
            return

        # XXX: since we need to enter pdb synchronously below,
        # we have to release the lock manually from pdb completion
        # callbacks. Can't think of a nicer way than this atm.
        if _debug_lock.locked():
            log.warning(
                'Root actor attempting to acquire active tty lock'
                f' owned by {_global_actor_in_debug}')

        await _debug_lock.acquire()

        _global_actor_in_debug = actor.uid
        _local_task_in_debug = task_name

        # the lock must be released on pdb completion
        def teardown():
            global _local_pdb_complete, _debug_lock
            global _global_actor_in_debug, _local_task_in_debug

            _debug_lock.release()
            _global_actor_in_debug = None
            _local_task_in_debug = None
            _local_pdb_complete.set()

        _pdb_release_hook = teardown

    # block here once (at the appropriate frame *up* where
    # ``breakpoint()`` was awaited) and begin handling stdio
    log.debug("Entering the synchronous world of pdb")
    debug_func(actor)
def _mk_pdb():
    # XXX: setting these flags on the pdb instance is absolutely
    # critical to having ctrl-c work in the ``trio`` standard way!
    # The stdlib's pdb supports entering the current sync frame
    # on a SIGINT, with ``trio`` we pretty much never want this
    # and if we did we can handle it in the ``tractor`` task runtime.

    pdb = PdbwTeardown()
    pdb.allow_kbdint = True
    pdb.nosigint = True

    return pdb


def _set_trace(actor=None):
    pdb = _mk_pdb()

    if actor is not None:
        log.pdb(f"\nAttaching pdb to actor: {actor.uid}\n")  # type: ignore

        pdb.set_trace(
            # start 2 levels up in user code
            frame=sys._getframe().f_back.f_back,
        )

    else:
        # we entered the global ``breakpoint()`` built-in from sync code
        global _local_task_in_debug, _pdb_release_hook
        _local_task_in_debug = 'sync'

        def nuttin():
            pass

        _pdb_release_hook = nuttin

        pdb.set_trace(
            # start 2 levels up in user code
            frame=sys._getframe().f_back,
        )


breakpoint = partial(
    _breakpoint,
    _set_trace,
)


def _post_mortem(actor):
    log.pdb(f"\nAttaching to pdb in crashed actor: {actor.uid}\n")
    pdb = _mk_pdb()

    # custom Pdb post-mortem entry
    pdbpp.xpm(Pdb=lambda: pdb)


post_mortem = partial(
    _breakpoint,
    _post_mortem,
)


async def _maybe_enter_pm(err):
    if (
        _state.debug_mode()

        # NOTE: don't enter debug mode recursively after quitting pdb
        # Iow, don't re-enter the repl if the `quit` command was issued
        # by the user.
        and not isinstance(err, bdb.BdbQuit)

        # XXX: if the error is the likely result of runtime-wide
        # cancellation, we don't want to enter the debugger since
        # there's races between when the parent actor has killed all
        # comms and when the child tries to contact said parent to
        # acquire the tty lock.

        # Really we just want to mostly avoid catching KBIs here so there
        # might be a simpler check we can do?
        and not is_multi_cancelled(err)
    ):
        log.debug("Actor crashed, entering debug mode")
        await post_mortem()
        return True

    else:
        return False
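The boolean return above implies a particular call-site shape; a hedged sketch of how runtime code might consume it (`run_user_task()` is a hypothetical stand-in for wherever a crashed task is caught):

    try:
        await run_user_task()
    except BaseException as err:
        # engage the repl on crash when debug mode is on,
        # otherwise let the error propagate as usual
        if not await _maybe_enter_pm(err):
            raise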
@@ -1,314 +1,103 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
Discovery (protocols) API for automatic addressing and location
management of (service) actors.

Actor discovery API.
"""
from __future__ import annotations
from typing import (
    AsyncGenerator,
    AsyncContextManager,
    TYPE_CHECKING,
)
from contextlib import asynccontextmanager as acm
import typing
from typing import Tuple, Optional, Union
from async_generator import asynccontextmanager

from tractor.log import get_logger
from .trionics import gather_contexts
from ._ipc import _connect_chan, Channel
from ._portal import (
    Portal,
    open_portal,
    LocalPortal,
)
from ._state import (
    current_actor,
    _runtime_vars,
)

if TYPE_CHECKING:
    from ._runtime import Actor
from ._state import current_actor, _runtime_vars


log = get_logger(__name__)
@asynccontextmanager
async def get_arbiter(


@acm
async def get_registry(
    host: str,
    port: int,

) -> AsyncGenerator[
    Portal | LocalPortal | None,
    None,
]:
) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]:
    '''Return a portal instance connected to a local or remote
    arbiter.
    '''
    Return a portal instance connected to a local or remote
    registry-service actor; if a connection already exists re-use it
    (presumably to call a `.register_actor()` registry runtime RPC
    ep).
    actor = current_actor()

    '''
    actor: Actor = current_actor()
    if actor.is_registrar:
    if not actor:
        raise RuntimeError("No actor instance has been defined yet?")

    if actor.is_arbiter:
        # we're already the arbiter
        # (likely a re-entrant call from the arbiter actor)
        yield LocalPortal(
            actor,
            Channel((host, port))
        )
        yield LocalPortal(actor, Channel((host, port)))
    else:
        # TODO: try to look up a pre-existing connection from
        # `Actor._peers` and use it instead?
        async with (
            _connect_chan(host, port) as chan,
            open_portal(chan) as regstr_ptl,
        ):
            yield regstr_ptl
        async with _connect_chan(host, port) as chan:

            async with open_portal(chan) as arb_portal:

                yield arb_portal



@acm
@asynccontextmanager
async def get_root(
    **kwargs,
) -> AsyncGenerator[Portal, None]:
) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]:

    # TODO: rename mailbox to `_root_maddr` when we finally
    # add and impl libp2p multi-addrs?
    host, port = _runtime_vars['_root_mailbox']
    assert host is not None

    async with (
        _connect_chan(host, port) as chan,
        open_portal(chan, **kwargs) as portal,
    ):
        yield portal
    async with _connect_chan(host, port) as chan:
        async with open_portal(chan, **kwargs) as portal:
            yield portal


def get_peer_by_name(
    name: str,
    # uuid: str|None = None,

) -> list[Channel]|None:  # at least 1
    '''
    Scan for an existing connection (set) to a named actor
    and return any channels from `Actor._peers`.

    This is an optimization method over querying the registrar for
    the same info.

    '''
    actor: Actor = current_actor()
    to_scan: dict[tuple, list[Channel]] = actor._peers.copy()
    pchan: Channel|None = actor._parent_chan
    if pchan:
        to_scan[pchan.uid].append(pchan)

    for aid, chans in to_scan.items():
        _, peer_name = aid
        if name == peer_name:
            if not chans:
                log.warning(
                    f'No IPC chans for matching peer {peer_name}\n'
                )
                continue
            return chans

    return None
@acm
async def query_actor(
    name: str,
    regaddr: tuple[str, int]|None = None,

) -> AsyncGenerator[
    tuple[str, int]|None,
    None,
]:
    '''
    Lookup a transport address (by actor name) via querying a registrar
    listening @ `regaddr`.

    Returns the transport protocol (socket) address or `None` if no
    entry under that name exists.

    '''
    actor: Actor = current_actor()
    if (
        name == 'registrar'
        and actor.is_registrar
    ):
        raise RuntimeError(
            'The current actor IS the registry!?'
        )

    maybe_peers: list[Channel]|None = get_peer_by_name(name)
    if maybe_peers:
        yield maybe_peers[0].raddr
        return

    reg_portal: Portal
    regaddr: tuple[str, int] = regaddr or actor.reg_addrs[0]
    async with get_registry(*regaddr) as reg_portal:
        # TODO: return portals to all available actors - for now
        # just the last one that registered
        sockaddr: tuple[str, int] = await reg_portal.run_from_ns(
            'self',
            'find_actor',
            name=name,
        )
        yield sockaddr
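A usage sketch of the lookup above (the actor name and registrar socket address are made-up values):

    async with query_actor(
        name='data-feed',
        regaddr=('127.0.0.1', 1616),
    ) as sockaddr:
        if sockaddr is None:
            print('no "data-feed" actor is registered')
        else:
            host, port = sockaddr  # transport addr of the registree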
@acm
async def maybe_open_portal(
    addr: tuple[str, int],
    name: str,
):
    async with query_actor(
        name=name,
        regaddr=addr,
    ) as sockaddr:
        pass

    if sockaddr:
        async with _connect_chan(*sockaddr) as chan:
            async with open_portal(chan) as portal:
                yield portal
    else:
        yield None


@acm
@asynccontextmanager
async def find_actor(
    name: str,
    registry_addrs: list[tuple[str, int]]|None = None,

    only_first: bool = True,
    raise_on_none: bool = False,

) -> AsyncGenerator[
    Portal | list[Portal] | None,
    None,
]:
    '''
    Ask the arbiter to find actor(s) by name.
    arbiter_sockaddr: Tuple[str, int] = None
) -> typing.AsyncGenerator[Optional[Portal], None]:
    """Ask the arbiter to find actor(s) by name.

    Returns a connected portal to the last registered matching actor
    known to the arbiter.
    """
    actor = current_actor()
    async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal:

    '''
    # optimization path, use any pre-existing peer channel
    maybe_peers: list[Channel]|None = get_peer_by_name(name)
    if maybe_peers and only_first:
        async with open_portal(maybe_peers[0]) as peer_portal:
            yield peer_portal
            return
        sockaddr = await arb_portal.run_from_ns('self', 'find_actor', name=name)

    if not registry_addrs:
        # XXX NOTE: make sure to dynamically read the value on
        # every call since something may change it globally (eg.
        # like in our discovery test suite)!
        from . import _root
        registry_addrs = (
            _runtime_vars['_registry_addrs']
            or
            _root._default_lo_addrs
        )
        # TODO: return portals to all available actors - for now just
        # the last one that registered
        if name == 'arbiter' and actor.is_arbiter:
            raise RuntimeError("The current actor is the arbiter")

    maybe_portals: list[
        AsyncContextManager[tuple[str, int]]
    ] = list(
        maybe_open_portal(
            addr=addr,
            name=name,
        )
        for addr in registry_addrs
    )
    portals: list[Portal]
    async with gather_contexts(
        mngrs=maybe_portals,
    ) as portals:
        # log.runtime(
        #     'Gathered portals:\n'
        #     f'{portals}'
        # )
        # NOTE: `gather_contexts()` will return a
        # `tuple[None, None, ..., None]` if no contact
        # can be made with any registrar at any of the
        # N provided addrs!
        if not any(portals):
            if raise_on_none:
                raise RuntimeError(
                    f'No actor "{name}" found registered @ {registry_addrs}'
                )
            yield None
            return

        portals: list[Portal] = list(portals)
        if only_first:
            yield portals[0]
    elif sockaddr:

        async with _connect_chan(*sockaddr) as chan:
            async with open_portal(chan) as portal:
                yield portal
        else:
            # TODO: currently this may return multiple portals
            # given there are multi-homed or multiple registrars..
            # SO, we probably need de-duplication logic?
            yield portals
    else:
        yield None
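And a matching caller-side sketch for the new multi-registrar signature (the actor name and registrar address are hypothetical):

    async with tractor.find_actor(
        'logger',
        registry_addrs=[('127.0.0.1', 1616)],
    ) as portal:
        if portal is None:
            print('no "logger" actor found')
        else:
            ...  # interact with the remote actor via the portal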
@acm
@asynccontextmanager
async def wait_for_actor(
    name: str,
    registry_addr: tuple[str, int] | None = None,
    arbiter_sockaddr: Tuple[str, int] = None
) -> typing.AsyncGenerator[Portal, None]:
    """Wait on an actor to register with the arbiter.

) -> AsyncGenerator[Portal, None]:
    '''
    Wait on at least one peer actor to register `name` with the
    registrar, yield a `Portal` to the first registree.
    A portal to the first registered actor is returned.
    """
    actor = current_actor()

    '''
    actor: Actor = current_actor()
    async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal:

    # optimization path, use any pre-existing peer channel
    maybe_peers: list[Channel]|None = get_peer_by_name(name)
    if maybe_peers:
        async with open_portal(maybe_peers[0]) as peer_portal:
            yield peer_portal
            return

    regaddr: tuple[str, int] = (
        registry_addr
        or
        actor.reg_addrs[0]
    )
    # TODO: use `.trionics.gather_contexts()` like
    # above in `find_actor()` as well?
    reg_portal: Portal
    async with get_registry(*regaddr) as reg_portal:
        sockaddrs = await reg_portal.run_from_ns(
            'self',
            'wait_for_actor',
            name=name,
        )

        # get latest registered addr by default?
        # TODO: offer multi-portal yields in multi-homed case?
        sockaddr: tuple[str, int] = sockaddrs[-1]
        sockaddrs = await arb_portal.run_from_ns('self', 'wait_for_actor', name=name)
        sockaddr = sockaddrs[-1]

        async with _connect_chan(*sockaddr) as chan:
            async with open_portal(chan) as portal:
@@ -1,286 +1,91 @@
# tractor: structured concurrent "actors".
# Copyright 2018-eternity Tyler Goodlet.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
Sub-process entry points.

"""
from __future__ import annotations
from functools import partial
import multiprocessing as mp
import os
import textwrap
from typing import (
    Any,
    TYPE_CHECKING,
)
from typing import Tuple, Any
import signal

import trio  # type: ignore

from .log import (
    get_console_log,
    get_logger,
)
from .log import get_console_log, get_logger
from . import _state
from .devx import _debug
from .to_asyncio import run_as_asyncio_guest
from ._runtime import (
    async_main,
    Actor,
)

if TYPE_CHECKING:
    from ._spawn import SpawnMethodKey


log = get_logger(__name__)
def _mp_main(

    actor: Actor,
    accept_addrs: list[tuple[str, int]],
    forkserver_info: tuple[Any, Any, Any, Any, Any],
    start_method: SpawnMethodKey,
    parent_addr: tuple[str, int] | None = None,
    infect_asyncio: bool = False,

    actor: 'Actor',  # type: ignore
    accept_addr: Tuple[str, int],
    forkserver_info: Tuple[Any, Any, Any, Any, Any],
    start_method: str,
    parent_addr: Tuple[str, int] = None,
) -> None:
    '''
    The routine called *after fork* which invokes a fresh `trio.run()`

    '''
    """The routine called *after fork* which invokes a fresh ``trio.run``
    """
    actor._forkserver_info = forkserver_info
    from ._spawn import try_set_start_method
    spawn_ctx: mp.context.BaseContext = try_set_start_method(start_method)
    assert spawn_ctx
    spawn_ctx = try_set_start_method(start_method)

    if actor.loglevel is not None:
        log.info(
            f'Setting loglevel for {actor.uid} to {actor.loglevel}'
        )
            f"Setting loglevel for {actor.uid} to {actor.loglevel}")
        get_console_log(actor.loglevel)

    # TODO: use scops headers like for `trio` below!
    # (well after we libify it maybe..)
    assert spawn_ctx
    log.info(
        f'Started new {spawn_ctx.current_process()} for {actor.uid}'
        # f"parent_addr is {parent_addr}"
    )
    _state._current_actor: Actor = actor
        f"Started new {spawn_ctx.current_process()} for {actor.uid}")

    _state._current_actor = actor

    log.debug(f"parent_addr is {parent_addr}")
    trio_main = partial(
        async_main,
        actor=actor,
        accept_addrs=accept_addrs,
        actor._async_main,
        accept_addr,
        parent_addr=parent_addr
    )
    try:
        if infect_asyncio:
            actor._infected_aio = True
            run_as_asyncio_guest(trio_main)
        else:
            trio.run(trio_main)
        trio.run(trio_main)
    except KeyboardInterrupt:
        pass  # handle it the same way trio does?

    finally:
        log.info(
            f'`mp`-subactor {actor.uid} exited'
        )


# TODO: move this func to some kinda `.devx._conc_lang.py` eventually
# as we work out our multi-domain state-flow-syntax!
def nest_from_op(
    input_op: str,
    #
    # ?TODO? an idea for a syntax to the state of concurrent systems
    # as a "3-domain" (execution, scope, storage) model and using
    # a minimal ascii/utf-8 operator-set.
    #
    # try not to take any of this seriously yet XD
    #
    # > is a "play operator" indicating (CPU bound)
    # exec/work/ops required at the "lowest level computing"
    #
    # execution primitives (tasks, threads, actors..) denote their
    # lifetime with '(' and ')' since parentheses normally are used
    # in many langs to denote function calls.
    #
    # starting = (
    # >( opening/starting; beginning of the thread-of-exec (toe?)
    # (> opened/started,  (finished spawning toe)
    # |_<Task: blah blah..>  repr of toe, in py these look like <objs>
    #
    # >) closing/exiting/stopping,
    # )> closed/exited/stopped,
    # |_<Task: blah blah..>
    #   [OR <), )< ?? ]
    #
    # ending = )
    # >c) cancelling to close/exit
    # c)> cancelled (caused close), OR?
    #  |_<Actor: ..>
    #   OR maybe "<c)" which better indicates the cancel being
    #   "delivered/returned" / returned" to LHS?
    #
    # >x) erroring to eventually exit
    # x)> errored and terminated
    #  |_<Actor: ...>
    #
    # scopes: supers/nurseries, IPC-ctxs, sessions, perms, etc.
    # >{ opening
    # {> opened
    # }> closed
    # >} closing
    #
    # storage: like queues, shm-buffers, files, etc..
    # >[ opening
    # [> opened
    #  |_<FileObj: ..>
    #
    # >] closing
    # ]> closed

    # IPC ops: channels, transports, msging
    # => req msg
    # <= resp msg
    # <=> 2-way streaming (of msgs)
    # <- recv 1 msg
    # -> send 1 msg
    #
    # TODO: still not sure on R/L-HS approach..?
    # =>( send-req to exec start (task, actor, thread..)
    # (<= recv-req to ^
    #
    # (<= recv-req ^
    # <=( recv-resp opened remote exec primitive
    # <=) recv-resp closed
    #
    # )<=c req to stop due to cancel
    # c=>) req to stop due to cancel
    #
    # =>{ recv-req to open
    # <={ send-status that it closed

    tree_str: str,

    # NOTE: so move back-from-the-left of the `input_op` by
    # this amount.
    back_from_op: int = 0,
) -> str:
    '''
    Depth-increment the input (presumably hierarchy/supervision)
    input "tree string" below the provided `input_op` execution
    operator, so injecting a `"\n|_{input_op}\n"` and indenting the
    `tree_str` to nest content aligned with the op's last char.

    '''
    return (
        f'{input_op}\n'
        +
        textwrap.indent(
            tree_str,
            prefix=(
                len(input_op)
                -
                (back_from_op + 1)
            ) * ' ',
        )
    )
    log.info(f"Actor {actor.uid} terminated")


def _trio_main(
    actor: Actor,
    actor: 'Actor',  # type: ignore
    *,
    parent_addr: tuple[str, int] | None = None,
    infect_asyncio: bool = False,

    parent_addr: Tuple[str, int] = None,
) -> None:
    '''
    Entry point for a `trio_run_in_process` subactor.
    """Entry point for a `trio_run_in_process` subactor.
    """
    # Disable sigint handling in children;
    # we don't need it thanks to our cancellation machinery.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    '''
    _debug.hide_runtime_frames()
    log.info(f"Started new trio process for {actor.uid}")

    if actor.loglevel is not None:
        log.info(
            f"Setting loglevel for {actor.uid} to {actor.loglevel}")
        get_console_log(actor.loglevel)

    log.info(
        f"Started {actor.uid}")

    _state._current_actor = actor

    log.debug(f"parent_addr is {parent_addr}")
    trio_main = partial(
        async_main,
        actor,
        actor._async_main,
        parent_addr=parent_addr
    )

    if actor.loglevel is not None:
        get_console_log(actor.loglevel)
    actor_info: str = (
        f'|_{actor}\n'
        f'  uid: {actor.uid}\n'
        f'  pid: {os.getpid()}\n'
        f'  parent_addr: {parent_addr}\n'
        f'  loglevel: {actor.loglevel}\n'
    )
    log.info(
        'Starting new `trio` subactor:\n'
        +
        nest_from_op(
            input_op='>(',  # see syntax ideas above
            tree_str=actor_info,
            back_from_op=2,  # since "complete"
        )
    )
    logmeth = log.info
    exit_status: str = (
        'Subactor exited\n'
        +
        nest_from_op(
            input_op=')>',  # like a "closed-to-play"-icon from super perspective
            tree_str=actor_info,
            back_from_op=1,
        )
    )
    try:
        if infect_asyncio:
            actor._infected_aio = True
            run_as_asyncio_guest(trio_main)
        else:
            trio.run(trio_main)

        trio.run(trio_main)
    except KeyboardInterrupt:
        logmeth = log.cancel
        exit_status: str = (
            'Actor received KBI (aka an OS-cancel)\n'
            +
            nest_from_op(
                input_op='c)>',  # closed due to cancel (see above)
                tree_str=actor_info,
            )
        )
    except BaseException as err:
        logmeth = log.error
        exit_status: str = (
            'Main actor task exited due to crash?\n'
            +
            nest_from_op(
                input_op='x)>',  # closed by error
                tree_str=actor_info,
            )
        )
        # NOTE since we raise a tb will already be shown on the
        # console, thus we do NOT use `.exception()` above.
        raise err
        log.warning(f"Actor {actor.uid} received KBI")

    finally:
        logmeth(exit_status)
        log.info(f"Actor {actor.uid} terminated")
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff