forked from goodboy/tractor

Compare commits: master...stream_clo  (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 6f9ef99776 |  |

@@ -1,131 +1,41 @@
 name: CI

-on:
-  # any time someone pushes a new branch to origin
-  push:
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
+on: push

 jobs:

   mypy:
     name: 'MyPy'
     runs-on: ubuntu-latest

     steps:
       - name: Checkout
         uses: actions/checkout@v2

       - name: Setup python
         uses: actions/setup-python@v2
         with:
-          python-version: '3.10'
-
+          python-version: '3.8'
       - name: Install dependencies
-        run: pip install -U . --upgrade-strategy eager -r requirements-test.txt
-
+        run: pip install -U . --upgrade-strategy eager
       - name: Run MyPy check
-        run: mypy tractor/ --ignore-missing-imports --show-traceback
+        run: mypy tractor/ --ignore-missing-imports

-  # test that we can generate a software distribution and install it
-  # thus avoid missing file issues after packaging.
-  sdist-linux:
-    name: 'sdist'
-    runs-on: ubuntu-latest
-
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-
-      - name: Setup python
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.10'
-
-      - name: Build sdist
-        run: python setup.py sdist --formats=zip
-
-      - name: Install sdist from .zips
-        run: python -m pip install dist/*.zip
-
-
-  testing-linux:
+  testing:
     name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}'
     timeout-minutes: 10
     runs-on: ${{ matrix.os }}

     strategy:
       fail-fast: false
       matrix:
-        os: [ubuntu-latest]
-        python: ['3.10']
-        spawn_backend: [
-          'trio',
-          'mp_spawn',
-          'mp_forkserver',
-        ]
-
+        os: [ubuntu-latest, windows-latest]
+        python: ['3.7', '3.8', '3.9']
+        spawn_backend: ['trio', 'mp']
     steps:

       - name: Checkout
         uses: actions/checkout@v2

       - name: Setup python
         uses: actions/setup-python@v2
         with:
           python-version: '${{ matrix.python }}'

       - name: Install dependencies
         run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager

       - name: List dependencies
         run: pip list

       - name: Run tests
-        run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx
-
-  # We skip 3.10 on windows for now due to not having any collabs to
-  # debug the CI failures. Anyone wanting to hack and solve them is very
-  # welcome, but our primary user base is not using that OS.
-
-  # TODO: use job filtering to accomplish instead of repeated
-  # boilerplate as is above XD:
-  # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows
-  # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix
-  # - https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idif
-  # testing-windows:
-  #   name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}'
-  #   timeout-minutes: 12
-  #   runs-on: ${{ matrix.os }}
-
-  #   strategy:
-  #     fail-fast: false
-  #     matrix:
-  #       os: [windows-latest]
-  #       python: ['3.10']
-  #       spawn_backend: ['trio', 'mp']
-
-  #   steps:
-
-  #     - name: Checkout
-  #       uses: actions/checkout@v2
-
-  #     - name: Setup python
-  #       uses: actions/setup-python@v2
-  #       with:
-  #         python-version: '${{ matrix.python }}'
-
-  #     - name: Install dependencies
-  #       run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager
-
-  #     # TODO: pretty sure this solves debugger deps-issues on windows, but it needs to
-  #     # be verified by someone with a native setup.
-  #     # - name: Force pyreadline3
-  #     #   run: pip uninstall pyreadline; pip install -U pyreadline3
-
-  #     - name: List dependencies
-  #       run: pip list
-
-  #     - name: Run tests
-  #       run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx
+        run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs
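
The ``--spawn-backend`` flag passed to ``pytest`` above is a custom
test-suite option. As a rough sketch, such an option can be declared in
a ``conftest.py`` like the following (illustrative only; tractor's real
conftest and fixture names may differ):

.. code:: python

    # conftest.py (sketch)
    import pytest


    def pytest_addoption(parser):
        # register the CLI flag used by the CI invocations above
        parser.addoption(
            "--spawn-backend",
            action="store",
            default="trio",
            help="process spawning backend to test with",
        )


    @pytest.fixture
    def spawn_backend(request) -> str:
        # expose the chosen backend name to tests that request it
        return request.config.getoption("--spawn-backend")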

LICENSE  (147 lines changed)

@@ -1,21 +1,23 @@
-                    GNU AFFERO GENERAL PUBLIC LICENSE
-                       Version 3, 19 November 2007
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007

- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
  Everyone is permitted to copy and distribute verbatim copies
  of this license document, but changing it is not allowed.

                            Preamble

-  The GNU Affero General Public License is a free, copyleft license for
-software and other kinds of works, specifically designed to ensure
-cooperation with the community in the case of network server software.
+  The GNU General Public License is a free, copyleft license for
+software and other kinds of works.

   The licenses for most software and other practical works are designed
 to take away your freedom to share and change the works.  By contrast,
-our General Public Licenses are intended to guarantee your freedom to
+the GNU General Public License is intended to guarantee your freedom to
 share and change all versions of a program--to make sure it remains free
-software for all its users.
+software for all its users.  We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors.  You can apply it to
+your programs, too.

   When we speak of free software, we are referring to freedom, not
 price.  Our General Public Licenses are designed to make sure that you

@@ -24,34 +26,44 @@ them if you wish), that you receive source code or can get it if you
 want it, that you can change the software or use pieces of it in new
 free programs, and that you know you can do these things.

-  Developers that use our General Public Licenses protect your rights
-with two steps: (1) assert copyright on the software, and (2) offer
-you this License which gives you legal permission to copy, distribute
-and/or modify the software.
+  To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights.  Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.

-  A secondary benefit of defending all users' freedom is that
-improvements made in alternate versions of the program, if they
-receive widespread use, become available for other developers to
-incorporate.  Many developers of free software are heartened and
-encouraged by the resulting cooperation.  However, in the case of
-software used on network servers, this result may fail to come about.
-The GNU General Public License permits making a modified version and
-letting the public access it on a server without ever releasing its
-source code to the public.
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received.  You must make sure that they, too, receive
+or can get the source code.  And you must show them these terms so they
+know their rights.

-  The GNU Affero General Public License is designed specifically to
-ensure that, in such cases, the modified source code becomes available
-to the community.  It requires the operator of a network server to
-provide the source code of the modified version running there to the
-users of that server.  Therefore, public use of a modified version, on
-a publicly accessible server, gives the public access to the source
-code of the modified version.
+  Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.

-  An older license, called the Affero General Public License and
-published by Affero, was designed to accomplish similar goals.  This is
-a different license, not a version of the Affero GPL, but Affero has
-released a new version of the Affero GPL which permits relicensing under
-this license.
+  For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software.  For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+  Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so.  This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software.  The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable.  Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products.  If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+  Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary.  To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.

   The precise terms and conditions for copying, distribution and
 modification follow.

@@ -60,7 +72,7 @@ modification follow.

   0. Definitions.

-  "This License" refers to version 3 of the GNU Affero General Public License.
+  "This License" refers to version 3 of the GNU General Public License.

   "Copyright" also means copyright-like laws that apply to other kinds of
 works, such as semiconductor masks.

@@ -537,45 +549,35 @@ to collect a royalty for further conveying from those to whom you convey
 the Program, the only way you could satisfy both those terms and this
 License would be to refrain entirely from conveying the Program.

-  13. Remote Network Interaction; Use with the GNU General Public License.
-
-  Notwithstanding any other provision of this License, if you modify the
-Program, your modified version must prominently offer all users
-interacting with it remotely through a computer network (if your version
-supports such interaction) an opportunity to receive the Corresponding
-Source of your version by providing access to the Corresponding Source
-from a network server at no charge, through some standard or customary
-means of facilitating copying of software.  This Corresponding Source
-shall include the Corresponding Source for any work covered by version 3
-of the GNU General Public License that is incorporated pursuant to the
-following paragraph.
+  13. Use with the GNU Affero General Public License.

   Notwithstanding any other provision of this License, you have
 permission to link or combine any covered work with a work licensed
-under version 3 of the GNU General Public License into a single
+under version 3 of the GNU Affero General Public License into a single
 combined work, and to convey the resulting work.  The terms of this
 License will continue to apply to the part which is the covered work,
-but the work with which it is combined will remain governed by version
-3 of the GNU General Public License.
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.

   14. Revised Versions of this License.

   The Free Software Foundation may publish revised and/or new versions of
-the GNU Affero General Public License from time to time.  Such new versions
-will be similar in spirit to the present version, but may differ in detail to
+the GNU General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
 address new problems or concerns.

   Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU Affero General
+Program specifies that a certain numbered version of the GNU General
 Public License "or any later version" applies to it, you have the
 option of following the terms and conditions either of that numbered
 version or of any later version published by the Free Software
 Foundation.  If the Program does not specify a version number of the
-GNU Affero General Public License, you may choose any version ever published
+GNU General Public License, you may choose any version ever published
 by the Free Software Foundation.

   If the Program specifies that a proxy can decide which future
-versions of the GNU Affero General Public License can be used, that proxy's
+versions of the GNU General Public License can be used, that proxy's
 public statement of acceptance of a version permanently authorizes you
 to choose that version for the Program.

@@ -633,29 +635,40 @@ the "copyright" line and a pointer to where the full notice is found.
     Copyright (C) <year>  <name of author>

     This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU Affero General Public License as published by
+    it under the terms of the GNU General Public License as published by
     the Free Software Foundation, either version 3 of the License, or
     (at your option) any later version.

     This program is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU Affero General Public License for more details.
+    GNU General Public License for more details.

-    You should have received a copy of the GNU Affero General Public License
-    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.

 Also add information on how to contact you by electronic and paper mail.

-  If your software can interact with users remotely through a computer
-network, you should also make sure that it provides a way for users to
-get its source.  For example, if your program is a web application, its
-interface could display a "Source" link that leads users to an archive
-of the code.  There are many ways you could offer source, and different
-solutions will be better for different programs; see section 13 for the
-specific requirements.
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".

   You should also get your employer (if you work as a programmer) or school,
 if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU AGPL, see
-<https://www.gnu.org/licenses/>.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.

@@ -1,2 +0,0 @@  (file removed)
-# https://packaging.python.org/en/latest/guides/using-manifest-in/#using-manifest-in
-include docs/README.rst

NEWS.rst  (528 lines removed)

@@ -1,528 +0,0 @@  (entire file removed; the old content follows)

=========
Changelog
=========

.. towncrier release notes start

tractor 0.1.0a5 (2022-08-03)
============================

This is our final release supporting Python 3.9 since we will be moving
internals to the new `match:` syntax from 3.10 going forward and,
further, we have officially dropped usage of the `msgpack` library and
happily adopted `msgspec`.

Features
--------

- `#165 <https://github.com/goodboy/tractor/issues/165>`_: Add SIGINT
  protection to our `pdbpp` based debugger subsystem such that for
  (single-depth) actor trees in debug mode we ignore interrupts in any
  actor currently holding the TTY lock, thus avoiding clobbering IPC
  connections and/or task and process state when working in the REPL.

  As a big note, currently so called "nested" actor trees (trees with
  actors having more than one parent/ancestor) are not fully supported
  since we don't yet have a mechanism to relay the debug mode knowledge
  "up" the actor tree (eg. when handling a crash in a leaf actor).
  As such there is currently a set of tests and known scenarios which will
  result in process clobbering by the zombie reaping machinery and these
  have been documented in https://github.com/goodboy/tractor/issues/320.

  The implementation details include:

  - utilizing a custom SIGINT handler which we apply whenever an actor's
    runtime enters the debug machinery, and which we also make sure the
    stdlib's `pdb` configuration doesn't override (which it does by
    default without special instance config).
  - littering the runtime with `maybe_wait_for_debugger()` mostly in spots
    where the root actor should block before doing embedded nursery
    teardown ops which both cancel potential-children-in-debug as well
    as eventually trigger the zombie reaping machinery.
  - hardening of the TTY locking semantics/API both in terms of IPC
    terminations and cancellation and lock release determinism from
    sync debugger instance methods.
  - factoring of the locking infrastructure into a new `._debug.Lock` global
    which encapsulates all details of the ``trio`` sync primitives and
    task/actor uid management and tracking.

  We also add `ctrl-c` cases throughout the test suite though these are
  disabled for py3.9 (`pdbpp` UX differences that don't seem worth
  compensating for, especially since this will be our last 3.9 supported
  release) and there are a slew of marked cases that aren't expected to
  work in CI more generally (as mentioned in the "nested" tree note
  above) despite seemingly working when run manually on linux.
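
  As a rough illustration of the first implementation bullet, a custom
  ``SIGINT`` handler can be swapped in for the duration of a debug
  session and restored afterwards; this stdlib-only sketch is *not*
  tractor's actual implementation:

  .. code:: python

      import signal
      from contextlib import contextmanager


      @contextmanager
      def shield_sigint():
          # ignore interrupts while this actor holds the debug TTY lock
          def handler(signum, frame):
              print("SIGINT ignored: actor is holding the TTY lock")

          prev = signal.signal(signal.SIGINT, handler)
          try:
              yield
          finally:
              # restore whatever handler (eg. ``pdb``'s) was installed
              signal.signal(signal.SIGINT, prev)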

- `#304 <https://github.com/goodboy/tractor/issues/304>`_: Add a new
  ``to_asyncio.LinkedTaskChannel.subscribe()`` which gives task-oriented
  broadcast functionality semantically equivalent to
  ``tractor.MsgStream.subscribe()``; this makes it possible for multiple
  ``trio``-side tasks to consume ``asyncio``-side task msgs in tandem.

  Further improvements to the test suite were added in this patch set,
  including a new scenario test for a sub-actor managed "service nursery"
  (implementing the basics of a "service manager") including use of
  *infected asyncio* mode. Further we added a lower level
  ``test_trioisms.py`` to start to track issues we need to work around in
  ``trio`` itself, which in this case included a bug we were trying to
  solve related to https://github.com/python-trio/trio/issues/2258.
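
  As a hedged usage sketch of the broadcast-style consumption this
  enables (the ``.subscribe()`` context manager shape is as described
  above; everything else here is illustrative):

  .. code:: python

      import trio


      async def consumer(stream, name: str):
          # each subscriber gets its own fan-out copy of the stream
          async with stream.subscribe() as bstream:
              async for msg in bstream:
                  print(f'{name} got {msg}')


      async def fan_out(stream):
          # multiple ``trio``-side tasks consuming the same msgs in tandem
          async with trio.open_nursery() as n:
              for name in ('t1', 't2', 't3'):
                  n.start_soon(consumer, stream, name)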


Bug Fixes
---------

- `#318 <https://github.com/goodboy/tractor/issues/318>`_: Fix
  a previously undetected ``trio``-``asyncio`` task lifetime linking
  issue with the ``to_asyncio.open_channel_from()`` api where both sides
  were not properly waiting/signalling termination and it was possible
  for ``asyncio``-side errors to not propagate due to a race condition.

  The implementation fix summary is:

  - add state to signal the end of the ``trio`` side task to be
    read by the ``asyncio`` side and always cancel any ongoing
    task in such cases.
  - always wait on the ``asyncio`` task termination from the ``trio``
    side on error before maybe raising said error.
  - always close the ``trio`` mem chan on exit to ensure the other
    side can detect it and follow.


Trivial/Internal Changes
------------------------

- `#248 <https://github.com/goodboy/tractor/issues/248>`_: Adjust the
  `tractor._spawn.soft_wait()` strategy to avoid sending an actor cancel
  request (via `Portal.cancel_actor()`) if either the child process is
  detected as having terminated or the IPC channel is detected to be
  closed.

  This ensures (even) more deterministic inter-actor cancellation by
  avoiding the timeout condition where possible when a child never
  successfully spawned, crashed, or became un-contactable over IPC.

- `#295 <https://github.com/goodboy/tractor/issues/295>`_: Add an
  experimental ``tractor.msg.NamespacePath`` type for passing Python
  objects by "reference" through a ``str``-subtype message and using the
  new ``pkgutil.resolve_name()`` for reference loading.
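
  The underlying stdlib loading step is just ``pkgutil.resolve_name()``
  (Python 3.9+); for example:

  .. code:: python

      import pkgutil

      # resolve a "namespace path" string back to the referenced object
      sqrt = pkgutil.resolve_name('math:sqrt')
      assert sqrt(9) == 3.0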

- `#298 <https://github.com/goodboy/tractor/issues/298>`_: Add a new
  `tractor.experimental` subpackage for staging new high level APIs and
  subsystems that we might eventually make built-ins.

- `#300 <https://github.com/goodboy/tractor/issues/300>`_: Update to and
  pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0), both of which
  required adjustments for backwards incompatible API tweaks.

- `#303 <https://github.com/goodboy/tractor/issues/303>`_: Fence off
  ``multiprocessing`` imports until absolutely necessary in an effort to
  avoid "resource tracker" spawning side effects that seem to have
  varying degrees of unreliability per Python release. Port to the new
  ``msgspec.DecodeError``.
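
  The fencing itself is just the standard deferred-import pattern; a
  minimal sketch:

  .. code:: python

      def get_mp_context():
          # deferred until actually needed so that merely importing the
          # package never spawns a "resource tracker" process
          import multiprocessing as mp
          return mp.get_context('spawn')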

- `#305 <https://github.com/goodboy/tractor/issues/305>`_: Add
  ``tractor.query_actor()``, an addr looker-upper which doesn't deliver
  a ``Portal`` instance and instead just a socket address ``tuple``.

  Sometimes it's handy to just have a simple way to figure out if
  a "service" actor is up, so add this discovery helper for that. We'll
  prolly just leave it undocumented for now until we figure out
  a longer-term/better discovery system.
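
  A hedged sketch of the liveness check this enables; the exact call
  signature is undocumented, so the single name argument and the
  context manager shape below are assumptions:

  .. code:: python

      import tractor


      async def is_service_up(name: str) -> bool:
          # delivers a socket address tuple (or ``None``) rather than
          # a ``Portal``; the usage shape here is assumed, not documented
          async with tractor.query_actor(name) as addr:
              return addr is not None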

- `#316 <https://github.com/goodboy/tractor/issues/316>`_: Run windows
  CI jobs on python 3.10 after some hacks for the ``pdbpp`` dependency
  issues.

  The issue was to do with the now deprecated `pyreadline` project which
  should be changed over to `pyreadline3`.

- `#317 <https://github.com/goodboy/tractor/issues/317>`_: Drop use of
  the ``msgpack`` package and instead move fully to the ``msgspec``
  codec library.

  We've now used ``msgspec`` extensively in production and there's no
  reason to not use it as default. Further this change preps us for the up
  and coming typed messaging semantics (#196), dialog-unprotocol system
  (#297), and caps-based messaging-protocols (#299) planned before our
  first beta.


tractor 0.1.0a4 (2021-12-18)
============================

Features
--------
- `#275 <https://github.com/goodboy/tractor/issues/275>`_: Re-license
  code base under AGPLv3. Also see `#274
  <https://github.com/goodboy/tractor/pull/274>`_ for majority
  contributor consensus on this decision.

- `#121 <https://github.com/goodboy/tractor/issues/121>`_: Add
  "infected ``asyncio``" mode; a sub-system to spawn and control
  ``asyncio`` actors using ``trio``'s guest-mode.

  This gets us the following very interesting functionality:

  - ability to spawn an actor that has a process entry point of
    ``asyncio.run()`` by passing ``infect_asyncio=True`` to
    ``Portal.start_actor()`` (and friends).
  - the ``asyncio`` actor embeds ``trio`` using guest-mode and starts
    a main ``trio`` task which runs the ``tractor.Actor._async_main()``
    entry point and engages all the normal ``tractor`` runtime IPC/messaging
    machinery; for all purposes the actor is now running normally on
    a ``trio.run()``.
  - the actor can now make one-to-one task spawning requests to the
    underlying ``asyncio`` event loop using either of:

    * ``to_asyncio.run_task()`` to spawn and run an ``asyncio`` task to
      completion and block until a return value is delivered.
    * ``async with to_asyncio.open_channel_from():`` which spawns a task
      and hands it a pair of "memory channels" to allow for bi-directional
      streaming between the now SC-linked ``trio`` and ``asyncio`` tasks.

  The output from any call(s) to ``asyncio`` can be handled as normal in
  ``trio``/``tractor`` task operation with the caveat of the overhead due
  to guest-mode use.

  For more details see the `original PR
  <https://github.com/goodboy/tractor/pull/121>`_ and `issue
  <https://github.com/goodboy/tractor/issues/120>`_.

- `#257 <https://github.com/goodboy/tractor/issues/257>`_: Add
  ``trionics.maybe_open_context()``, an actor-scoped async multi-task
  context manager resource caching API.

  Adds an SC-safe caching async context manager api that only enters on
  the *first* task entry and only exits on the *last* task exit while in
  between delivering the same cached value per input key. Keys can be
  either an explicit ``key`` named arg provided by the user or a
  hashable ``kwargs`` dict (will be converted to a ``list[tuple]``) which
  is passed to the underlying manager function as input.
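
  A usage sketch under those semantics (the yielded ``(cache_hit,
  value)`` pair below is an assumption about the exact return shape):

  .. code:: python

      from contextlib import asynccontextmanager

      import trio
      from tractor import trionics


      @asynccontextmanager
      async def open_connection(host: str):
          # stand-in for some expensive-to-acquire resource
          yield f'conn-to-{host}'


      async def task(n: int):
          # every task passing the same ``kwargs`` shares one cached entry
          async with trionics.maybe_open_context(
              open_connection,
              kwargs={'host': 'localhost'},
          ) as (cache_hit, conn):
              print(f'task {n}: hit={cache_hit} -> {conn}')


      async def main():
          async with trio.open_nursery() as n:
              for i in range(3):
                  n.start_soon(task, i)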

- `#261 <https://github.com/goodboy/tractor/issues/261>`_: Add
  cross-actor-task ``Context`` oriented error relay, a new stream
  overrun error-signal ``StreamOverrun``, and support for disabling
  ``MsgStream`` backpressure as the default before a stream is opened or
  by choice of the user.

  We added stricter semantics around ``tractor.Context.open_stream():``,
  particularly to do with streams which are only opened at one end.
  Previously, if only one end opened a stream there was no way for that
  sender to know if msgs were being received until first, the feeder mem
  chan on the receiver side hit a backpressure state, and then that
  condition delayed its msg loop processing task enough to eventually
  create backpressure on the associated IPC transport. This is non-ideal
  in the case where the receiver side never opened a stream by mistake
  since it results in silent blocking of the sender and no adherence to
  the underlying mem chan buffer size settings (which is still unsolved btw).

  To solve this we add non-backpressure style message pushing inside
  ``Actor._push_result()`` by default and only use the backpressure
  ``trio.MemorySendChannel.send()`` call **iff** the local end of the
  context has entered ``Context.open_stream():``. This way if the stream
  was never opened but the mem chan is overrun, we relay back to the
  sender a (new exception) ``StreamOverrun`` error which is raised in the
  sender's scope with a special error message about the stream never
  having been opened. Further, this behaviour (non-backpressure style
  where senders can expect an error on overruns) can now be enabled with
  ``.open_stream(backpressure=False)`` and the underlying mem chan size
  can be specified with a kwarg ``msg_buffer_size: int``.

  Further bug fixes and enhancements in this changeset include:

  - fix a race we were ignoring where if the callee task opened a context
    it could enter ``Context.open_stream()`` before calling
    ``.started()``.
  - Disallow calling ``Context.started()`` more than once.
  - Enable ``Context`` linked tasks error relaying via the new
    ``Context._maybe_raise_from_remote_msg()`` which (for now) uses
    a simple ``trio.Nursery.start_soon()`` to raise the error via closure
    in the local scope.
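
  A sketch of the sender-side opt-in (where exactly the overrun error
  surfaces, and its import location, are assumptions here):

  .. code:: python

      import tractor


      @tractor.context
      async def sender(
          ctx: tractor.Context,
      ) -> None:
          await ctx.started('ready')

          # opt out of backpressure and pick the feeder mem chan's
          # buffer size explicitly
          async with ctx.open_stream(
              backpressure=False,
              msg_buffer_size=64,
          ) as stream:
              for i in range(10_000):
                  # a ``StreamOverrun`` is relayed back and raised in
                  # this scope if the receiver never drains its side
                  await stream.send(i)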

- `#267 <https://github.com/goodboy/tractor/issues/267>`_: This
  (finally) adds fully acknowledged remote cancellation messaging
  support for both explicit ``Portal.cancel_actor()`` calls as well as
  when there are "runtime-wide" cancellations (eg. during KBI or
  general actor nursery exception handling which causes a full actor
  "crash"/termination).

  You can think of this as the most ideal case in 2-generals where the
  actor requesting the cancel of its child is able to always receive back
  the ACK to that request. This leads to a more deterministic shutdown of
  the child where the parent is able to wait for the child to fully
  respond to the request. On a localhost setup, where the parent can
  monitor the state of the child through process or other OS APIs instead
  of solely through IPC messaging, the parent can know whether or not the
  child decided to cancel with more certainty. In the case of separate
  hosts, we still rely on a simple timeout approach until such a time
  where we prefer to get "fancier".

- `#271 <https://github.com/goodboy/tractor/issues/271>`_: Add a per
  actor ``debug_mode: bool`` control to our nursery.

  This allows spawning actors via ``ActorNursery.start_actor()`` (and
  other dependent methods) with a ``debug_mode=True`` flag much like
  ``tractor.open_nursery():`` such that per process crash handling
  can be toggled for cases where a user does not need/want all child actors
  to drop into the debugger on error. This is often useful when you have
  actor-tasks which are expected to error often (and be re-run) but want
  to specifically interact with some (problematic) child.
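
  For example (flag placement per the description above):

  .. code:: python

      import tractor


      async def main():
          async with tractor.open_nursery() as n:
              # only this child will drop into the debugger on crash;
              # siblings spawned without the flag just error as usual
              portal = await n.start_actor(
                  'problem_child',
                  enable_modules=[__name__],
                  debug_mode=True,
              )
              # ... interact with the (problematic) child here ...
              await portal.cancel_actor()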


Bugfixes
--------

- `#239 <https://github.com/goodboy/tractor/issues/239>`_: Fix
  keyboard interrupt handling in ``Portal.open_context()`` blocks.

  Previously this was not triggering cancellation of the remote task
  context and could result in hangs if a stream was also opened. This
  fix is to accept ``BaseException`` since it is likely any other top
  level exception other than KBI (even though not expected) should also
  get this result.

- `#264 <https://github.com/goodboy/tractor/issues/264>`_: Fix
  ``Portal.run_in_actor()`` returning a ``None`` result.

  ``None`` was being used as the cached result flag and obviously breaks
  on a ``None`` returned from the remote target task. This would cause an
  infinite hang if user code ever called ``Portal.result()`` *before* the
  nursery exit. The simple fix is to use the *return message* as the
  initial "no-result-received-yet" flag value and, once received, the
  return value is read from the message to avoid the cache logic error.

- `#266 <https://github.com/goodboy/tractor/issues/266>`_: Fix
  graceful cancellation of daemon actors.

  Previously, this was a bug where if the soft wait on a sub-process (the
  ``await .proc.wait()``) in the reaper task teardown was cancelled we
  would fail over to the hard reaping sequence (meant for culling off any
  potential zombies via system kill signals). The hard reap has a timeout
  of 3s (currently, though in theory we could make it shorter?) before
  system signalling kicks in. This means that any daemon actor still
  running during nursery exit would get hard reaped (3s later) instead of
  cancelled via IPC message. Now we catch the ``trio.Cancelled``, call
  ``Portal.cancel_actor()`` on the daemon and expect the child to
  self-terminate after the runtime cancels and shuts down the process.

- `#278 <https://github.com/goodboy/tractor/issues/278>`_: Repair
  inter-actor stream closure semantics to work correctly with
  ``tractor.trionics.BroadcastReceiver`` task fan out usage.

  A set of previously unknown bugs discovered in `#257
  <https://github.com/goodboy/tractor/pull/257>`_ let graceful stream
  closure result in hanging consumer tasks that use the broadcast APIs.
  This adds better internal closure state tracking to the broadcast
  receiver and message stream APIs and in particular ensures that when an
  underlying stream/receive-channel (a broadcast receiver is receiving
  from) is closed, all consumer tasks waiting on that underlying channel
  are woken so they can receive the ``trio.EndOfChannel`` signal and
  promptly terminate.


tractor 0.1.0a3 (2021-11-02)
============================

Features
--------

- Switch to using the ``trio`` process spawner by default on windows. (#166)

  This gets windows users debugger support (manually tested) and in
  general a more resilient (nested) actor tree implementation.

- Add optional `msgspec <https://jcristharif.com/msgspec/>`_ support
  as an alternative, faster MessagePack codec. (#214)

  Provides us with a path toward supporting typed IPC message contracts. Further,
  ``msgspec`` structs may be a valid tool to start with for formalizing our
  "SC dialog un-protocol" messages as described in `#36
  <https://github.com/goodboy/tractor/issues/36>`_.

- Introduce a new ``tractor.trionics`` `sub-package`_ that exposes
  a selection of our relevant high(er) level trio primitives and
  goodies. (#241)

  At the outset we offer a ``gather_contexts()`` context manager for
  concurrently entering a sequence of async context managers (much like
  a version of ``asyncio.gather()`` but for context managers) and use it
  in a new ``tractor.open_actor_cluster()`` manager-helper that can be
  entered to concurrently spawn a flat actor pool. We also now publicly
  expose our "broadcast channel" APIs (``open_broadcast_receiver()``)
  from here.

.. _sub-package: ../tractor/trionics
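
A hedged sketch of the ``gather_contexts()`` usage described above (the
type of the collection yielded below is an assumption):

.. code:: python

    from contextlib import asynccontextmanager

    import trio
    from tractor import trionics


    @asynccontextmanager
    async def open_resource(name: str):
        # stand-in async context manager
        print(f'{name} up')
        yield name
        print(f'{name} down')


    async def main():
        # concurrently enter a sequence of async context managers,
        # much like an ``asyncio.gather()`` but for context managers
        async with trionics.gather_contexts([
            open_resource(name) for name in 'abc'
        ]) as values:
            print(values)  # one yielded value per entered manager


    trio.run(main)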

- Change the core message loop to handle task and actor-runtime cancel
  requests immediately instead of scheduling them as is done for rpc-task
  requests. (#245)

  In order to obtain more reliable teardown mechanics for (complex) actor
  trees it's important that we specially treat cancel requests as having
  higher priority. Previously, it was possible that task cancel requests
  could actually also themselves be cancelled if an "actor-runtime" cancel
  request was received (which can happen during messy multi actor crashes
  that propagate). Instead cancels now block the msg loop until serviced and
  a response is relayed back to the requester. This also allows for
  improved debugger support since we have determinism guarantees about
  which processes must wait before hard killing their children.

- (`#248 <https://github.com/goodboy/tractor/pull/248>`_) Drop Python
  3.8 support in favour of rolling with the two latest releases for the
  time being.


Misc
----

- (`#243 <https://github.com/goodboy/tractor/pull/243>`_) Add a distinct
  ``'CANCEL'`` log level to allow the runtime to emit details about
  cancellation machinery statuses.


tractor 0.1.0a2 (2021-09-07)
============================

Features
--------

- Add `tokio-style broadcast channels
  <https://docs.rs/tokio/1.11.0/tokio/sync/broadcast/index.html>`_ as
  a solution for `#204 <https://github.com/goodboy/tractor/pull/204>`_ and
  discussed thoroughly in `trio/#987
  <https://github.com/python-trio/trio/issues/987>`_.

  This gives us local task broadcast functionality using a new
  ``BroadcastReceiver`` type which can wrap ``trio.ReceiveChannel`` and
  provide fan-out copies of a stream of data to every subscribed consumer.
  We use this new machinery to provide a ``ReceiveMsgStream.subscribe()``
  async context manager which can be used by actor-local consumer tasks
  to easily pull from a shared and dynamic IPC stream. (`#229
  <https://github.com/goodboy/tractor/pull/229>`_)


Bugfixes
--------

- Handle broken channel/stream faults where the root's tty lock is left
  acquired by some child actor who went MIA and the root ends up hanging
  indefinitely. (`#234 <https://github.com/goodboy/tractor/pull/234>`_)

  There are two parts here: we no longer shield wait on the lock and
  now always do our best to release the lock on the expected worst
  case connection faults.


Deprecations and Removals
-------------------------

- Drop stream "shielding" support which was originally added to sidestep
  a cancelled call to ``.receive()``.

  In the original api design a stream instance was returned directly from
  a call to ``Portal.run()`` and thus there was no "exit phase" to handle
  cancellations and errors which would trigger implicit closure. Now that
  we have said enter/exit semantics with ``Portal.open_stream_from()`` and
  ``Context.open_stream()`` we can drop this implicit (and arguably
  confusing) behavior. (`#230 <https://github.com/goodboy/tractor/pull/230>`_)

- Drop Python 3.7 support in preparation for supporting 3.9+ syntax.
  (`#232 <https://github.com/goodboy/tractor/pull/232>`_)


tractor 0.1.0a1 (2021-08-01)
============================

Features
--------
- Updated our uni-directional streaming API (`#206
  <https://github.com/goodboy/tractor/pull/206>`_) to require a context
  manager style ``async with Portal.open_stream_from(target) as stream:``
  which explicitly determines when to stop a stream in the calling (aka
  portal opening) actor much like ``async_generator.aclosing()``
  enforcement.
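
  A sketch of that context manager style (passing kwargs through to the
  target as shown is an assumption):

  .. code:: python

      import trio
      import tractor


      async def counter(limit: int):
          # a plain async-gen target, streamed from another process
          for i in range(limit):
              yield i


      async def main():
          async with tractor.open_nursery() as n:
              portal = await n.start_actor(
                  'streamer',
                  enable_modules=[__name__],
              )
              # exiting the block determines when the stream stops
              async with portal.open_stream_from(
                  counter,
                  limit=5,
              ) as stream:
                  async for value in stream:
                      print(value)

              await portal.cancel_actor()


      if __name__ == '__main__':
          trio.run(main)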

- Improved the ``multiprocessing`` backend sub-actor reaping (`#208
  <https://github.com/goodboy/tractor/pull/208>`_) during actor nursery
  exit, particularly during cancellation scenarios that previously might
  result in hard to debug hangs.

- Added initial bi-directional streaming support in `#219
  <https://github.com/goodboy/tractor/pull/219>`_ with follow up debugger
  improvements via `#220 <https://github.com/goodboy/tractor/pull/220>`_
  using the new ``tractor.Context`` cross-actor task syncing system.
  The debugger upgrades add an edge triggered last-in-tty-lock semaphore
  which allows the root process for a tree to avoid clobbering children
  who have queued to acquire the ``pdb`` repl by waiting to cancel
  sub-actors until the lock is known to be released **and** has no
  pending waiters.


Experiments and WIPs
--------------------
- Initial optional ``msgspec`` serialization support in `#214
  <https://github.com/goodboy/tractor/pull/214>`_ which should hopefully
  land by next release.

- Improved "infect ``asyncio``" cross-loop task cancellation and error
  propagation by vastly simplifying the cross-loop-task streaming approach.
  We may end up just going with a use of ``anyio`` in the medium term to
  avoid re-doing work done by their cross-event-loop portals. See the
  ``infect_asyncio`` branch for details.


Improved Documentation
----------------------
- `Updated our readme <https://github.com/goodboy/tractor/pull/211>`_ to
  include more (and better) `examples
  <https://github.com/goodboy/tractor#run-a-func-in-a-process>`_ (with
  matching multi-terminal process monitoring shell commands) as well as
  added many more examples to the `repo set
  <https://github.com/goodboy/tractor/tree/master/examples>`_.

- Added a readme `"actors under the hood" section
  <https://github.com/goodboy/tractor#under-the-hood>`_ in an effort to
  guard against suggestions for changing the API away from ``trio``'s
  *tasks-as-functions* style.

- Moved to using the `sphinx book theme
  <https://sphinx-book-theme.readthedocs.io/en/latest/index.html>`_
  though it needs some heavy tweaking and doesn't seem to show our logo
  on rtd :(


Trivial/Internal Changes
------------------------
- Added a new ``TransportClosed`` internal exception/signal (`#215
  <https://github.com/goodboy/tractor/pull/215>`_) for catching TCP
  channel gentle closes instead of silently falling through the message
  handler loop via an async generator ``return``.


Deprecations and Removals
-------------------------
- Dropped support for invoking sync functions (`#205
  <https://github.com/goodboy/tractor/pull/205>`_) in other
  actors/processes since you can always wrap a sync function in an
  async one.  Users can instead consider using ``trio-parallel`` which
  is a project specifically geared toward purely synchronous calls in
  sub-processes.

- Deprecated our ``tractor.run()`` entrypoint (`#197
  <https://github.com/goodboy/tractor/pull/197>`_); the runtime is now
  either started implicitly on first actor nursery use or via an
  explicit call to ``tractor.open_root_actor()``. Full removal of
  ``tractor.run()`` will come by the beta release.
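
  A minimal sketch of the explicit entrypoint style:

  .. code:: python

      import trio
      import tractor


      async def main():
          # explicitly start the runtime instead of relying on implicit
          # startup from the first ``open_nursery()`` use
          async with tractor.open_root_actor():
              # the runtime is now up; actor nurseries opened here
              # will use it rather than starting their own
              await trio.sleep(0)


      if __name__ == '__main__':
          trio.run(main)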


tractor 0.1.0a0 (2021-02-28)
============================

..
    TODO: fill out more of the details of the initial feature set in some TLDR form

Summary
-------
- ``trio`` based process spawner (using ``subprocess``)
- initial multi-process debugging with ``pdb++``
- windows support using both ``trio`` and ``multiprocessing`` spawners
- "portal" api for cross-process, structured concurrent, (streaming) IPC

docs/README.rst  (394 lines changed)

@@ -3,22 +3,18 @@
| |gh_actions| | ||||
| |docs| | ||||
| 
 | ||||
| ``tractor`` is a `structured concurrent`_, multi-processing_ runtime | ||||
| built on trio_. | ||||
| .. _actor model: https://en.wikipedia.org/wiki/Actor_model | ||||
| .. _trio: https://github.com/python-trio/trio | ||||
| .. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing | ||||
| .. _trionic: https://trio.readthedocs.io/en/latest/design.html#high-level-design-principles | ||||
| .. _async sandwich: https://trio.readthedocs.io/en/latest/tutorial.html#async-sandwich | ||||
| .. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228 | ||||
| 
 | ||||
| Fundamentally, ``tractor`` gives you parallelism via | ||||
| ``trio``-"*actors*": independent Python processes (aka | ||||
| non-shared-memory threads) which maintain structured | ||||
| concurrency (SC) *end-to-end* inside a *supervision tree*. | ||||
| 
 | ||||
| Cross-process (and thus cross-host) SC is accomplished through the | ||||
| combined use of our "actor nurseries_" and an "SC-transitive IPC | ||||
| protocol" constructed on top of multiple Pythons each running a ``trio`` | ||||
| scheduled runtime - a call to ``trio.run()``. | ||||
| ``tractor`` is a `structured concurrent`_ "`actor model`_" built on trio_ and multi-processing_. | ||||
| 
 | ||||
| We believe the system adheres to the `3 axioms`_ of an "`actor model`_" | ||||
| but likely *does not* look like what *you* probably think an "actor | ||||
| model" looks like, and that's *intentional*. | ||||
| We pair structured concurrency and true multi-core parallelism with | ||||
| the aim of being the multi-processing framework *you always wanted*. | ||||
| 
 | ||||
| The first step to grok ``tractor`` is to get the basics of ``trio`` down. | ||||
| A great place to start is the `trio docs`_ and this `blog post`_. | ||||
|  | @ -27,17 +23,12 @@ A great place to start is the `trio docs`_ and this `blog post`_. | |||
| Features | ||||
| -------- | ||||
| - **It's just** a ``trio`` API | ||||
| - *Infinitely nesteable* process trees | ||||
| - Builtin IPC streaming APIs with task fan-out broadcasting | ||||
| - A "native" multi-core debugger REPL using `pdbp`_ (a fork & fix of | ||||
|   `pdb++`_ thanks to @mdmintz!) | ||||
| - Support for a swappable, OS specific, process spawning layer | ||||
| - A modular transport stack, allowing for custom serialization (eg. with | ||||
|   `msgspec`_), communications protocols, and environment specific IPC | ||||
|   primitives | ||||
| - Support for spawning process-level-SC, inter-loop one-to-one-task oriented | ||||
|   ``asyncio`` actors via "infected ``asyncio``" mode | ||||
| - `structured chadcurrency`_ from the ground up | ||||
| - Infinitely nesteable process trees | ||||
| - Built-in APIs for inter-process streaming | ||||
| - A (first ever?) "native" multi-core debugger for Python using `pdb++`_ | ||||
| - Support for multiple process spawning backends | ||||
| - A modular transport layer, allowing for custom serialization, | ||||
|   communications protocols, and environment specific IPC primitives | ||||
| 
 | ||||
| 
 | ||||
| Run a func in a process | ||||
|  | @ -138,8 +129,7 @@ Zombie safe: self-destruct a process tree | |||
|             print('This process tree will self-destruct in 1 sec...') | ||||
|             await trio.sleep(1) | ||||
| 
 | ||||
|             # raise an error in root actor/process and trigger | ||||
|             # reaping of all minions | ||||
|             # you could have done this yourself | ||||
|             raise Exception('Self Destructed') | ||||
| 
 | ||||
| 
 | ||||
|  | @ -156,7 +146,7 @@ it **is a bug**. | |||
| 
 | ||||
| "Native" multi-process debugging | ||||
| -------------------------------- | ||||
| Using the magic of `pdbp`_ and our internal IPC, we've | ||||
| Using the magic of `pdb++`_ and our internal IPC, we've | ||||
| been able to create a native feeling debugging experience for | ||||
| any (sub-)process in your ``tractor`` tree. | ||||
| 
 | ||||
|  | @ -209,98 +199,6 @@ And, yes, there's a built-in crash handling mode B) | |||
| We're hoping to add a respawn-from-repl system soon! | ||||
| 
 | ||||
| 
 | ||||
| SC compatible bi-directional streaming | ||||
| -------------------------------------- | ||||
| Yes, you saw it here first; we provide 2-way streams | ||||
| with reliable, transitive setup/teardown semantics. | ||||
| 
 | ||||
| Our nascent api is remniscent of ``trio.Nursery.start()`` | ||||
| style invocation: | ||||
| 
 | ||||
| .. code:: python | ||||
| 
 | ||||
|     import trio | ||||
|     import tractor | ||||
| 
 | ||||
| 
 | ||||
|     @tractor.context | ||||
|     async def simple_rpc( | ||||
| 
 | ||||
|         ctx: tractor.Context, | ||||
|         data: int, | ||||
| 
 | ||||
|     ) -> None: | ||||
|         '''Test a small ping-pong 2-way streaming server. | ||||
| 
 | ||||
|         ''' | ||||
|         # signal to parent that we're up much like | ||||
|         # ``trio_typing.TaskStatus.started()`` | ||||
|         await ctx.started(data + 1) | ||||
| 
 | ||||
|         async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|             count = 0 | ||||
|             async for msg in stream: | ||||
| 
 | ||||
|                 assert msg == 'ping' | ||||
|                 await stream.send('pong') | ||||
|                 count += 1 | ||||
| 
 | ||||
|             else: | ||||
|                 assert count == 10 | ||||
| 
 | ||||
| 
 | ||||
|     async def main() -> None: | ||||
| 
 | ||||
|         async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|             portal = await n.start_actor( | ||||
|                 'rpc_server', | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             # XXX: this syntax requires py3.9 | ||||
|             async with ( | ||||
| 
 | ||||
|                 portal.open_context( | ||||
|                     simple_rpc, | ||||
|                     data=10, | ||||
|                 ) as (ctx, sent), | ||||
| 
 | ||||
|                 ctx.open_stream() as stream, | ||||
|             ): | ||||
| 
 | ||||
|                 assert sent == 11 | ||||
| 
 | ||||
|                 count = 0 | ||||
|                 # receive msgs using async for style | ||||
|                 await stream.send('ping') | ||||
| 
 | ||||
|                 async for msg in stream: | ||||
|                     assert msg == 'pong' | ||||
|                     await stream.send('ping') | ||||
|                     count += 1 | ||||
| 
 | ||||
|                     if count >= 9: | ||||
|                         break | ||||
| 
 | ||||
| 
 | ||||
|             # explicitly teardown the daemon-actor | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
|     if __name__ == '__main__': | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| See original proposal and discussion in `#53`_ as well | ||||
| as follow up improvements in `#223`_ that we'd love to | ||||
| hear your thoughts on! | ||||
| 
 | ||||
| .. _#53: https://github.com/goodboy/tractor/issues/53 | ||||
| .. _#223: https://github.com/goodboy/tractor/issues/223 | ||||
| 
 | ||||
| 
 | ||||
| Worker poolz are easy peasy | ||||
| --------------------------- | ||||
| The initial ask from most new users is *"how do I make a worker | ||||
|  | @ -323,173 +221,9 @@ real time:: | |||
| This uses no extra threads, fancy semaphores or futures; all we need | ||||
| is ``tractor``'s IPC! | ||||
| 
 | ||||
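| If you just want the gist, here's a minimal sketch (the ``cube()`` | ||||
| task below is purely illustrative) which fans work out to a set of | ||||
| one-shot actors via ``run_in_actor()`` and collects one result each: | ||||
| 
 | ||||
| .. code:: python | ||||
| 
 | ||||
|     import trio | ||||
|     import tractor | ||||
| 
 | ||||
| 
 | ||||
|     async def cube(x: int) -> int: | ||||
|         # stand-in for your actually-expensive computation | ||||
|         return x ** 3 | ||||
| 
 | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|             # spawn a one-shot actor per input | ||||
|             portals = [ | ||||
|                 await n.run_in_actor(cube, x=i) | ||||
|                 for i in range(4) | ||||
|             ] | ||||
| 
 | ||||
|             # wait on each actor's "final result" | ||||
|             results = [await p.result() for p in portals] | ||||
|             assert results == [0, 1, 8, 27] | ||||
| 
 | ||||
| 
 | ||||
|     if __name__ == '__main__': | ||||
|         trio.run(main) | ||||
| 
 | ||||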
| "Infected ``asyncio``" mode | ||||
| --------------------------- | ||||
| Have a bunch of ``asyncio`` code you want to force to be SC at the process level? | ||||
| 
 | ||||
| Check out our experimental system for `guest-mode`_ controlled | ||||
| ``asyncio`` actors: | ||||
| 
 | ||||
| .. code:: python | ||||
| 
 | ||||
|     import asyncio | ||||
|     from statistics import mean | ||||
|     import time | ||||
| 
 | ||||
|     import trio | ||||
|     import tractor | ||||
| 
 | ||||
| 
 | ||||
|     async def aio_echo_server( | ||||
|         to_trio: trio.MemorySendChannel, | ||||
|         from_trio: asyncio.Queue, | ||||
|     ) -> None: | ||||
| 
 | ||||
|         # a first message must be sent **from** this ``asyncio`` | ||||
|         # task or the ``trio`` side will never unblock from | ||||
|         # ``tractor.to_asyncio.open_channel_from():`` | ||||
|         to_trio.send_nowait('start') | ||||
| 
 | ||||
|         # XXX: this uses a ``from_trio: asyncio.Queue`` currently but we | ||||
|         # should probably offer something better. | ||||
|         while True: | ||||
|             # echo the msg back | ||||
|             to_trio.send_nowait(await from_trio.get()) | ||||
|             await asyncio.sleep(0) | ||||
| 
 | ||||
| 
 | ||||
|     @tractor.context | ||||
|     async def trio_to_aio_echo_server( | ||||
|         ctx: tractor.Context, | ||||
|     ): | ||||
|         # this will block until the ``asyncio`` task sends a "first" | ||||
|         # message. | ||||
|         async with tractor.to_asyncio.open_channel_from( | ||||
|             aio_echo_server, | ||||
|         ) as (first, chan): | ||||
| 
 | ||||
|             assert first == 'start' | ||||
|             await ctx.started(first) | ||||
| 
 | ||||
|             async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|                 async for msg in stream: | ||||
|                     await chan.send(msg) | ||||
| 
 | ||||
|                     out = await chan.receive() | ||||
|                     # echo back to parent actor-task | ||||
|                     await stream.send(out) | ||||
| 
 | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         async with tractor.open_nursery() as n: | ||||
|             p = await n.start_actor( | ||||
|                 'aio_server', | ||||
|                 enable_modules=[__name__], | ||||
|                 infect_asyncio=True, | ||||
|             ) | ||||
|             async with p.open_context( | ||||
|                 trio_to_aio_echo_server, | ||||
|             ) as (ctx, first): | ||||
| 
 | ||||
|                 assert first == 'start' | ||||
| 
 | ||||
|                 count = 0 | ||||
|                 async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|                     delays = [] | ||||
|                     send = time.time() | ||||
| 
 | ||||
|                     await stream.send(count) | ||||
|                     async for msg in stream: | ||||
|                         recv = time.time() | ||||
|                         delays.append(recv - send) | ||||
|                         assert msg == count | ||||
|                         count += 1 | ||||
|                         send = time.time() | ||||
|                         await stream.send(count) | ||||
| 
 | ||||
|                         if count >= 1e3: | ||||
|                             break | ||||
| 
 | ||||
|             print(f'mean round trip rate (Hz): {1/mean(delays)}') | ||||
|             await p.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
|     if __name__ == '__main__': | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| Yes, we spawn a Python process, run ``asyncio``, start ``trio`` on the | ||||
| ``asyncio`` loop, then send commands to the ``trio`` scheduled tasks to | ||||
| tell ``asyncio`` tasks what to do XD | ||||
| 
 | ||||
| We need help refining the `asyncio`-side channel API to be more | ||||
| `trio`-like. Feel free to sling your opinion in `#273`_! | ||||
| 
 | ||||
| 
 | ||||
| .. _#273: https://github.com/goodboy/tractor/issues/273 | ||||
| 
 | ||||
| 
 | ||||
| Higher level "cluster" APIs | ||||
| --------------------------- | ||||
| To be extra terse, the ``tractor`` devs have started hacking some "higher | ||||
| level" APIs for managing actor trees/clusters. These interfaces should | ||||
| generally be considered provisional for now but we encourage you to try | ||||
| them and provide feedback. Here's a new API that lets you quickly | ||||
| spawn a flat cluster: | ||||
| 
 | ||||
| .. code:: python | ||||
| 
 | ||||
|     import trio | ||||
|     import tractor | ||||
| 
 | ||||
| 
 | ||||
|     async def sleepy_jane(): | ||||
|         uid = tractor.current_actor().uid | ||||
|         print(f'Yo i am actor {uid}') | ||||
|         await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
|     async def main(): | ||||
|         ''' | ||||
|         Spawn a flat actor cluster, with one process per | ||||
|         detected core. | ||||
| 
 | ||||
|         ''' | ||||
|         portal_map: dict[str, tractor.Portal] | ||||
|         results: dict[str, str] | ||||
| 
 | ||||
|         # look at this hip new syntax! | ||||
|         async with ( | ||||
| 
 | ||||
|             tractor.open_actor_cluster( | ||||
|                 modules=[__name__] | ||||
|             ) as portal_map, | ||||
| 
 | ||||
|             trio.open_nursery() as n, | ||||
|         ): | ||||
| 
 | ||||
|             for (name, portal) in portal_map.items(): | ||||
|                 n.start_soon(portal.run, sleepy_jane) | ||||
| 
 | ||||
|             await trio.sleep(0.5) | ||||
| 
 | ||||
|             # kill the cluster with a cancel | ||||
|             raise KeyboardInterrupt | ||||
| 
 | ||||
| 
 | ||||
|     if __name__ == '__main__': | ||||
|         try: | ||||
|             trio.run(main) | ||||
|         except KeyboardInterrupt: | ||||
|             pass | ||||
| 
 | ||||
| 
 | ||||
| .. _full worker pool re-implementation: https://github.com/goodboy/tractor/blob/master/examples/parallelism/concurrent_actors_primes.py | ||||
| 
 | ||||
| 
 | ||||
| Install | ||||
| ------- | ||||
| From PyPI:: | ||||
|  | @ -510,77 +244,29 @@ distributed Python. You can think of it as a ``trio`` | |||
| stdlib's ``multiprocessing`` but built on async programming primitives | ||||
| from the ground up. | ||||
| 
 | ||||
| ``tractor``'s nurseries let you spawn ``trio`` *"actors"*: new Python | ||||
| processes which each run a ``trio`` scheduled runtime - a call to ``trio.run()``. | ||||
| 
 | ||||
| Don't be scared off by this description. ``tractor`` **is just** ``trio`` | ||||
| but with nurseries for process management and cancel-able streaming IPC. | ||||
| If you understand how to work with ``trio``, ``tractor`` will give you | ||||
| the parallelism you may have been needing. | ||||
| the parallelism you've been missing. | ||||
| 
 | ||||
| 
 | ||||
| Wait, huh?! I thought "actors" have messages, and mailboxes and stuff?! | ||||
| *********************************************************************** | ||||
| Let's stop and ask: how many canon actor model papers have you actually read? ;) | ||||
| 
 | ||||
| From our experience, many "actor systems" aren't really "actor models" | ||||
| since they **don't adhere** to the `3 axioms`_ and pay even less | ||||
| attention to the problem of *unbounded non-determinism* (which was the | ||||
| whole point for the creation of the model in the first place). | ||||
| 
 | ||||
| From the author's mouth, **the only thing required** is `adherence to`_ | ||||
| the `3 axioms`_, *and that's it*. | ||||
| 
 | ||||
| ``tractor`` adheres to said base requirements of an "actor model":: | ||||
| 
 | ||||
|     In response to a message, an actor may: | ||||
| 
 | ||||
|     - send a finite number of new messages | ||||
|     - create a finite number of new actors | ||||
|     - designate a new behavior to process subsequent messages | ||||
| 
 | ||||
| 
 | ||||
| **and** requires *no further API changes* to accomplish this. | ||||
| 
 | ||||
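| As a loose illustration (ours, not an official mapping), here's how | ||||
| those axioms land on the primitives used throughout this readme: | ||||
| 
 | ||||
| .. code:: python | ||||
| 
 | ||||
|     import trio | ||||
|     import tractor | ||||
| 
 | ||||
| 
 | ||||
|     async def echo_once(msg: str) -> str: | ||||
|         # "designate a new behavior": behavior is just plain | ||||
|         # Python state/functions local to each actor | ||||
|         return msg | ||||
| 
 | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             # "create a finite number of new actors" | ||||
|             portal = await n.run_in_actor(echo_once, msg='hello') | ||||
| 
 | ||||
|             # "send a finite number of new messages": here just | ||||
|             # the single result msg sent back from the child | ||||
|             assert await portal.result() == 'hello' | ||||
| 
 | ||||
| 
 | ||||
|     if __name__ == '__main__': | ||||
|         trio.run(main) | ||||
| 
 | ||||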
| If you want to debate this further, please feel free to chime in on our | ||||
| chat or discuss on one of the following issues *after you've read | ||||
| everything in them*: | ||||
| 
 | ||||
| - https://github.com/goodboy/tractor/issues/210 | ||||
| - https://github.com/goodboy/tractor/issues/18 | ||||
| 
 | ||||
| 
 | ||||
| Let's clarify our parlance | ||||
| ************************** | ||||
| Whether or not ``tractor`` has "actors" underneath should be mostly | ||||
| irrelevant to users other than for referring to the interactions of our | ||||
| primary runtime primitives: each Python process + ``trio.run()`` | ||||
| + surrounding IPC machinery. These are our high level, base | ||||
| *runtime-units-of-abstraction* which both *are* (as much as they can | ||||
| be in Python) and will be referred to as our *"actors"*. | ||||
| 
 | ||||
| The main goal of ``tractor`` is to allow for highly distributed | ||||
| software that, through the adherence to *structured concurrency*, | ||||
| results in systems which fail in predictable, recoverable and maybe even | ||||
| understandable ways; being an "actor model" is just one way to describe | ||||
| properties of the system. | ||||
| "Actors" communicate by exchanging asynchronous messages_ and avoid | ||||
| sharing state. The intention of this model is to allow for highly | ||||
| distributed software that, through the adherence to *structured | ||||
| concurrency*, results in systems which fail in predictable and | ||||
| recoverable ways. | ||||
| 
 | ||||
| 
 | ||||
| What's on the TODO: | ||||
| ------------------- | ||||
| Help us push toward the future of distributed `Python`. | ||||
| Help us push toward the future. | ||||
| 
 | ||||
| - Erlang-style supervisors via composed context managers (see `#22 | ||||
|   <https://github.com/goodboy/tractor/issues/22>`_) | ||||
| - Typed messaging protocols (ex. via ``msgspec.Struct``, see `#36 | ||||
|   <https://github.com/goodboy/tractor/issues/36>`_) | ||||
| - Typed capability-based (dialog) protocols ( see `#196 | ||||
|   <https://github.com/goodboy/tractor/issues/196>`_ with draft work | ||||
|   started in `#311 <https://github.com/goodboy/tractor/pull/311>`_) | ||||
| - We **recently disabled CI-testing on Windows** and need help getting | ||||
|   it running again! (see `#327 | ||||
|   <https://github.com/goodboy/tractor/pull/327>`_). **We do have Windows | ||||
|   support** (and have for quite a while) but since no active hacker | ||||
|   exists in the user base to help test on that OS, for now we're not | ||||
|   actively maintaining testing due to the added hassle and general | ||||
|   latency. | ||||
| - (Soon to land) ``asyncio`` support allowing for "infected" actors where | ||||
|   `trio` drives the `asyncio` scheduler via the astounding "`guest mode`_" | ||||
| - Typed messaging protocols (ex. via ``msgspec``) | ||||
| - Erlang-style supervisors via composed context managers | ||||
| 
 | ||||
| 
 | ||||
| Feel like saying hi? | ||||
|  | @ -592,32 +278,18 @@ say hi, please feel free to reach us in our `matrix channel`_.  If | |||
| matrix seems too hip, we're also mostly all in the `trio gitter | ||||
| channel`_! | ||||
| 
 | ||||
| .. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228 | ||||
| .. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing | ||||
| .. _trio: https://github.com/python-trio/trio | ||||
| .. _nurseries: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/#nurseries-a-structured-replacement-for-go-statements | ||||
| .. _actor model: https://en.wikipedia.org/wiki/Actor_model | ||||
| .. _trionic: https://trio.readthedocs.io/en/latest/design.html#high-level-design-principles | ||||
| .. _async sandwich: https://trio.readthedocs.io/en/latest/tutorial.html#async-sandwich | ||||
| .. _3 axioms: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=162s | ||||
| .. .. _3 axioms: https://en.wikipedia.org/wiki/Actor_model#Fundamental_concepts | ||||
| .. _adherence to: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=1821s | ||||
| .. _trio gitter channel: https://gitter.im/python-trio/general | ||||
| .. _matrix channel: https://matrix.to/#/!tractor:matrix.org | ||||
| .. _pdbp: https://github.com/mdmintz/pdbp | ||||
| .. _pdb++: https://github.com/pdbpp/pdbpp | ||||
| .. _guest mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops | ||||
| .. _messages: https://en.wikipedia.org/wiki/Message_passing | ||||
| .. _trio docs: https://trio.readthedocs.io/en/latest/ | ||||
| .. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/ | ||||
| .. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency | ||||
| .. _structured chadcurrency: https://en.wikipedia.org/wiki/Structured_concurrency | ||||
| .. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency | ||||
| .. _structured concurrency: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/ | ||||
| .. _3 axioms: https://en.wikipedia.org/wiki/Actor_model#Fundamental_concepts | ||||
| .. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony | ||||
| .. _async generators: https://www.python.org/dev/peps/pep-0525/ | ||||
| .. _trio-parallel: https://github.com/richardsheridan/trio-parallel | ||||
| .. _msgspec: https://jcristharif.com/msgspec/ | ||||
| .. _guest-mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops | ||||
| 
 | ||||
| 
 | ||||
| .. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square | ||||
|  |  | |||
docs/conf.py (38 lines changed)
							|  | @ -54,44 +54,28 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] | |||
| # The theme to use for HTML and HTML Help pages.  See the documentation for | ||||
| # a list of builtin themes. | ||||
| # | ||||
| html_theme = 'sphinx_book_theme' | ||||
| html_theme = 'sphinx_typlog_theme' | ||||
| 
 | ||||
| pygments_style = 'algol_nu' | ||||
| pygments_style = 'sphinx' | ||||
| 
 | ||||
| # Theme options are theme-specific and customize the look and feel of a theme | ||||
| # further.  For a list of options available for each theme, see the | ||||
| # documentation. | ||||
| html_theme_options = { | ||||
|     # 'logo': 'tractor_logo_side.svg', | ||||
|     # 'description': 'Structured concurrent "actors"', | ||||
|     "repository_url": "https://github.com/goodboy/tractor", | ||||
|     "use_repository_button": True, | ||||
|     "home_page_in_toc": False, | ||||
|     "show_toc_level": 1, | ||||
|     "path_to_docs": "docs", | ||||
| 
 | ||||
|     'logo': 'tractor_logo_side.svg', | ||||
|     'description': 'Structured concurrent "actors"', | ||||
|     'github_user': 'goodboy', | ||||
|     'github_repo': 'tractor', | ||||
| } | ||||
| html_sidebars = { | ||||
|     "**": [ | ||||
|         "sbt-sidebar-nav.html", | ||||
|         # "sidebar-search-bs.html", | ||||
|         # 'localtoc.html', | ||||
|     ], | ||||
|     #     'logo.html', | ||||
|     #     'github.html', | ||||
|     #     'relations.html', | ||||
|     #     'searchbox.html' | ||||
|     # ] | ||||
|         'logo.html', | ||||
|         'github.html', | ||||
|         'relations.html', | ||||
|         'searchbox.html' | ||||
|     ] | ||||
| } | ||||
| 
 | ||||
| # doesn't seem to work? | ||||
| # extra_navbar = "<p>nextttt-gennnnn</p>" | ||||
| 
 | ||||
| html_title = '' | ||||
| html_logo = '_static/tractor_logo_side.svg' | ||||
| html_favicon = '_static/tractor_logo_side.svg' | ||||
| # show_navbar_depth = 1 | ||||
| 
 | ||||
| # Add any paths that contain custom static files (such as style sheets) here, | ||||
| # relative to this directory. They are copied after the builtin static files, | ||||
| # so a file named "default.css" will overwrite the builtin "default.css". | ||||
|  |  | |||
|  | @ -1,51 +0,0 @@ | |||
| Hot tips for ``tractor`` hackers | ||||
| ================================ | ||||
| 
 | ||||
| This is a WIP guide for newcomers to the project mostly to do with | ||||
| dev, testing, CI and release gotchas, reminders and best practices. | ||||
| 
 | ||||
| ``tractor`` is a fairly novel project compared to most since it is | ||||
| effectively a new way of doing distributed computing in Python and is | ||||
| much closer to working with an "application level runtime" (like Erlang | ||||
| OTP or Scala's Akka project) than it is to a traditional Python library. | ||||
| As such, having an arsenal of tools and recipes for figuring out the | ||||
| right way to debug problems when they do arise is somewhat of | ||||
| a necessity. | ||||
| 
 | ||||
| 
 | ||||
| Making a Release | ||||
| ---------------- | ||||
| We currently do nothing special here except the traditional | ||||
| PyPA release recipe as `documented by twine`_. I personally | ||||
| create sub-dirs within the generated `dist/` with an explicit | ||||
| release name such as `alpha3/` when there's been a sequence of | ||||
| releases I've made, but it really is up to you how you like to | ||||
| organize generated sdists locally. | ||||
| 
 | ||||
| The resulting build cmds are approximately: | ||||
| 
 | ||||
| .. code:: bash | ||||
| 
 | ||||
|     python setup.py sdist -d ./dist/XXX.X/ | ||||
| 
 | ||||
|     twine upload -r testpypi dist/XXX.X/* | ||||
| 
 | ||||
|     twine upload dist/XXX.X/* | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| .. _documented by twine: https://twine.readthedocs.io/en/latest/#using-twine | ||||
| 
 | ||||
| 
 | ||||
| Debugging and monitoring actor trees | ||||
| ------------------------------------ | ||||
| TODO: but there are tips in the readme for some terminal commands | ||||
| which can be used to see the process trees easily on Linux. | ||||
| 
 | ||||
| 
 | ||||
| Using the log system to trace `trio` task flow | ||||
| ---------------------------------------------- | ||||
| TODO: the logging system is meant to be oriented around | ||||
| stack "layers" of the runtime such that you can track | ||||
| "logical abstraction layers" in the code such as errors, cancellation, | ||||
| IPC and streaming, and the low level transport and wire protocols. | ||||
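| 
 | ||||
| Until that guide materializes, here's a minimal sketch of the two | ||||
| logging knobs the bundled examples already use (the level names are | ||||
| the ones that appear in those examples): | ||||
| 
 | ||||
| .. code:: python | ||||
| 
 | ||||
|     import trio | ||||
|     import tractor | ||||
| 
 | ||||
| 
 | ||||
|     async def main(): | ||||
|         # enable console logging in this (root) actor | ||||
|         tractor.log.get_console_log('INFO') | ||||
| 
 | ||||
|         # and/or set a level for a whole actor sub-tree; levels | ||||
|         # seen in the examples include 'warning', 'debug' and | ||||
|         # the runtime-internal 'runtime' | ||||
|         async with tractor.open_nursery( | ||||
|             loglevel='warning', | ||||
|         ) as n: | ||||
|             portal = await n.start_actor('quiet_child') | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
|     if __name__ == '__main__': | ||||
|         trio.run(main) | ||||
| 
 | ||||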
|  | @ -3,13 +3,12 @@ | |||
|    You can adapt this file completely to your liking, but it should at least | ||||
|    contain the root `toctree` directive. | ||||
| 
 | ||||
| ``tractor`` | ||||
| =========== | ||||
| 
 | ||||
| tractor | ||||
| ======= | ||||
| A `structured concurrent`_, async-native "`actor model`_" built on trio_ and multiprocessing_. | ||||
| 
 | ||||
| .. toctree:: | ||||
|    :maxdepth: 1 | ||||
|    :maxdepth: 2 | ||||
|    :caption: Contents: | ||||
| 
 | ||||
| .. _actor model: https://en.wikipedia.org/wiki/Actor_model | ||||
|  | @ -59,6 +58,8 @@ say hi, please feel free to ping me on the `trio gitter channel`_! | |||
| .. _trio gitter channel: https://gitter.im/python-trio/general | ||||
| 
 | ||||
| 
 | ||||
| .. contents:: | ||||
| 
 | ||||
| 
 | ||||
| Philosophy | ||||
| ---------- | ||||
|  | @ -144,7 +145,7 @@ and use the ``run_in_actor()`` method: | |||
| 
 | ||||
| What's going on? | ||||
| 
 | ||||
| - an initial *actor* is started with ``trio.run()`` and told to execute | ||||
| - an initial *actor* is started with ``tractor.run()`` and told to execute | ||||
|   its main task_: ``main()`` | ||||
| 
 | ||||
| - inside ``main()`` an actor is *spawned* using an ``ActorNursery`` and is told | ||||
|  | @ -181,7 +182,7 @@ Here is a similar example using the latter method: | |||
| 
 | ||||
| .. literalinclude:: ../examples/actor_spawning_and_causality_with_daemon.py | ||||
| 
 | ||||
| The ``enable_modules`` `kwarg` above is a list of module path | ||||
| The ``rpc_module_paths`` `kwarg` above is a list of module path | ||||
| strings that will be loaded and made accessible for execution in the | ||||
| remote actor through a call to ``Portal.run()``. For now this is | ||||
| a simple mechanism to restrict the functionality of the remote | ||||
|  | @ -396,7 +397,7 @@ tasks spawned via multiple RPC calls to an actor can modify | |||
| 
 | ||||
| 
 | ||||
|         # a per process cache | ||||
|         _actor_cache: dict[str, bool] = {} | ||||
|         _actor_cache: Dict[str, bool] = {} | ||||
| 
 | ||||
| 
 | ||||
|         def ping_endpoints(endpoints: List[str]): | ||||
|  | @ -457,7 +458,7 @@ find an actor's socket address by name use the ``find_actor()`` function: | |||
| .. literalinclude:: ../examples/service_discovery.py | ||||
| 
 | ||||
| The ``name`` value you should pass to ``find_actor()`` is the one you passed as the | ||||
| *first* argument to either ``trio.run()`` or ``ActorNursery.start_actor()``. | ||||
| *first* argument to either ``tractor.run()`` or ``ActorNursery.start_actor()``. | ||||
| 
 | ||||
| 
 | ||||
| Running actors standalone | ||||
|  | @ -471,17 +472,7 @@ need to hop into a debugger. You just need to pass the existing | |||
| 
 | ||||
| .. code:: python | ||||
| 
 | ||||
|     import trio | ||||
|     import tractor | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         async with tractor.open_root_actor( | ||||
|             arbiter_addr=('192.168.0.10', 1616) | ||||
|         ): | ||||
|             await trio.sleep_forever() | ||||
| 
 | ||||
|     trio.run(main) | ||||
|     tractor.run(main, arbiter_addr=('192.168.0.10', 1616)) | ||||
| 
 | ||||
| 
 | ||||
| Choosing a process spawning backend | ||||
|  | @ -489,7 +480,7 @@ Choosing a process spawning backend | |||
| ``tractor`` is architected to support multiple actor (sub-process) | ||||
| spawning backends. Specific defaults are chosen based on your system | ||||
| but you can also explicitly select a backend of choice at startup | ||||
| via a ``start_method`` kwarg to ``tractor.open_nursery()``. | ||||
| via a ``start_method`` kwarg to ``tractor.run()``. | ||||
| 
 | ||||
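| For example (a sketch using the ``open_nursery()`` spelling; the same | ||||
| kwarg works with ``tractor.run()``): | ||||
| 
 | ||||
| .. code:: python | ||||
| 
 | ||||
|     import trio | ||||
|     import tractor | ||||
| 
 | ||||
| 
 | ||||
|     async def main(): | ||||
|         # explicitly pick the subprocess spawning backend | ||||
|         async with tractor.open_nursery( | ||||
|             start_method='trio', | ||||
|         ) as n: | ||||
|             portal = await n.start_actor('backend_demo') | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
|     if __name__ == '__main__': | ||||
|         trio.run(main) | ||||
| 
 | ||||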
| Currently the options available are: | ||||
| 
 | ||||
|  | @ -545,14 +536,13 @@ main python module of the program: | |||
| .. code:: python | ||||
| 
 | ||||
|     # application/__main__.py | ||||
|     import trio | ||||
|     import tractor | ||||
|     import multiprocessing | ||||
|     from . import tractor_app | ||||
| 
 | ||||
|     if __name__ == '__main__': | ||||
|         multiprocessing.freeze_support() | ||||
|         trio.run(tractor_app.main) | ||||
|         tractor.run(tractor_app.main) | ||||
| 
 | ||||
| And execute as:: | ||||
| 
 | ||||
|  |  | |||
|  | @ -16,4 +16,4 @@ if __name__ == '__main__': | |||
|     # temporary dir and name it test_example.py. We import that script | ||||
|     # module here and invoke its ``main()``. | ||||
|     from . import test_example | ||||
|     test_example.trio.run(test_example.main) | ||||
|     test_example.tractor.run(test_example.main, start_method='spawn') | ||||
|  |  | |||
|  | @ -1,4 +1,3 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| _this_module = __name__ | ||||
|  | @ -41,4 +40,4 @@ async def main(): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main) | ||||
|  |  | |||
|  | @ -1,8 +1,7 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def cellar_door(): | ||||
| def cellar_door(): | ||||
|     assert not tractor.is_root_process() | ||||
|     return "Dang that's beautiful" | ||||
| 
 | ||||
|  | @ -24,4 +23,4 @@ async def main(): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main) | ||||
|  |  | |||
|  | @ -1,8 +1,7 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def movie_theatre_question(): | ||||
| def movie_theatre_question(): | ||||
|     """A question asked in a dark theatre, in a tangent | ||||
|     (errr, I mean different) process. | ||||
|     """ | ||||
|  | @ -17,7 +16,7 @@ async def main(): | |||
|         portal = await n.start_actor( | ||||
|             'frank', | ||||
|             # enable the actor to run funcs from this current module | ||||
|             enable_modules=[__name__], | ||||
|             rpc_module_paths=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         print(await portal.run(movie_theatre_question)) | ||||
|  | @ -31,4 +30,4 @@ async def main(): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main) | ||||
|  |  | |||
|  | @ -1,151 +0,0 @@ | |||
| ''' | ||||
| Complex edge case where during real-time streaming the IPC transport | ||||
| channels are wiped out (purposely in this example though it could have | ||||
| been an outage) and we want to ensure that despite being in debug mode | ||||
| (or not) the user can send SIGINT once they notice the hang and the | ||||
| actor tree will eventually be cancelled without leaving any zombies. | ||||
| 
 | ||||
| ''' | ||||
| import trio | ||||
| from tractor import ( | ||||
|     open_nursery, | ||||
|     context, | ||||
|     Context, | ||||
|     MsgStream, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| async def break_channel_silently_then_error( | ||||
|     stream: MsgStream, | ||||
| ): | ||||
|     async for msg in stream: | ||||
|         await stream.send(msg) | ||||
| 
 | ||||
|         # XXX: close the channel right before an error is raised, | ||||
|         # purposely breaking the IPC transport to make sure the parent | ||||
|         # doesn't get stuck in debug or hang on the connection join. | ||||
|         # this more or less simulates an infinite msg-receive hang on | ||||
|         # the other end. | ||||
|         await stream._ctx.chan.send(None) | ||||
|         assert 0 | ||||
| 
 | ||||
| 
 | ||||
| async def close_stream_and_error( | ||||
|     stream: MsgStream, | ||||
| ): | ||||
|     async for msg in stream: | ||||
|         await stream.send(msg) | ||||
| 
 | ||||
|         # wipe out channel right before raising | ||||
|         await stream._ctx.chan.send(None) | ||||
|         await stream.aclose() | ||||
|         assert 0 | ||||
| 
 | ||||
| 
 | ||||
| @context | ||||
| async def recv_and_spawn_net_killers( | ||||
| 
 | ||||
|     ctx: Context, | ||||
|     break_ipc_after: bool | int = False, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Receive stream msgs and spawn some IPC killers mid-stream. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     async with ( | ||||
|         ctx.open_stream() as stream, | ||||
|         trio.open_nursery() as n, | ||||
|     ): | ||||
|         async for i in stream: | ||||
|             print(f'child echoing {i}') | ||||
|             await stream.send(i) | ||||
|             if ( | ||||
|                 break_ipc_after | ||||
|                 and i > break_ipc_after | ||||
|             ): | ||||
|                 print( | ||||
|                     '#################################\n' | ||||
|                     'Simulating child-side IPC BREAK!\n' | ||||
|                     '#################################' | ||||
|                 ) | ||||
|                 n.start_soon(break_channel_silently_then_error, stream) | ||||
|                 n.start_soon(close_stream_and_error, stream) | ||||
| 
 | ||||
| 
 | ||||
| async def main( | ||||
|     debug_mode: bool = False, | ||||
|     start_method: str = 'trio', | ||||
| 
 | ||||
|     # by default we break the parent IPC first (if configured to break | ||||
|     # at all), but this can be changed so the child breaks first (even if | ||||
|     # both are set to break). | ||||
|     break_parent_ipc_after: int | bool = False, | ||||
|     break_child_ipc_after: int | bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     async with ( | ||||
|         open_nursery( | ||||
|             start_method=start_method, | ||||
| 
 | ||||
|             # NOTE: even if the debugger is used we shouldn't get | ||||
|             # a hang since it never engages due to broken IPC | ||||
|             debug_mode=debug_mode, | ||||
|             loglevel='warning', | ||||
| 
 | ||||
|         ) as an, | ||||
|     ): | ||||
|         portal = await an.start_actor( | ||||
|             'chitty_hijo', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_context( | ||||
|             recv_and_spawn_net_killers, | ||||
|             break_ipc_after=break_child_ipc_after, | ||||
| 
 | ||||
|         ) as (ctx, sent): | ||||
|             async with ctx.open_stream() as stream: | ||||
|                 for i in range(1000): | ||||
| 
 | ||||
|                     if ( | ||||
|                         break_parent_ipc_after | ||||
|                         and i > break_parent_ipc_after | ||||
|                     ): | ||||
|                         print( | ||||
|                             '#################################\n' | ||||
|                             'Simulating parent-side IPC BREAK!\n' | ||||
|                             '#################################' | ||||
|                         ) | ||||
|                         await stream._ctx.chan.send(None) | ||||
| 
 | ||||
|                     # it actually breaks right here in the | ||||
|                     # mp_spawn/forkserver backends and thus the zombie | ||||
|                     # reaper never even kicks in? | ||||
|                     print(f'parent sending {i}') | ||||
|                     await stream.send(i) | ||||
| 
 | ||||
|                     with trio.move_on_after(2) as cs: | ||||
| 
 | ||||
|                         # NOTE: in the parent side IPC failure case this | ||||
|                         # will raise an ``EndOfChannel`` after the child | ||||
|                         # is killed and sends a stop msg back to its | ||||
|                         # caller/this-parent. | ||||
|                         rx = await stream.receive() | ||||
| 
 | ||||
|                         print(f"I'm a happy user and echoed to me is {rx}") | ||||
| 
 | ||||
|                     if cs.cancelled_caught: | ||||
|                         # pretend to be a user seeing no streaming action | ||||
|                         # thinking it's a hang, and then hitting ctl-c.. | ||||
|                         print("YOO i'm a user anddd thingz hangin..") | ||||
| 
 | ||||
|                 print( | ||||
|                     "YOO i'm mad send side dun but thingz hangin..\n" | ||||
|                     'MASHING CTRL-C Ctl-c..' | ||||
|                 ) | ||||
|                 raise KeyboardInterrupt | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -1,42 +1,36 @@ | |||
| from typing import AsyncIterator | ||||
| from itertools import repeat | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| tractor.log.get_console_log("INFO") | ||||
| 
 | ||||
| async def stream_forever() -> AsyncIterator[int]: | ||||
| 
 | ||||
| async def stream_forever(): | ||||
|     for i in repeat("I can see these little future bubble things"): | ||||
|         # each yielded value is sent over the ``Channel`` to the parent actor | ||||
|         # each yielded value is sent over the ``Channel`` to the | ||||
|         # parent actor | ||||
|         yield i | ||||
|         await trio.sleep(0.01) | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
| 
 | ||||
|     # stream for at most 1 second | ||||
|     with trio.move_on_after(1) as cancel_scope: | ||||
|         async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|             portal = await n.start_actor( | ||||
|             'donny', | ||||
|             enable_modules=[__name__], | ||||
|                 f'donny', | ||||
|                 rpc_module_paths=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             # this async for loop streams values from the above | ||||
|             # async generator running in a separate process | ||||
|         async with portal.open_stream_from(stream_forever) as stream: | ||||
|             count = 0 | ||||
|             async for letter in stream: | ||||
|             async for letter in await portal.run(stream_forever): | ||||
|                 print(letter) | ||||
|                 count += 1 | ||||
| 
 | ||||
|                 if count > 50: | ||||
|                     break | ||||
| 
 | ||||
|         print('stream terminated') | ||||
| 
 | ||||
|         await portal.cancel_actor() | ||||
|     # we support trio's cancellation system | ||||
|     assert cancel_scope.cancelled_caught | ||||
|     assert n.cancelled | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main) | ||||
|  |  | |||
|  | @ -1,54 +0,0 @@ | |||
| ''' | ||||
| Fast fail test with a context. | ||||
| 
 | ||||
| Ensure the partially initialized sub-actor process | ||||
| doesn't cause a hang on error/cancel of the parent | ||||
| nursery. | ||||
| 
 | ||||
| ''' | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def sleep( | ||||
|     ctx: tractor.Context, | ||||
| ): | ||||
|     await trio.sleep(0.5) | ||||
|     await ctx.started() | ||||
|     await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| async def open_ctx( | ||||
|     n: tractor._supervise.ActorNursery | ||||
| ): | ||||
| 
 | ||||
|     # spawn both actors | ||||
|     portal = await n.start_actor( | ||||
|         name='sleeper', | ||||
|         enable_modules=[__name__], | ||||
|     ) | ||||
| 
 | ||||
|     async with portal.open_context( | ||||
|         sleep, | ||||
|     ) as (ctx, first): | ||||
|         assert first is None | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
| 
 | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|         loglevel='runtime', | ||||
|     ) as an: | ||||
| 
 | ||||
|         async with trio.open_nursery() as n: | ||||
|             n.start_soon(open_ctx, an) | ||||
| 
 | ||||
|             await trio.sleep(0.2) | ||||
|             await trio.sleep(0.1) | ||||
|             assert 0 | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -11,7 +11,7 @@ async def breakpoint_forever(): | |||
| 
 | ||||
| async def name_error(): | ||||
|     "Raise a ``NameError``" | ||||
|     getattr(doggypants)  # noqa | ||||
|     getattr(doggypants) | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
|  | @ -26,18 +26,7 @@ async def main(): | |||
|         p1 = await n.start_actor('name_error', enable_modules=[__name__]) | ||||
| 
 | ||||
|         # retrieve results | ||||
|         async with p0.open_stream_from(breakpoint_forever) as stream: | ||||
| 
 | ||||
|             # triggers the first name error | ||||
|             try: | ||||
|                 await p1.run(name_error) | ||||
|             except tractor.RemoteActorError as rae: | ||||
|                 assert rae.type is NameError | ||||
| 
 | ||||
|             async for i in stream: | ||||
| 
 | ||||
|                 # a second time try the failing subactor and this time | ||||
|                 # let the error propagate up to the parent/nursery. | ||||
|         stream = await p0.run(breakpoint_forever) | ||||
|         await p1.run(name_error) | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -4,7 +4,7 @@ import tractor | |||
| 
 | ||||
| async def name_error(): | ||||
|     "Raise a ``NameError``" | ||||
|     getattr(doggypants)  # noqa | ||||
|     getattr(doggypants) | ||||
| 
 | ||||
| 
 | ||||
| async def breakpoint_forever(): | ||||
|  | @ -12,31 +12,18 @@ async def breakpoint_forever(): | |||
|     while True: | ||||
|         await tractor.breakpoint() | ||||
| 
 | ||||
|         # NOTE: if the test never sent 'q'/'quit' commands | ||||
|         # on the pdb repl, without this checkpoint line the | ||||
|         # repl would spin in this actor forever. | ||||
|         # await trio.sleep(0) | ||||
| 
 | ||||
| 
 | ||||
| async def spawn_until(depth=0): | ||||
|     """"A nested nursery that triggers another ``NameError``. | ||||
|     """ | ||||
|     async with tractor.open_nursery() as n: | ||||
|         if depth < 1: | ||||
| 
 | ||||
|             await n.run_in_actor(breakpoint_forever) | ||||
| 
 | ||||
|             p = await n.run_in_actor( | ||||
|             # await n.run_in_actor('breakpoint_forever', breakpoint_forever) | ||||
|             await n.run_in_actor( | ||||
|                 name_error, | ||||
|                 name='name_error' | ||||
|             ) | ||||
|             await trio.sleep(0.5) | ||||
|             # rx and propagate error from child | ||||
|             await p.result() | ||||
| 
 | ||||
|         else: | ||||
|             # recursive call to spawn another process branching layer of | ||||
|             # the tree | ||||
|             depth -= 1 | ||||
|             await n.run_in_actor( | ||||
|                 spawn_until, | ||||
|  | @ -66,7 +53,6 @@ async def main(): | |||
|     """ | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|         # loglevel='cancel', | ||||
|     ) as n: | ||||
| 
 | ||||
|         # spawn both actors | ||||
|  | @ -81,16 +67,8 @@ async def main(): | |||
|             name='spawner1', | ||||
|         ) | ||||
| 
 | ||||
|         # TODO: test this case as well where the parent doesn't see | ||||
|         # the sub-actor errors by default and instead expects a user | ||||
|         # ctrl-c to kill the root. | ||||
|         with trio.move_on_after(3): | ||||
|             await trio.sleep_forever() | ||||
| 
 | ||||
|         # gah still an issue here. | ||||
|         await portal.result() | ||||
| 
 | ||||
|         # should never get here | ||||
|         await portal1.result() | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,15 +1,9 @@ | |||
| ''' | ||||
| Test that a nested nursery will avoid clobbering | ||||
| the debugger latched by a broken child. | ||||
| 
 | ||||
| ''' | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def name_error(): | ||||
|     "Raise a ``NameError``" | ||||
|     getattr(doggypants)  # noqa | ||||
|     getattr(doggypants) | ||||
| 
 | ||||
| 
 | ||||
| async def spawn_error(): | ||||
|  | @ -38,10 +32,7 @@ async def main(): | |||
|         - root actor should then fail on assert | ||||
|         - program termination | ||||
|     """ | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|         # loglevel='cancel', | ||||
|     ) as n: | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         # spawn both actors | ||||
|         portal = await n.run_in_actor( | ||||
|  | @ -63,4 +54,4 @@ async def main(): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main, debug_mode=True) | ||||
|  |  | |||
|  | @ -11,7 +11,7 @@ async def breakpoint_forever(): | |||
| 
 | ||||
| async def name_error(): | ||||
|     "Raise a ``NameError``" | ||||
|     getattr(doggypants)  # noqa | ||||
|     getattr(doggypants) | ||||
| 
 | ||||
| 
 | ||||
| async def spawn_error(): | ||||
|  | @ -36,9 +36,7 @@ async def main(): | |||
|     `-python -m tractor._child --uid ('spawn_error', '52ee14a5 ...) | ||||
|        `-python -m tractor._child --uid ('name_error', '3391222c ...) | ||||
|     """ | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|     ) as n: | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         # Spawn both actors, don't bother with collecting results | ||||
|         # (would result in a different debugger outcome due to parent's | ||||
|  | @ -49,4 +47,4 @@ async def main(): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main, debug_mode=True) | ||||
|  |  | |||
|  | @ -1,40 +0,0 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def just_sleep( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Start and sleep. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| async def main() -> None: | ||||
| 
 | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|     ) as n: | ||||
|         portal = await n.start_actor( | ||||
|             'ctx_child', | ||||
| 
 | ||||
|             # XXX: we don't enable the current module in order | ||||
|             # to trigger `ModuleNotFound`. | ||||
|             enable_modules=[], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_context( | ||||
|             just_sleep,  # taken from pytest parameterization | ||||
|         ) as (ctx, sent): | ||||
|             raise KeyboardInterrupt | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -1,27 +0,0 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| async def die(): | ||||
|     raise RuntimeError | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
|     async with tractor.open_nursery() as tn: | ||||
| 
 | ||||
|         debug_actor = await tn.start_actor( | ||||
|             'debugged_boi', | ||||
|             enable_modules=[__name__], | ||||
|             debug_mode=True, | ||||
|         ) | ||||
|         crash_boi = await tn.start_actor( | ||||
|             'crash_boi', | ||||
|             enable_modules=[__name__], | ||||
|             # debug_mode=True, | ||||
|         ) | ||||
| 
 | ||||
|         async with trio.open_nursery() as n: | ||||
|             n.start_soon(debug_actor.run, die) | ||||
|             n.start_soon(crash_boi.run, die) | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -1,24 +0,0 @@ | |||
| import os | ||||
| import sys | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def main() -> None: | ||||
|     async with tractor.open_nursery(debug_mode=True) as an: | ||||
| 
 | ||||
|         assert os.environ['PYTHONBREAKPOINT'] == 'tractor._debug._set_trace' | ||||
| 
 | ||||
|         # TODO: an assert that verifies the hook has indeed been, hooked | ||||
|         # XD | ||||
|         assert sys.breakpointhook is not tractor._debug._set_trace | ||||
| 
 | ||||
|         breakpoint() | ||||
| 
 | ||||
|     # TODO: an assert that verifies the hook is unhooked.. | ||||
|     assert sys.breakpointhook | ||||
|     breakpoint() | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -4,10 +4,6 @@ import tractor | |||
| 
 | ||||
| async def main(): | ||||
| 
 | ||||
|     async with tractor.open_root_actor( | ||||
|         debug_mode=True, | ||||
|     ): | ||||
| 
 | ||||
|     await trio.sleep(0.1) | ||||
| 
 | ||||
|     await tractor.breakpoint() | ||||
|  | @ -16,4 +12,4 @@ async def main(): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main, debug_mode=True) | ||||
|  |  | |||
|  | @ -1,15 +1,11 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
| 
 | ||||
|     async with tractor.open_root_actor( | ||||
|         debug_mode=True, | ||||
|     ): | ||||
|     while True: | ||||
|         await tractor.breakpoint() | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main, debug_mode=True) | ||||
|  |  | |||
|  | @ -1,13 +1,9 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
|     async with tractor.open_root_actor( | ||||
|         debug_mode=True, | ||||
|     ): | ||||
|     assert 0 | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main, debug_mode=True) | ||||
|  |  | |||
|  | @ -1,10 +1,9 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def name_error(): | ||||
|     "Raise a ``NameError``" | ||||
|     getattr(doggypants)  # noqa | ||||
|     getattr(doggypants) | ||||
| 
 | ||||
| 
 | ||||
| async def spawn_until(depth=0): | ||||
|  | @ -38,10 +37,7 @@ async def main(): | |||
|        └─ python -m tractor._child --uid ('name_error', '6c2733b8 ...) | ||||
| 
 | ||||
|     """ | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|         loglevel='warning' | ||||
|     ) as n: | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         # spawn both actors | ||||
|         portal = await n.run_in_actor( | ||||
|  | @ -62,4 +58,4 @@ async def main(): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main, debug_mode=True, loglevel='warning') | ||||
|  |  | |||
|  | @ -1,31 +0,0 @@ | |||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def key_error(): | ||||
|     "Raise a ``NameError``" | ||||
|     return {}['doggy'] | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
|     """Root dies  | ||||
| 
 | ||||
|     """ | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|         loglevel='debug' | ||||
|     ) as n: | ||||
| 
 | ||||
|         # spawn both actors | ||||
|         portal = await n.run_in_actor(key_error) | ||||
| 
 | ||||
|         # XXX: this originally triggered a bug where the root would enter | ||||
|         # the debugger and clobber the tty used by the repl even though | ||||
|         # the child should have it locked. | ||||
|         with trio.fail_after(1): | ||||
|             await trio.Event().wait() | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -1,50 +0,0 @@ | |||
| import tractor | ||||
| import trio | ||||
| 
 | ||||
| 
 | ||||
| async def gen(): | ||||
|     yield 'yo' | ||||
|     await tractor.breakpoint() | ||||
|     yield 'yo' | ||||
|     await tractor.breakpoint() | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def just_bp( | ||||
|     ctx: tractor.Context, | ||||
| ) -> None: | ||||
| 
 | ||||
|     await ctx.started() | ||||
|     await tractor.breakpoint() | ||||
| 
 | ||||
|     # TODO: bps and errors in this call.. | ||||
|     async for val in gen(): | ||||
|         print(val) | ||||
| 
 | ||||
|     # await trio.sleep(0.5) | ||||
| 
 | ||||
|     # prematurely destroy the connection | ||||
|     await ctx.chan.aclose() | ||||
| 
 | ||||
|     # THIS CAUSES AN UNRECOVERABLE HANG | ||||
|     # without latest ``pdbpp``: | ||||
|     assert 0 | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|     ) as n: | ||||
|         p = await n.start_actor( | ||||
|             'bp_boi', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
|         async with p.open_context( | ||||
|             just_bp, | ||||
|         ) as (ctx, first): | ||||
|             await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -12,9 +12,7 @@ async def breakpoint_forever(): | |||
| 
 | ||||
| async def main(): | ||||
| 
 | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|     ) as n: | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         portal = await n.run_in_actor( | ||||
|             breakpoint_forever, | ||||
|  | @ -23,4 +21,4 @@ async def main(): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main, debug_mode=True) | ||||
|  |  | |||
|  | @ -1,4 +1,3 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
|  | @ -7,13 +6,11 @@ async def name_error(): | |||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|     ) as n: | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         portal = await n.run_in_actor(name_error) | ||||
|         await portal.result() | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|     tractor.run(main, debug_mode=True) | ||||
|  |  | |||
|  | @ -7,7 +7,7 @@ import tractor | |||
| async def stream_data(seed): | ||||
|     for i in range(seed): | ||||
|         yield i | ||||
|         await trio.sleep(0.0001)  # trigger scheduler | ||||
|         await trio.sleep(0)  # trigger scheduler | ||||
| 
 | ||||
| 
 | ||||
| # this is the third actor; the aggregator | ||||
|  | @ -21,7 +21,7 @@ async def aggregate(seed): | |||
|             # fork point | ||||
|             portal = await nursery.start_actor( | ||||
|                 name=f'streamer_{i}', | ||||
|                 enable_modules=[__name__], | ||||
|                 rpc_module_paths=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             portals.append(portal) | ||||
|  | @ -29,11 +29,8 @@ async def aggregate(seed): | |||
|         send_chan, recv_chan = trio.open_memory_channel(500) | ||||
| 
 | ||||
|         async def push_to_chan(portal, send_chan): | ||||
| 
 | ||||
|             # TODO: https://github.com/goodboy/tractor/issues/207 | ||||
|             async with send_chan: | ||||
|                 async with portal.open_stream_from(stream_data, seed=seed) as stream: | ||||
|                     async for value in stream: | ||||
|                 async for value in await portal.run(stream_data, seed=seed): | ||||
|                     # leverage trio's built-in backpressure | ||||
|                     await send_chan.send(value) | ||||
| 
 | ||||
|  | @ -68,32 +65,25 @@ async def aggregate(seed): | |||
| # this is the main actor and *arbiter* | ||||
| async def main(): | ||||
|     # a nursery which spawns "actors" | ||||
|     async with tractor.open_nursery( | ||||
|         arbiter_addr=('127.0.0.1', 1616) | ||||
|     ) as nursery: | ||||
|     async with tractor.open_nursery() as nursery: | ||||
| 
 | ||||
|         seed = int(1e3) | ||||
|         import time | ||||
|         pre_start = time.time() | ||||
| 
 | ||||
|         portal = await nursery.start_actor( | ||||
|             name='aggregator', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_stream_from( | ||||
|         portal = await nursery.run_in_actor( | ||||
|             aggregate, | ||||
|             name='aggregator', | ||||
|             seed=seed, | ||||
|         ) as stream: | ||||
|         ) | ||||
| 
 | ||||
|         start = time.time() | ||||
|         # the portal call returns exactly what you'd expect | ||||
|         # as if the remote "aggregate" function was called locally | ||||
|         result_stream = [] | ||||
|             async for value in stream: | ||||
|         async for value in await portal.result(): | ||||
|             result_stream.append(value) | ||||
| 
 | ||||
|         await portal.cancel_actor() | ||||
| 
 | ||||
|         print(f"STREAM TIME = {time.time() - start}") | ||||
|         print(f"STREAM + SPAWN TIME = {time.time() - pre_start}") | ||||
|         assert result_stream == list(range(seed)) | ||||
|  | @ -101,4 +91,4 @@ async def main(): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     final_stream = trio.run(main) | ||||
|     final_stream = tractor.run(main, arbiter_addr=('127.0.0.1', 1616)) | ||||
|  |  | |||
|  | @ -1,92 +0,0 @@ | |||
| ''' | ||||
| An SC compliant infected ``asyncio`` echo server. | ||||
| 
 | ||||
| ''' | ||||
| import asyncio | ||||
| from statistics import mean | ||||
| import time | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def aio_echo_server( | ||||
|     to_trio: trio.MemorySendChannel, | ||||
|     from_trio: asyncio.Queue, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     # a first message must be sent **from** this ``asyncio`` | ||||
|     # task or the ``trio`` side will never unblock from | ||||
|     # ``tractor.to_asyncio.open_channel_from():`` | ||||
|     to_trio.send_nowait('start') | ||||
| 
 | ||||
|     # XXX: this uses a ``from_trio: asyncio.Queue`` currently but we | ||||
|     # should probably offer something better. | ||||
|     while True: | ||||
|         # echo the msg back | ||||
|         to_trio.send_nowait(await from_trio.get()) | ||||
|         await asyncio.sleep(0) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def trio_to_aio_echo_server( | ||||
|     ctx: tractor.Context, | ||||
| ): | ||||
|     # this will block until the ``asyncio`` task sends a "first" | ||||
|     # message. | ||||
|     async with tractor.to_asyncio.open_channel_from( | ||||
|         aio_echo_server, | ||||
|     ) as (first, chan): | ||||
| 
 | ||||
|         assert first == 'start' | ||||
|         await ctx.started(first) | ||||
| 
 | ||||
|         async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|             async for msg in stream: | ||||
|                 await chan.send(msg) | ||||
| 
 | ||||
|                 out = await chan.receive() | ||||
|                 # echo back to parent actor-task | ||||
|                 await stream.send(out) | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
| 
 | ||||
|     async with tractor.open_nursery() as n: | ||||
|         p = await n.start_actor( | ||||
|             'aio_server', | ||||
|             enable_modules=[__name__], | ||||
|             infect_asyncio=True, | ||||
|         ) | ||||
|         async with p.open_context( | ||||
|             trio_to_aio_echo_server, | ||||
|         ) as (ctx, first): | ||||
| 
 | ||||
|             assert first == 'start' | ||||
| 
 | ||||
|             count = 0 | ||||
|             async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|                 delays = [] | ||||
|                 send = time.time() | ||||
| 
 | ||||
|                 await stream.send(count) | ||||
|                 async for msg in stream: | ||||
|                     recv = time.time() | ||||
|                     delays.append(recv - send) | ||||
|                     assert msg == count | ||||
|                     count += 1 | ||||
|                     send = time.time() | ||||
|                     await stream.send(count) | ||||
| 
 | ||||
|                     if count >= 1e3: | ||||
|                         break | ||||
| 
 | ||||
|         print(f'mean round trip rate (Hz): {1/mean(delays)}') | ||||
|         await p.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -1,49 +0,0 @@ | |||
| import trio | ||||
| import click | ||||
| import tractor | ||||
| import pydantic | ||||
| # from multiprocessing import shared_memory | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def just_sleep( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Test a small ping-pong 2-way streaming server. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| async def main() -> None: | ||||
| 
 | ||||
|     proc = await trio.open_process( ( | ||||
|         'python', | ||||
|         '-c', | ||||
|         'import trio; trio.run(trio.sleep_forever)', | ||||
|     )) | ||||
|     await proc.wait() | ||||
|     # await trio.sleep_forever() | ||||
|     # async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|     #     portal = await n.start_actor( | ||||
|     #         'rpc_server', | ||||
|     #         enable_modules=[__name__], | ||||
|     #     ) | ||||
| 
 | ||||
|     #     async with portal.open_context( | ||||
|     #         just_sleep,  # taken from pytest parameterization | ||||
|     #     ) as (ctx, sent): | ||||
|     #         await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     import time | ||||
|     # time.sleep(999) | ||||
|     trio.run(main) | ||||
|  | @ -15,8 +15,7 @@ async def stream_data(seed=10): | |||
| 
 | ||||
| async def stream_from_portal(p, consumed): | ||||
| 
 | ||||
|     async with p.open_stream_from(stream_data) as stream: | ||||
|         async for item in stream: | ||||
|     async for item in await p.run(stream_data): | ||||
|         if item in consumed: | ||||
|             consumed.remove(item) | ||||
|         else: | ||||
|  |  | |||
|  | @ -10,7 +10,6 @@ PRIMES = [ | |||
|     115797848077099, | ||||
|     1099726899285419] | ||||
| 
 | ||||
| 
 | ||||
| def is_prime(n): | ||||
|     if n < 2: | ||||
|         return False | ||||
|  | @ -25,7 +24,6 @@ def is_prime(n): | |||
|             return False | ||||
|     return True | ||||
| 
 | ||||
| 
 | ||||
| def main(): | ||||
|     with concurrent.futures.ProcessPoolExecutor() as executor: | ||||
|         start = time.time() | ||||
|  | @ -35,7 +33,6 @@ def main(): | |||
| 
 | ||||
|         print(f'processing took {time.time() - start} seconds') | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
| 
 | ||||
|     start = time.time() | ||||
|  |  | |||
|  | @ -9,7 +9,7 @@ is ``tractor``'s channels. | |||
| 
 | ||||
| """ | ||||
| from contextlib import asynccontextmanager | ||||
| from typing import Callable | ||||
| from typing import List, Callable | ||||
| import itertools | ||||
| import math | ||||
| import time | ||||
|  | @ -29,7 +29,7 @@ PRIMES = [ | |||
| ] | ||||
| 
 | ||||
| 
 | ||||
| async def is_prime(n): | ||||
| def is_prime(n): | ||||
|     if n < 2: | ||||
|         return False | ||||
|     if n == 2: | ||||
|  | @ -71,8 +71,8 @@ async def worker_pool(workers=4): | |||
| 
 | ||||
|         async def _map( | ||||
|             worker_func: Callable[[int], bool], | ||||
|             sequence: list[int] | ||||
|         ) -> list[bool]: | ||||
|             sequence: List[int] | ||||
|         ) -> List[bool]: | ||||
| 
 | ||||
|             # define an async (local) task to collect results from workers | ||||
|             async def send_result(func, value, portal): | ||||
|  |  | |||
|  | @ -1,44 +0,0 @@ | |||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def sleepy_jane(): | ||||
|     uid = tractor.current_actor().uid | ||||
|     print(f'Yo i am actor {uid}') | ||||
|     await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| async def main(): | ||||
|     ''' | ||||
|     Spawn a flat actor cluster, with one process per | ||||
|     detected core. | ||||
| 
 | ||||
|     ''' | ||||
|     portal_map: dict[str, tractor.Portal] | ||||
|     results: dict[str, str] | ||||
| 
 | ||||
|     # look at this hip new syntax! | ||||
|     async with ( | ||||
| 
 | ||||
|         tractor.open_actor_cluster( | ||||
|             modules=[__name__] | ||||
|         ) as portal_map, | ||||
| 
 | ||||
|         trio.open_nursery() as n, | ||||
|     ): | ||||
| 
 | ||||
|         for (name, portal) in portal_map.items(): | ||||
|             n.start_soon(portal.run, sleepy_jane) | ||||
| 
 | ||||
|         await trio.sleep(0.5) | ||||
| 
 | ||||
|         # kill the cluster with a cancel | ||||
|         raise KeyboardInterrupt | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     try: | ||||
|         trio.run(main) | ||||
|     except KeyboardInterrupt: | ||||
|         pass | ||||
|  | @ -1,4 +1,3 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
|  | @ -12,7 +11,7 @@ async def main(): | |||
|         for i in range(3): | ||||
|             real_actors.append(await n.start_actor( | ||||
|                 f'actor_{i}', | ||||
|                 enable_modules=[__name__], | ||||
|                 rpc_module_paths=[__name__], | ||||
|             )) | ||||
| 
 | ||||
|         # start one actor that will fail immediately | ||||
|  | @ -25,6 +24,6 @@ async def main(): | |||
| if __name__ == '__main__': | ||||
|     try: | ||||
|         # also raises | ||||
|         trio.run(main) | ||||
|         tractor.run(main) | ||||
|     except tractor.RemoteActorError: | ||||
|         print("Look Maa that actor failed hard, hehhh!") | ||||
|  |  | |||
|  | @ -1,72 +0,0 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def simple_rpc( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
|     data: int, | ||||
| 
 | ||||
| ) -> None: | ||||
|     '''Test a small ping-pong 2-way streaming server. | ||||
| 
 | ||||
|     ''' | ||||
|     # signal to parent that we're up much like | ||||
|     # ``trio_typing.TaskStatus.started()`` | ||||
|     await ctx.started(data + 1) | ||||
| 
 | ||||
|     async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|         count = 0 | ||||
|         async for msg in stream: | ||||
| 
 | ||||
|             assert msg == 'ping' | ||||
|             await stream.send('pong') | ||||
|             count += 1 | ||||
| 
 | ||||
|         else: | ||||
|             assert count == 10 | ||||
| 
 | ||||
| 
 | ||||
| async def main() -> None: | ||||
| 
 | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         portal = await n.start_actor( | ||||
|             'rpc_server', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         # XXX: syntax requires py3.9 | ||||
|         async with ( | ||||
| 
 | ||||
|             portal.open_context( | ||||
|                 simple_rpc,  # taken from pytest parameterization | ||||
|                 data=10, | ||||
| 
 | ||||
|             ) as (ctx, sent), | ||||
| 
 | ||||
|             ctx.open_stream() as stream, | ||||
|         ): | ||||
| 
 | ||||
|             assert sent == 11 | ||||
| 
 | ||||
|             count = 0 | ||||
|             # receive msgs using async for style | ||||
|             await stream.send('ping') | ||||
| 
 | ||||
|             async for msg in stream: | ||||
|                 assert msg == 'pong' | ||||
|                 await stream.send('ping') | ||||
|                 count += 1 | ||||
| 
 | ||||
|                 if count >= 9: | ||||
|                     break | ||||
| 
 | ||||
|         # explicitly teardown the daemon-actor | ||||
|         await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -1,9 +1,7 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| tractor.log.get_console_log("INFO") | ||||
| 
 | ||||
| 
 | ||||
| async def main(service_name): | ||||
| 
 | ||||
|     async with tractor.open_nursery() as an: | ||||
|  | @ -19,4 +17,4 @@ async def main(service_name): | |||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main, 'some_actor_name') | ||||
|     tractor.run(main, 'some_actor_name') | ||||
|  |  | |||
|  | @ -1 +0,0 @@ | |||
| !.gitignore | ||||
|  | @ -1,16 +0,0 @@ | |||
| Strictly support Python 3.10+, start runtime machinery reorg | ||||
| 
 | ||||
| Since we want to push forward using the new `match:` syntax for our | ||||
| internal RPC-msg loops, we officially drop 3.9 support for the next | ||||
| release which should coincide well with the first release of 3.11. | ||||
| 
 | ||||
| This patch set also officially removes the ``tractor.run()`` API (which | ||||
| has been deprecated for some time) as well as starts an initial re-org | ||||
| of the internal runtime core by: | ||||
| - renaming ``tractor._actor`` -> ``._runtime`` | ||||
| - moving the ``._runtime.Actor._process_messages()`` and | ||||
|   ``._async_main()`` to be module level singleton-task-functions since | ||||
|   they are only started once for each connection and actor spawn | ||||
|   respectively; this internal API thus looks more similar to (at the | ||||
|   time of writing) the ``trio``-internals in ``trio._core._run``. | ||||
| - officially remove ``tractor.run()``, now deprecated for some time. | ||||
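| 
| As a non-authoritative sketch, the kind of ``match:``-based msg | ||||
| dispatch this reorg is aiming for might look like (names are | ||||
| illustrative, not the actual runtime internals):: | ||||
| 
|     async def process_messages(chan): | ||||
|         # dispatch inbound IPC msgs by shape using py3.10 `match:` | ||||
|         async for msg in chan: | ||||
|             match msg: | ||||
|                 case {'cmd': 'start', 'func': funcname}: | ||||
|                     ...  # schedule the requested RPC task | ||||
|                 case {'cmd': 'cancel'}: | ||||
|                     break | ||||
|                 case _: | ||||
|                     raise RuntimeError(f'Unknown msg: {msg!r}') | ||||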
|  | @ -1,4 +0,0 @@ | |||
| Only set `._debug.Lock.local_pdb_complete` if it has been created. | ||||
| 
 | ||||
| This can be triggered by a very rare race condition (and thus we have no | ||||
| working test yet) but it is known to exist in (a) consumer project(s). | ||||
|  | @ -1,25 +0,0 @@ | |||
| Add support for ``trio >= 0.22`` and support for the new Python 3.11 | ||||
| ``[Base]ExceptionGroup`` from `pep 654`_ via the backported | ||||
| `exceptiongroup`_ package and some final fixes to the debug mode | ||||
| subsystem. | ||||
| 
 | ||||
| This port ended up driving some (hopefully) final fixes to our debugger | ||||
| subsystem including the solution to all lingering stdstreams locking | ||||
| race-conditions and deadlock scenarios. This includes extending the | ||||
| debugger test suite to cover cancellation and ``asyncio`` mode cases. | ||||
| Some of the notable details: | ||||
| 
 | ||||
| - always reverting to the ``trio`` SIGINT handler when leaving debug | ||||
|   mode. | ||||
| - bypassing child attempts to acquire the debug lock when detected | ||||
|   to be amidst actor-runtime-cancellation. | ||||
| - allowing the root actor to cancel local but IPC-stale subactor | ||||
|   requests-tasks for the debug lock when in a "no IPC peers" state. | ||||
| 
 | ||||
| Further we refined our ``ActorNursery`` semantics to be more similar to | ||||
| ``trio`` in the sense that parent task errors are always packed into the | ||||
| actor-nursery emitted exception group and adjusted all tests and | ||||
| examples accordingly. | ||||
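| 
| For consumers this roughly means catching the (backported, pre-3.11) | ||||
| group type around the root entrypoint; a minimal sketch, assuming | ||||
| a user-defined ``main()`` which opens an actor nursery:: | ||||
| 
|     import trio | ||||
|     from exceptiongroup import BaseExceptionGroup | ||||
| 
|     try: | ||||
|         trio.run(main) | ||||
|     except BaseExceptionGroup as beg: | ||||
|         # parent task errors are packed in alongside subactor errors | ||||
|         for exc in beg.exceptions: | ||||
|             print(f'nursery sub-error: {exc!r}') | ||||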
| 
 | ||||
| .. _pep 654: https://peps.python.org/pep-0654/#handling-exception-groups | ||||
| .. _exceptiongroup: https://github.com/python-trio/exceptiongroup | ||||
|  | @ -1,5 +0,0 @@ | |||
| Establish an explicit "backend spawning" method table; use it from CI | ||||
| 
 | ||||
| More clearly lays out the current set of (3) backends: ``['trio', | ||||
| 'mp_spawn', 'mp_forkserver']`` and adjusts the ``._spawn.py`` internals | ||||
| as well as the test suite to accommodate. | ||||
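| 
| A rough sketch of such a table (the ``SpawnMethodKey`` alias does show | ||||
| up in the test suite; the stub spawner func here is hypothetical):: | ||||
| 
|     from typing import Awaitable, Callable, Literal | ||||
| 
|     SpawnMethodKey = Literal['trio', 'mp_spawn', 'mp_forkserver'] | ||||
| 
|     async def _new_proc(*args) -> None: | ||||
|         ...  # hypothetical per-backend spawn-and-supervise task | ||||
| 
|     _methods: dict[SpawnMethodKey, Callable[..., Awaitable]] = { | ||||
|         'trio': _new_proc, | ||||
|         'mp_spawn': _new_proc, | ||||
|         'mp_forkserver': _new_proc, | ||||
|     } | ||||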
|  | @ -1,4 +0,0 @@ | |||
| Add ``key: Callable[..., Hashable]`` support to ``.trionics.maybe_open_context()`` | ||||
| 
 | ||||
| Gives users finer-grained control over cache-hit behaviour using | ||||
| a callable which receives the input ``kwargs: dict``. | ||||
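| 
| A hedged usage sketch, assuming the call convention where ``key`` is | ||||
| invoked as ``key(**kwargs)`` (the ``open_feed`` acm is hypothetical):: | ||||
| 
|     from tractor.trionics import maybe_open_context | ||||
| 
|     async def cached_feed(symbol: str): | ||||
|         async with maybe_open_context( | ||||
|             acm_func=open_feed,  # hypothetical user-provided acm | ||||
|             kwargs={'symbol': symbol, 'depth': 10}, | ||||
|             # cache-hit purely on the symbol, ignoring other kwargs | ||||
|             key=lambda **kw: kw['symbol'], | ||||
|         ) as (cache_hit, feed): | ||||
|             ... | ||||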
|  | @ -1,41 +0,0 @@ | |||
| Add support for debug-lock blocking using a ``._debug.Lock._blocked: | ||||
| set[tuple]`` and add ids when no more IPC connections with the | ||||
| root actor are detected. | ||||
| 
 | ||||
| This is an enhancement which (mostly) solves a lingering debugger | ||||
| locking race case we needed to handle: | ||||
| 
 | ||||
| - a child crashes, acquires the TTY lock in the root and attaches to ``pdb`` | ||||
| - child IPC goes down such that all channels to the root are broken | ||||
|   / non-functional. | ||||
| - root is stuck thinking the child is still in debug even though it | ||||
|   can't be contacted and the child actor machinery hasn't been | ||||
|   cancelled by its parent. | ||||
| - root gets stuck in deadlock with the child since it won't send a cancel | ||||
|   request until the child is finished debugging (to avoid clobbering | ||||
|   a child that is actually using the debugger), but the child can't | ||||
|   unlock the debugger because IPC is down and it can't contact the root. | ||||
| 
 | ||||
| To avoid this scenario we add a debug-lock blocking list via | ||||
| `._debug.Lock._blocked: set[tuple]` which holds actor uids for any actor | ||||
| that is detected by the root as having no transport channel connections | ||||
| (of which at least one should exist if this sub-actor at some point | ||||
| acquired the debug lock). The root consequently checks this list for any | ||||
| actor that tries to (re)acquire the lock and blocks with | ||||
| a ``ContextCancelled``. Further, when a debug condition is tested in | ||||
| ``._runtime._invoke``, the context's ``._enter_debugger_on_cancel`` is | ||||
| set to `False` if the actor was put on the block list, in which case | ||||
| all post-mortem / crash handling will be bypassed for that task. | ||||
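| 
| A rough sketch of the root-side gate (the func name is illustrative, | ||||
| not the verbatim internals; ``Lock`` is the ``._debug.Lock`` above):: | ||||
| 
|     from tractor import ContextCancelled | ||||
| 
|     async def maybe_acquire_debug_lock(uid: tuple[str, str]): | ||||
|         if uid in Lock._blocked: | ||||
|             # this peer was seen with zero live IPC channels; deny | ||||
|             # the (re)acquire attempt instead of deadlocking. | ||||
|             raise ContextCancelled( | ||||
|                 f'Debug lock blocked for {uid}' | ||||
|             ) | ||||
|         ...  # normal TTY-lock acquisition path | ||||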
| 
 | ||||
| In theory this approach to block list management may cause problems | ||||
| where some nested child actor acquires and releases the lock multiple | ||||
| times and it gets stuck on the block list after the first use? If this | ||||
| turns out to be an issue we can try changing the strategy so blocks are | ||||
| only added when the root has zero IPC peers left? | ||||
| 
 | ||||
| Further, this adds a root-locking-task side cancel scope, | ||||
| ``Lock._root_local_task_cs_in_debug``, which can be ``.cancel()``-ed by the root | ||||
| runtime when a stale lock is detected during the IPC channel testing. | ||||
| However, right now we're NOT using this since it seems to cause test | ||||
| failures, likely due to causing premature cancellation, and maybe needs | ||||
| a bit more experimenting? | ||||
|  | @ -1,19 +0,0 @@ | |||
| Rework our ``.trionics.BroadcastReceiver`` internals to avoid method | ||||
| recursion and approach a design and interface closer to ``trio``'s | ||||
| ``MemoryReceiveChannel``. | ||||
| 
 | ||||
| The details of the internal changes include: | ||||
| 
 | ||||
| - implementing a ``BroadcastReceiver.receive_nowait()`` and using it | ||||
|   within the async ``.receive()``, thus avoiding recursion from | ||||
|   ``.receive()`` (see the sketch below). | ||||
| - failing over to an internal ``._receive_from_underlying()`` when the | ||||
|   ``_nowait()`` call raises ``trio.WouldBlock``. | ||||
| - adding ``BroadcastState.statistics()`` for debugging and testing of | ||||
|   both the internals and user code. | ||||
| - add an internal ``BroadcastReceiver._raise_on_lag: bool`` which can be | ||||
|   set to avoid ``Lagged`` raising for possible use cases where a user | ||||
|   wants to choose a [cheap or nasty | ||||
|   pattern](https://zguide.zeromq.org/docs/chapter7/#The-Cheap-or-Nasty-Pattern) | ||||
|   for the particular stream (we use this in ``piker``'s dark clearing | ||||
|   engine to avoid fast feeds breaking during HFT periods). | ||||
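| 
| The recursion-free receive pattern from the first bullet, sketched | ||||
| (method bodies elided, not the verbatim internals):: | ||||
| 
|     import trio | ||||
| 
|     class BroadcastReceiver: | ||||
| 
|         def receive_nowait(self): | ||||
|             ...  # return a queued value or raise `trio.WouldBlock` | ||||
| 
|         async def _receive_from_underlying(self): | ||||
|             ...  # wait on the wrapped mem-chan | ||||
| 
|         async def receive(self): | ||||
|             try: | ||||
|                 # fast path: no await and no re-entry of `.receive()` | ||||
|                 return self.receive_nowait() | ||||
|             except trio.WouldBlock: | ||||
|                 return await self._receive_from_underlying() | ||||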
|  | @ -1,11 +0,0 @@ | |||
| Always ``list``-cast the ``mngrs`` input to | ||||
| ``.trionics.gather_contexts()`` and ensure it is non-empty, otherwise raise | ||||
| a ``ValueError``. | ||||
| 
 | ||||
| Turns out that trying to pass an inline-style generator comprehension | ||||
| doesn't seem to work inside the ``async with`` expression? Further, in | ||||
| such a case we can get a hang waiting on the all-entered event | ||||
| completion when the internal mngrs iteration is a noop. Instead we | ||||
| always greedily check a size and error on empty input; the lazy | ||||
| iteration of a generator input is not beneficial anyway since we're | ||||
| entering all manager instances in concurrent tasks. | ||||
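| 
| The greedy validation amounts to roughly:: | ||||
| 
|     mngrs = list(mngrs)  # materialize any generator input | ||||
|     if not mngrs: | ||||
|         raise ValueError( | ||||
|             '`gather_contexts()` input `mngrs` is empty?' | ||||
|         ) | ||||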
|  | @ -1,15 +0,0 @@ | |||
| Fixes to ensure IPC (channel) breakage doesn't result in hung actor | ||||
| trees; the zombie reaping and general supervision machinery will always | ||||
| clean up and terminate. | ||||
| 
 | ||||
| This includes not only the (mostly minor) fixes to solve these cases but | ||||
| also a new extensive test suite in `test_advanced_faults.py` with an | ||||
| accompanying highly configurable example module-script in | ||||
| `examples/advanced_faults/ipc_failure_during_stream.py`. Tests ensure we | ||||
| never get hangs or zombies despite operating in debug mode and attempt to | ||||
| simulate all possible IPC transport failure cases for a local-host actor | ||||
| tree. | ||||
| 
 | ||||
| Further we simplify `Context.open_stream.__aexit__()` to just call | ||||
| `MsgStream.aclose()` directly more or less avoiding a pure duplicate | ||||
| code path. | ||||
|  | @ -1,10 +0,0 @@ | |||
| Always redraw the `pdbpp` prompt on `SIGINT` during REPL use. | ||||
| 
 | ||||
| There were recent changes to do with Python 3.10 that required us to pin | ||||
| to a specific commit in `pdbpp`; those have recently been fixed minus | ||||
| this last issue with `SIGINT` shielding: not clobbering or not | ||||
| showing the `(Pdb++)` prompt on ctrl-c by the user. This repairs all | ||||
| that by firstly removing the standard KBI intercepting of the std lib's | ||||
| `pdb.Pdb._cmdloop()` as well as ensuring that only the actor with REPL | ||||
| control ever reports `SIGINT` handler log msgs and prompt redraws. With | ||||
| this we move back to using the pypi `pdbpp` release. | ||||
|  | @ -1,7 +0,0 @@ | |||
| Drop `trio.Process.aclose()` usage, copy into our spawning code. | ||||
| 
 | ||||
| The details are laid out in https://github.com/goodboy/tractor/issues/330. | ||||
| `trio` changed its process running API quite some time ago; this just | ||||
| copies out the small bit we needed (from the old `.aclose()`) for hard kills | ||||
| where a soft runtime cancel request fails and our "zombie killer" | ||||
| implementation kicks in. | ||||
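| 
| The hard-kill flow is, very roughly (``proc`` being the child's | ||||
| ``trio.Process``; the timeout value is illustrative):: | ||||
| 
|     with trio.move_on_after(3) as cs: | ||||
|         # hope the soft (runtime) cancel request lands | ||||
|         await proc.wait() | ||||
| 
|     if cs.cancelled_caught: | ||||
|         # "zombie killer" engages: hard kill and reap | ||||
|         proc.kill() | ||||
|         await proc.wait() | ||||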
|  | @ -1,15 +0,0 @@ | |||
| Switch to using the fork & fix of `pdb++`, `pdbp`: | ||||
| https://github.com/mdmintz/pdbp | ||||
| 
 | ||||
| Allows us to sidestep a variety of issues that aren't getting fixed | ||||
| in the upstream project, thanks to the hard work of @mdmintz! | ||||
| 
 | ||||
| We also include some default settings adjustments as per recent | ||||
| development on the fork: | ||||
| 
 | ||||
| - sticky mode is still turned on by default but now activates when | ||||
|   using the `ll` repl command. | ||||
| - turn off line truncation by default to avoid inter-line gaps when | ||||
|   resizing the terminal during use. | ||||
| - when using the backtrace cmd either by `w` or `bt`, the config | ||||
|   automatically switches to non-sticky mode. | ||||
|  | @ -1,8 +0,0 @@ | |||
| See both the `towncrier docs`_ and the `pluggy release readme`_ for hot | ||||
| tips. We basically have the most minimal setup and release process right | ||||
| now and use the default `fragment set`_ (e.g. a bugfix fragment for | ||||
| issue 330 would be committed as ``nooz/330.bugfix.rst``). | ||||
| 
 | ||||
| 
 | ||||
| .. _towncrier docs: https://github.com/twisted/towncrier#quick-start | ||||
| .. _pluggy release readme: https://github.com/pytest-dev/pluggy/blob/main/changelog/README.rst | ||||
| .. _fragment set: https://github.com/twisted/towncrier#news-fragments | ||||
|  | @ -1,37 +0,0 @@ | |||
| {% for section in sections %} | ||||
| {% set underline = "-" %} | ||||
| {% if section %} | ||||
| {{section}} | ||||
| {{ underline * section|length }}{% set underline = "~" %} | ||||
| 
 | ||||
| {% endif %} | ||||
| {% if sections[section] %} | ||||
| {% for category, val in definitions.items() if category in sections[section] %} | ||||
| 
 | ||||
| {{ definitions[category]['name'] }} | ||||
| {{ underline * definitions[category]['name']|length }} | ||||
| 
 | ||||
| {% if definitions[category]['showcontent'] %} | ||||
| {% for text, values in sections[section][category]|dictsort(by='value') %} | ||||
| {% set issue_joiner = joiner(', ') %} | ||||
| - {% for value in values|sort %}{{ issue_joiner() }}`{{ value }} <https://github.com/goodboy/tractor/issues/{{ value[1:] }}>`_{% endfor %}: {{ text }} | ||||
| 
 | ||||
| {% endfor %} | ||||
| {% else %} | ||||
| - {{ sections[section][category]['']|sort|join(', ') }} | ||||
| 
 | ||||
| 
 | ||||
| {% endif %} | ||||
| {% if sections[section][category]|length == 0 %} | ||||
| 
 | ||||
| No significant changes. | ||||
| 
 | ||||
| {% else %} | ||||
| {% endif %} | ||||
| {% endfor %} | ||||
| {% else %} | ||||
| 
 | ||||
| No significant changes. | ||||
| 
 | ||||
| {% endif %} | ||||
| {% endfor %} | ||||
|  | @ -1,28 +0,0 @@ | |||
| [tool.towncrier] | ||||
| package = "tractor" | ||||
| filename = "NEWS.rst" | ||||
| directory = "nooz/" | ||||
| version = "0.1.0a6" | ||||
| title_format = "tractor {version} ({project_date})" | ||||
| template = "nooz/_template.rst" | ||||
| all_bullets = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "feature" | ||||
|   name = "Features" | ||||
|   showcontent = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "bugfix" | ||||
|   name = "Bug Fixes" | ||||
|   showcontent = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "doc" | ||||
|   name = "Improved Documentation" | ||||
|   showcontent = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "trivial" | ||||
|   name = "Trivial/Internal Changes" | ||||
|   showcontent = true | ||||
|  | @ -1,2 +1,2 @@ | |||
| sphinx | ||||
| sphinx_book_theme | ||||
| sphinx_typlog_theme | ||||
|  | @ -1,8 +1,6 @@ | |||
| pytest | ||||
| pytest-trio | ||||
| pytest-timeout | ||||
| pdbp | ||||
| pdbpp | ||||
| mypy | ||||
| trio_typing | ||||
| pexpect | ||||
| towncrier | ||||
|  |  | |||
|  | setup.py (73 changed lines) | |||
							|  | @ -1,22 +1,21 @@ | |||
| #!/usr/bin/env python | ||||
| # | ||||
| # tractor: structured concurrent "actors". | ||||
| # tractor: a trionic actor model built on `multiprocessing` and `trio` | ||||
| # | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| # Copyright (C) 2018-2020  Tyler Goodlet | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # it under the terms of the GNU General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| # GNU General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU General Public License | ||||
| # along with this program.  If not, see <http://www.gnu.org/licenses/>. | ||||
| from setuptools import setup | ||||
| 
 | ||||
| with open('docs/README.rst', encoding='utf-8') as f: | ||||
|  | @ -25,62 +24,36 @@ with open('docs/README.rst', encoding='utf-8') as f: | |||
| 
 | ||||
| setup( | ||||
|     name="tractor", | ||||
|     version='0.1.0a6dev0',  # alpha zone | ||||
|     description='structured concurrent `trio`-"actors"', | ||||
|     version='0.1.0a0',  # first ever alpha | ||||
|     description='structured concurrrent "actors"', | ||||
|     long_description=readme, | ||||
|     license='AGPLv3', | ||||
|     license='GPLv3', | ||||
|     author='Tyler Goodlet', | ||||
|     maintainer='Tyler Goodlet', | ||||
|     maintainer_email='goodboy_foss@protonmail.com', | ||||
|     maintainer_email='jgbt@protonmail.com', | ||||
|     url='https://github.com/goodboy/tractor', | ||||
|     platforms=['linux', 'windows'], | ||||
|     packages=[ | ||||
|         'tractor', | ||||
|         'tractor.experimental', | ||||
|         'tractor.trionics', | ||||
|         'tractor.testing', | ||||
|     ], | ||||
|     install_requires=[ | ||||
| 
 | ||||
|         # trio related | ||||
|         # proper range spec: | ||||
|         # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5 | ||||
|         'trio >= 0.22', | ||||
|         'trio>0.8', | ||||
|         'msgpack', | ||||
|         'async_generator', | ||||
|         'trio_typing', | ||||
|         'exceptiongroup', | ||||
| 
 | ||||
|         # tooling | ||||
|         'tricycle', | ||||
|         'trio_typing', | ||||
|         'colorlog', | ||||
|         'wrapt', | ||||
| 
 | ||||
|         # IPC serialization | ||||
|         'msgspec', | ||||
| 
 | ||||
|         # debug mode REPL | ||||
|         'pdbp', | ||||
| 
 | ||||
|         # pip ref docs on these specs: | ||||
|         # https://pip.pypa.io/en/stable/reference/requirement-specifiers/#examples | ||||
|         # and pep: | ||||
|         # https://peps.python.org/pep-0440/#version-specifiers | ||||
| 
 | ||||
|         # windows deps workaround for ``pdbpp`` | ||||
|         # https://github.com/pdbpp/pdbpp/issues/498 | ||||
|         # https://github.com/pdbpp/fancycompleter/issues/37 | ||||
|         'pyreadline3 ; platform_system == "Windows"', | ||||
| 
 | ||||
|         'trio_typing', | ||||
|         'pdbpp', | ||||
|     ], | ||||
|     tests_require=['pytest'], | ||||
|     python_requires=">=3.10", | ||||
|     python_requires=">=3.7", | ||||
|     keywords=[ | ||||
|         'trio', | ||||
|         'async', | ||||
|         'concurrency', | ||||
|         'structured concurrency', | ||||
|         'actor model', | ||||
|         'distributed', | ||||
|         "async", | ||||
|         "concurrency", | ||||
|         "actor model", | ||||
|         "distributed", | ||||
|         'multiprocessing' | ||||
|     ], | ||||
|     classifiers=[ | ||||
|  | @ -88,10 +61,12 @@ setup( | |||
|         "Operating System :: POSIX :: Linux", | ||||
|         "Operating System :: Microsoft :: Windows", | ||||
|         "Framework :: Trio", | ||||
|         "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", | ||||
|         "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", | ||||
|         "Programming Language :: Python :: Implementation :: CPython", | ||||
|         "Programming Language :: Python :: 3 :: Only", | ||||
|         "Programming Language :: Python :: 3.10", | ||||
|         "Programming Language :: Python :: 3.7", | ||||
|         "Programming Language :: Python :: 3.8", | ||||
|         "Programming Language :: Python :: 3.9", | ||||
|         "Intended Audience :: Science/Research", | ||||
|         "Intended Audience :: Developers", | ||||
|         "Topic :: System :: Distributed Computing", | ||||
|  |  | |||
|  | @ -7,91 +7,16 @@ import os | |||
| import random | ||||
| import signal | ||||
| import platform | ||||
| import pathlib | ||||
| import time | ||||
| import inspect | ||||
| from functools import partial, wraps | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| # export for tests | ||||
| from tractor.testing import tractor_test  # noqa | ||||
| 
 | ||||
| 
 | ||||
| pytest_plugins = ['pytester'] | ||||
| 
 | ||||
| 
 | ||||
| def tractor_test(fn): | ||||
|     """ | ||||
|     Use: | ||||
| 
 | ||||
|     @tractor_test | ||||
|     async def test_whatever(): | ||||
|         await ... | ||||
| 
 | ||||
|     If fixtures: | ||||
| 
 | ||||
|         - ``arb_addr`` (a socket addr tuple where arbiter is listening) | ||||
|         - ``loglevel`` (logging level passed to tractor internals) | ||||
|         - ``start_method`` (subprocess spawning backend) | ||||
| 
 | ||||
|     are defined in the `pytest` fixture space they will be automatically | ||||
|     injected into tests declaring these funcargs. | ||||
|     """ | ||||
|     @wraps(fn) | ||||
|     def wrapper( | ||||
|         *args, | ||||
|         loglevel=None, | ||||
|         arb_addr=None, | ||||
|         start_method=None, | ||||
|         **kwargs | ||||
|     ): | ||||
|         # __tracebackhide__ = True | ||||
| 
 | ||||
|         if 'arb_addr' in inspect.signature(fn).parameters: | ||||
|             # injects test suite fixture value to test as well | ||||
|             # as `run()` | ||||
|             kwargs['arb_addr'] = arb_addr | ||||
| 
 | ||||
|         if 'loglevel' in inspect.signature(fn).parameters: | ||||
|             # allows test suites to define a 'loglevel' fixture | ||||
|             # that activates the internal logging | ||||
|             kwargs['loglevel'] = loglevel | ||||
| 
 | ||||
|         if start_method is None: | ||||
|             if platform.system() == "Windows": | ||||
|                 start_method = 'trio' | ||||
| 
 | ||||
|         if 'start_method' in inspect.signature(fn).parameters: | ||||
|             # set of subprocess spawning backends | ||||
|             kwargs['start_method'] = start_method | ||||
| 
 | ||||
|         if kwargs: | ||||
| 
 | ||||
|             # use explicit root actor start | ||||
| 
 | ||||
|             async def _main(): | ||||
|                 async with tractor.open_root_actor( | ||||
|                     # **kwargs, | ||||
|                     arbiter_addr=arb_addr, | ||||
|                     loglevel=loglevel, | ||||
|                     start_method=start_method, | ||||
| 
 | ||||
|                     # TODO: only enable when pytest is passed --pdb | ||||
|                     # debug_mode=True, | ||||
| 
 | ||||
|                 ): | ||||
|                     await fn(*args, **kwargs) | ||||
| 
 | ||||
|             main = _main | ||||
| 
 | ||||
|         else: | ||||
|             # use implicit root actor start | ||||
|             main = partial(fn, *args, **kwargs) | ||||
| 
 | ||||
|         return trio.run(main) | ||||
| 
 | ||||
|     return wrapper | ||||
| 
 | ||||
| 
 | ||||
| _arb_addr = '127.0.0.1', random.randint(1000, 9999) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -114,27 +39,20 @@ no_windows = pytest.mark.skipif( | |||
| ) | ||||
| 
 | ||||
| 
 | ||||
| def repodir() -> pathlib.Path: | ||||
|     ''' | ||||
|     Return the abspath to the repo directory. | ||||
| 
 | ||||
|     ''' | ||||
|     # 2 parents up to step up through tests/<repo_dir> | ||||
|     return pathlib.Path(__file__).parent.parent.absolute() | ||||
| 
 | ||||
| 
 | ||||
| def examples_dir() -> pathlib.Path: | ||||
|     ''' | ||||
|     Return the abspath to the examples directory as `pathlib.Path`. | ||||
| 
 | ||||
|     ''' | ||||
|     return repodir() / 'examples' | ||||
| def repodir(): | ||||
|     """Return the abspath to the repo directory. | ||||
|     """ | ||||
|     dirname = os.path.dirname | ||||
|     dirpath = os.path.abspath( | ||||
|         dirname(dirname(os.path.realpath(__file__))) | ||||
|         ) | ||||
|     return dirpath | ||||
| 
 | ||||
| 
 | ||||
| def pytest_addoption(parser): | ||||
|     parser.addoption( | ||||
|         "--ll", action="store", dest='loglevel', | ||||
|         default='ERROR', help="logging level to set when testing" | ||||
|         default=None, help="logging level to set when testing" | ||||
|     ) | ||||
| 
 | ||||
|     parser.addoption( | ||||
|  | @ -146,6 +64,10 @@ def pytest_addoption(parser): | |||
| 
 | ||||
| def pytest_configure(config): | ||||
|     backend = config.option.spawn_backend | ||||
| 
 | ||||
|     if backend == 'mp': | ||||
|         tractor._spawn.try_set_start_method('spawn') | ||||
|     elif backend == 'trio': | ||||
|         tractor._spawn.try_set_start_method(backend) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -153,24 +75,20 @@ def pytest_configure(config): | |||
| def loglevel(request): | ||||
|     orig = tractor.log._default_loglevel | ||||
|     level = tractor.log._default_loglevel = request.config.option.loglevel | ||||
|     tractor.log.get_console_log(level) | ||||
|     yield level | ||||
|     tractor.log._default_loglevel = orig | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope='session') | ||||
| def spawn_backend(request) -> str: | ||||
| def spawn_backend(request): | ||||
|     return request.config.option.spawn_backend | ||||
| 
 | ||||
| 
 | ||||
| _ci_env: bool = os.environ.get('CI', False) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope='session') | ||||
| def ci_env() -> bool: | ||||
|     """Detect CI envoirment. | ||||
|     """ | ||||
|     return _ci_env | ||||
|     return os.environ.get('TRAVIS', False) or os.environ.get('CI', False) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope='session') | ||||
|  | @ -180,24 +98,24 @@ def arb_addr(): | |||
| 
 | ||||
| def pytest_generate_tests(metafunc): | ||||
|     spawn_backend = metafunc.config.option.spawn_backend | ||||
| 
 | ||||
|     if not spawn_backend: | ||||
|         # XXX some weird windows bug with `pytest`? | ||||
|         spawn_backend = 'trio' | ||||
|         spawn_backend = 'mp' | ||||
|     assert spawn_backend in ('mp', 'trio') | ||||
| 
 | ||||
|     # TODO: maybe just use the literal `._spawn.SpawnMethodKey`? | ||||
|     assert spawn_backend in ( | ||||
|         'mp_spawn', | ||||
|         'mp_forkserver', | ||||
|         'trio', | ||||
|     ) | ||||
| 
 | ||||
|     # NOTE: used to be used to dynamically parametrize tests for when | ||||
|     # you just passed --spawn-backend=`mp` on the cli, but now we expect | ||||
|     # that cli input to be manually specified, BUT, maybe we'll do | ||||
|     # something like this again in the future? | ||||
|     if 'start_method' in metafunc.fixturenames: | ||||
|         metafunc.parametrize("start_method", [spawn_backend], scope='module') | ||||
|         if spawn_backend == 'mp': | ||||
|             from multiprocessing import get_all_start_methods | ||||
|             methods = get_all_start_methods() | ||||
|             if 'fork' in methods: | ||||
|                 # fork not available on windows, so check before | ||||
|                 # removing XXX: the fork method is in general | ||||
|                 # incompatible with trio's global scheduler state | ||||
|                 methods.remove('fork') | ||||
|         elif spawn_backend == 'trio': | ||||
|             methods = ['trio'] | ||||
| 
 | ||||
|         metafunc.parametrize("start_method", methods, scope='module') | ||||
| 
 | ||||
| 
 | ||||
| def sig_prog(proc, sig): | ||||
|  | @ -213,22 +131,16 @@ def sig_prog(proc, sig): | |||
| 
 | ||||
| 
 | ||||
| @pytest.fixture | ||||
| def daemon( | ||||
|     loglevel: str, | ||||
|     testdir, | ||||
|     arb_addr: tuple[str, int], | ||||
| ): | ||||
|     ''' | ||||
|     Run a daemon actor as a "remote arbiter". | ||||
| 
 | ||||
|     ''' | ||||
| def daemon(loglevel, testdir, arb_addr): | ||||
|     """Run a daemon actor as a "remote arbiter". | ||||
|     """ | ||||
|     if loglevel in ('trace', 'debug'): | ||||
|         # too much logging will lock up the subproc (smh) | ||||
|         loglevel = 'info' | ||||
| 
 | ||||
|     cmdargs = [ | ||||
|         sys.executable, '-c', | ||||
|         "import tractor; tractor.run_daemon([], registry_addr={}, loglevel={})" | ||||
|         "import tractor; tractor.run_daemon([], arbiter_addr={}, loglevel={})" | ||||
|         .format( | ||||
|             arb_addr, | ||||
|             "'{}'".format(loglevel) if loglevel else None) | ||||
|  |  | |||
|  | @ -1,129 +0,0 @@ | |||
| """ | ||||
| Bidirectional streaming. | ||||
| 
 | ||||
| """ | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def simple_rpc( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
|     data: int, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Test a small ping-pong server. | ||||
| 
 | ||||
|     ''' | ||||
|     # signal to parent that we're up | ||||
|     await ctx.started(data + 1) | ||||
| 
 | ||||
|     print('opening stream in callee') | ||||
|     async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|         count = 0 | ||||
|         while True: | ||||
|             try: | ||||
|                 assert await stream.receive() == 'ping' | ||||
|             except trio.EndOfChannel: | ||||
|                 assert count == 10 | ||||
|                 break | ||||
|             else: | ||||
|                 print('pong') | ||||
|                 await stream.send('pong') | ||||
|                 count += 1 | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def simple_rpc_with_forloop( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
|     data: int, | ||||
| 
 | ||||
| ) -> None: | ||||
|     """Same as previous test but using ``async for`` syntax/api. | ||||
| 
 | ||||
|     """ | ||||
| 
 | ||||
|     # signal to parent that we're up | ||||
|     await ctx.started(data + 1) | ||||
| 
 | ||||
|     print('opening stream in callee') | ||||
|     async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|         count = 0 | ||||
|         async for msg in stream: | ||||
| 
 | ||||
|             assert msg == 'ping' | ||||
|             print('pong') | ||||
|             await stream.send('pong') | ||||
|             count += 1 | ||||
| 
 | ||||
|         else: | ||||
|             assert count == 10 | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'use_async_for', | ||||
|     [True, False], | ||||
| ) | ||||
| @pytest.mark.parametrize( | ||||
|     'server_func', | ||||
|     [simple_rpc, simple_rpc_with_forloop], | ||||
| ) | ||||
| def test_simple_rpc(server_func, use_async_for): | ||||
|     ''' | ||||
|     The simplest request response pattern. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|             portal = await n.start_actor( | ||||
|                 'rpc_server', | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             async with portal.open_context( | ||||
|                 server_func,  # taken from pytest parameterization | ||||
|                 data=10, | ||||
|             ) as (ctx, sent): | ||||
| 
 | ||||
|                 assert sent == 11 | ||||
| 
 | ||||
|                 async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|                     if use_async_for: | ||||
| 
 | ||||
|                         count = 0 | ||||
|                         # receive msgs using async for style | ||||
|                         print('ping') | ||||
|                         await stream.send('ping') | ||||
| 
 | ||||
|                         async for msg in stream: | ||||
|                             assert msg == 'pong' | ||||
|                             print('ping') | ||||
|                             await stream.send('ping') | ||||
|                             count += 1 | ||||
| 
 | ||||
|                             if count >= 9: | ||||
|                                 break | ||||
| 
 | ||||
|                     else: | ||||
|                         # classic send/receive style | ||||
|                         for _ in range(10): | ||||
| 
 | ||||
|                             print('ping') | ||||
|                             await stream.send('ping') | ||||
|                             assert await stream.receive() == 'pong' | ||||
| 
 | ||||
|                 # stream should terminate here | ||||
| 
 | ||||
|             # final context result(s) should be consumed here in __aexit__() | ||||
| 
 | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
|     trio.run(main) | ||||
|  | @ -1,193 +0,0 @@ | |||
| ''' | ||||
| Sketchy network blackoutz, ugly byzantine gens, can you hear the | ||||
| cancellation?.. | ||||
| 
 | ||||
| ''' | ||||
| from functools import partial | ||||
| 
 | ||||
| import pytest | ||||
| from _pytest.pathlib import import_path | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| from conftest import ( | ||||
|     examples_dir, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'debug_mode', | ||||
|     [False, True], | ||||
|     ids=['no_debug_mode', 'debug_mode'], | ||||
| ) | ||||
| @pytest.mark.parametrize( | ||||
|     'ipc_break', | ||||
|     [ | ||||
|         # no breaks | ||||
|         { | ||||
|             'break_parent_ipc_after': False, | ||||
|             'break_child_ipc_after': False, | ||||
|         }, | ||||
| 
 | ||||
|         # only parent breaks | ||||
|         { | ||||
|             'break_parent_ipc_after': 500, | ||||
|             'break_child_ipc_after': False, | ||||
|         }, | ||||
| 
 | ||||
|         # only child breaks | ||||
|         { | ||||
|             'break_parent_ipc_after': False, | ||||
|             'break_child_ipc_after': 500, | ||||
|         }, | ||||
| 
 | ||||
|         # both: break parent first | ||||
|         { | ||||
|             'break_parent_ipc_after': 500, | ||||
|             'break_child_ipc_after': 800, | ||||
|         }, | ||||
|         # both: break child first | ||||
|         { | ||||
|             'break_parent_ipc_after': 800, | ||||
|             'break_child_ipc_after': 500, | ||||
|         }, | ||||
| 
 | ||||
|     ], | ||||
|     ids=[ | ||||
|         'no_break', | ||||
|         'break_parent', | ||||
|         'break_child', | ||||
|         'break_both_parent_first', | ||||
|         'break_both_child_first', | ||||
|     ], | ||||
| ) | ||||
| def test_ipc_channel_break_during_stream( | ||||
|     debug_mode: bool, | ||||
|     spawn_backend: str, | ||||
|     ipc_break: dict | None, | ||||
| ): | ||||
|     ''' | ||||
|     Ensure we can have an IPC channel break its connection during | ||||
|     streaming and it's still possible for the (simulated) user to kill | ||||
|     the actor tree using SIGINT. | ||||
| 
 | ||||
|     We also verify the type of connection error expected in the parent | ||||
|     depending on which side of the IPC breaks first. | ||||
| 
 | ||||
|     ''' | ||||
|     # default: expect a hang requiring the (simulated) user's ctrl-c | ||||
|     expect_final_exc = KeyboardInterrupt | ||||
| 
|     if spawn_backend != 'trio': | ||||
|         if debug_mode: | ||||
|             pytest.skip('`debug_mode` only supported on `trio` spawner') | ||||
| 
 | ||||
|         # non-`trio` spawners should never hit the hang condition that | ||||
|         # requires the user to do ctrl-c to cancel the actor tree. | ||||
|         expect_final_exc = trio.ClosedResourceError | ||||
| 
 | ||||
|     mod = import_path( | ||||
|         examples_dir() / 'advanced_faults' / 'ipc_failure_during_stream.py', | ||||
|         root=examples_dir(), | ||||
|     ) | ||||
| 
 | ||||
|     # when ONLY the child breaks we expect the parent to get a closed | ||||
|     # resource error on the next `MsgStream.receive()` and then fail out | ||||
|     # and cancel the child from there. | ||||
|     if ( | ||||
| 
 | ||||
|         # only child breaks | ||||
|         ( | ||||
|             ipc_break['break_child_ipc_after'] | ||||
|             and ipc_break['break_parent_ipc_after'] is False | ||||
|         ) | ||||
| 
 | ||||
|         # both break but, parent breaks first | ||||
|         or ( | ||||
|             ipc_break['break_child_ipc_after'] is not False | ||||
|             and ( | ||||
|                 ipc_break['break_parent_ipc_after'] | ||||
|                 > ipc_break['break_child_ipc_after'] | ||||
|             ) | ||||
|         ) | ||||
| 
 | ||||
|     ): | ||||
|         expect_final_exc = trio.ClosedResourceError | ||||
| 
 | ||||
|     # when the parent IPC side dies (even if the child's does as well | ||||
|     # but the child fails BEFORE the parent) we expect the channel to be | ||||
|     # sent a stop msg from the child at some point which will signal the | ||||
|     # parent that the stream has been terminated. | ||||
|     # NOTE: when the parent breaks "after" the child you get this same | ||||
|     # case as well, the child breaks the IPC channel with a stop msg | ||||
|     # before any closure takes place. | ||||
|     elif ( | ||||
|         # only parent breaks | ||||
|         ( | ||||
|             ipc_break['break_parent_ipc_after'] | ||||
|             and ipc_break['break_child_ipc_after'] is False | ||||
|         ) | ||||
| 
 | ||||
|         # both break but, child breaks first | ||||
|         or ( | ||||
|             ipc_break['break_parent_ipc_after'] is not False | ||||
|             and ( | ||||
|                 ipc_break['break_child_ipc_after'] | ||||
|                 > ipc_break['break_parent_ipc_after'] | ||||
|             ) | ||||
|         ) | ||||
|     ): | ||||
|         expect_final_exc = trio.EndOfChannel | ||||
| 
 | ||||
|     with pytest.raises(expect_final_exc): | ||||
|         trio.run( | ||||
|             partial( | ||||
|                 mod.main, | ||||
|                 debug_mode=debug_mode, | ||||
|                 start_method=spawn_backend, | ||||
|                 **ipc_break, | ||||
|             ) | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def break_ipc_after_started( | ||||
|     ctx: tractor.Context, | ||||
| ) -> None: | ||||
|     await ctx.started() | ||||
|     async with ctx.open_stream() as stream: | ||||
|         await stream.aclose() | ||||
|         await trio.sleep(0.2) | ||||
|         await ctx.chan.send(None) | ||||
|         print('child broke IPC and terminating') | ||||
| 
 | ||||
| 
 | ||||
| def test_stream_closed_right_after_ipc_break_and_zombie_lord_engages(): | ||||
|     ''' | ||||
|     Verify that if a subactor's IPC goes down just after bringing up a stream, | ||||
|     the parent can trigger a SIGINT and the child will be reaped out-of-IPC by | ||||
|     the localhost process supervision machinery: aka the "zombie lord". | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.start_actor( | ||||
|                 'ipc_breaker', | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             with trio.move_on_after(1): | ||||
|                 async with ( | ||||
|                     portal.open_context( | ||||
|                         break_ipc_after_started | ||||
|                     ) as (ctx, sent), | ||||
|                 ): | ||||
|                     async with ctx.open_stream(): | ||||
|                         await trio.sleep(0.5) | ||||
| 
 | ||||
|                     print('parent waiting on context') | ||||
| 
 | ||||
|             print('parent exited context') | ||||
|             raise KeyboardInterrupt | ||||
| 
 | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run(main) | ||||
|  | @ -1,380 +0,0 @@ | |||
| ''' | ||||
| Advanced streaming patterns using bidirectional streams and contexts. | ||||
| 
 | ||||
| ''' | ||||
| from collections import Counter | ||||
| import itertools | ||||
| import platform | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| def is_win(): | ||||
|     return platform.system() == 'Windows' | ||||
| 
 | ||||
| 
 | ||||
| _registry: dict[str, set[tractor.MsgStream]] = { | ||||
|     'even': set(), | ||||
|     'odd': set(), | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| async def publisher( | ||||
| 
 | ||||
|     seed: int = 0, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     global _registry | ||||
| 
 | ||||
|     def is_even(i): | ||||
|         return i % 2 == 0 | ||||
| 
 | ||||
|     for val in itertools.count(seed): | ||||
| 
 | ||||
|         sub = 'even' if is_even(val) else 'odd' | ||||
| 
 | ||||
|         for sub_stream in _registry[sub].copy(): | ||||
|             await sub_stream.send(val) | ||||
| 
 | ||||
|         # throttle send rate to ~1kHz | ||||
|         # making it readable to a human user | ||||
|         await trio.sleep(1/1000) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def subscribe( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     global _registry | ||||
| 
 | ||||
|     # sync with the caller | ||||
|     await ctx.started(None) | ||||
| 
 | ||||
|     async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|         # update subs list as consumer requests | ||||
|         async for new_subs in stream: | ||||
| 
 | ||||
|             new_subs = set(new_subs) | ||||
|             remove = new_subs - _registry.keys() | ||||
| 
 | ||||
|             print(f'setting sub to {new_subs} for {ctx.chan.uid}') | ||||
| 
 | ||||
|             # remove old subs | ||||
|             for sub in remove: | ||||
|                 _registry[sub].remove(stream) | ||||
| 
 | ||||
|             # add new subs for consumer | ||||
|             for sub in new_subs: | ||||
|                 _registry[sub].add(stream) | ||||
| 
 | ||||
| 
 | ||||
| async def consumer( | ||||
| 
 | ||||
|     subs: list[str], | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     uid = tractor.current_actor().uid | ||||
| 
 | ||||
|     async with tractor.wait_for_actor('publisher') as portal: | ||||
|         async with portal.open_context(subscribe) as (ctx, first): | ||||
|             async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|                 # flip between the provided subs dynamically | ||||
|                 if len(subs) > 1: | ||||
| 
 | ||||
|                     for sub in itertools.cycle(subs): | ||||
|                         print(f'setting dynamic sub to {sub}') | ||||
|                         await stream.send([sub]) | ||||
| 
 | ||||
|                         count = 0 | ||||
|                         async for value in stream: | ||||
|                             print(f'{uid} got: {value}') | ||||
|                             if count > 5: | ||||
|                                 break | ||||
|                             count += 1 | ||||
| 
 | ||||
|                 else:  # static sub | ||||
| 
 | ||||
|                     await stream.send(subs) | ||||
|                     async for value in stream: | ||||
|                         print(f'{uid} got: {value}') | ||||
| 
 | ||||
| 
 | ||||
| def test_dynamic_pub_sub(): | ||||
| 
 | ||||
|     global _registry | ||||
| 
 | ||||
|     from multiprocessing import cpu_count | ||||
|     cpus = cpu_count() | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|             # name of this actor will be same as target func | ||||
|             await n.run_in_actor(publisher) | ||||
| 
 | ||||
|             for i, sub in zip( | ||||
|                 range(cpus - 2), | ||||
|                 itertools.cycle(_registry.keys()) | ||||
|             ): | ||||
|                 await n.run_in_actor( | ||||
|                     consumer, | ||||
|                     name=f'consumer_{sub}', | ||||
|                     subs=[sub], | ||||
|                 ) | ||||
| 
 | ||||
|             # make one dynamic subscriber | ||||
|             await n.run_in_actor( | ||||
|                 consumer, | ||||
|                 name='consumer_dynamic', | ||||
|                 subs=list(_registry.keys()), | ||||
|             ) | ||||
| 
 | ||||
|             # block until cancelled by user | ||||
|             with trio.fail_after(3): | ||||
|                 await trio.sleep_forever() | ||||
| 
 | ||||
|     try: | ||||
|         trio.run(main) | ||||
|     except trio.TooSlowError: | ||||
|         pass | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def one_task_streams_and_one_handles_reqresp( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     await ctx.started() | ||||
| 
 | ||||
|     async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|         async def pingpong(): | ||||
|             '''Run a simple req/response service. | ||||
| 
 | ||||
|             ''' | ||||
|             async for msg in stream: | ||||
|                 print('rpc server ping') | ||||
|                 assert msg == 'ping' | ||||
|                 print('rpc server pong') | ||||
|                 await stream.send('pong') | ||||
| 
 | ||||
|         async with trio.open_nursery() as n: | ||||
|             n.start_soon(pingpong) | ||||
| 
 | ||||
|             for _ in itertools.count(): | ||||
|                 await stream.send('yo') | ||||
|                 await trio.sleep(0.01) | ||||
| 
 | ||||
| 
 | ||||
| def test_reqresp_ontopof_streaming(): | ||||
|     ''' | ||||
|     Test a subactor that both streams with one task and | ||||
|     spawns another which handles a small request-response | ||||
|     dialogue over the same bidir-stream. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
| 
 | ||||
|         # flag to make sure we get at least one pong | ||||
|         got_pong: bool = False | ||||
|         timeout: int = 2 | ||||
| 
 | ||||
|         if is_win():  # smh | ||||
|             timeout = 4 | ||||
| 
 | ||||
|         with trio.move_on_after(timeout): | ||||
|             async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|                 # name of this actor will be same as target func | ||||
|                 portal = await n.start_actor( | ||||
|                     'dual_tasks', | ||||
|                     enable_modules=[__name__] | ||||
|                 ) | ||||
| 
 | ||||
|                 async with portal.open_context( | ||||
|                     one_task_streams_and_one_handles_reqresp, | ||||
| 
 | ||||
|                 ) as (ctx, first): | ||||
| 
 | ||||
|                     assert first is None | ||||
| 
 | ||||
|                     async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|                         await stream.send('ping') | ||||
| 
 | ||||
|                         async for msg in stream: | ||||
|                             print(f'client received: {msg}') | ||||
| 
 | ||||
|                             assert msg in {'pong', 'yo'} | ||||
| 
 | ||||
|                             if msg == 'pong': | ||||
|                                 got_pong = True | ||||
|                                 await stream.send('ping') | ||||
|                                 print('client sent ping') | ||||
| 
 | ||||
|         assert got_pong | ||||
| 
 | ||||
|     try: | ||||
|         trio.run(main) | ||||
|     except trio.TooSlowError: | ||||
|         pass | ||||
| 
 | ||||
| 
 | ||||
| async def async_gen_stream(sequence): | ||||
|     for i in sequence: | ||||
|         yield i | ||||
|         await trio.sleep(0.1) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def echo_ctx_stream( | ||||
|     ctx: tractor.Context, | ||||
| ) -> None: | ||||
|     await ctx.started() | ||||
| 
 | ||||
|     async with ctx.open_stream() as stream: | ||||
|         async for msg in stream: | ||||
|             await stream.send(msg) | ||||
| 
 | ||||
| 
 | ||||
| def test_sigint_both_stream_types(): | ||||
|     '''Verify that running a bi-directional and a recv-only stream | ||||
|     side-by-side will cancel correctly from SIGINT. | ||||
| 
 | ||||
|     ''' | ||||
|     timeout: float = 2 | ||||
|     if is_win():  # smh | ||||
|         timeout += 1 | ||||
| 
 | ||||
|     async def main(): | ||||
|         with trio.fail_after(timeout): | ||||
|             async with tractor.open_nursery() as n: | ||||
|                 # name of this actor will be same as target func | ||||
|                 portal = await n.start_actor( | ||||
|                     '2_way', | ||||
|                     enable_modules=[__name__] | ||||
|                 ) | ||||
| 
 | ||||
|                 async with portal.open_context(echo_ctx_stream) as (ctx, _): | ||||
|                     async with ctx.open_stream() as stream: | ||||
|                         async with portal.open_stream_from( | ||||
|                             async_gen_stream, | ||||
|                             sequence=list(range(1)), | ||||
|                         ) as gen_stream: | ||||
| 
 | ||||
|                             msg = await gen_stream.receive() | ||||
|                             await stream.send(msg) | ||||
|                             resp = await stream.receive() | ||||
|                             assert resp == msg | ||||
|                             raise KeyboardInterrupt | ||||
| 
 | ||||
|     try: | ||||
|         trio.run(main) | ||||
|         assert 0, "Didn't receive KBI!?" | ||||
|     except KeyboardInterrupt: | ||||
|         pass | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def inf_streamer( | ||||
|     ctx: tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Stream increasing ints until terminated with a 'done' msg. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
| 
 | ||||
|     async with ( | ||||
|         ctx.open_stream() as stream, | ||||
|         trio.open_nursery() as n, | ||||
|     ): | ||||
|         async def bail_on_sentinel(): | ||||
|             async for msg in stream: | ||||
|                 if msg == 'done': | ||||
|                     await stream.aclose() | ||||
|                 else: | ||||
|                     print(f'streamer received {msg}') | ||||
| 
 | ||||
|         # start termination detector | ||||
|         n.start_soon(bail_on_sentinel) | ||||
| 
 | ||||
|         for val in itertools.count(): | ||||
|             try: | ||||
|                 await stream.send(val) | ||||
|             except trio.ClosedResourceError: | ||||
|                 # close out the stream gracefully | ||||
|                 break | ||||
| 
 | ||||
|     print('terminating streamer') | ||||
| 
 | ||||
| 
 | ||||
| def test_local_task_fanout_from_stream(): | ||||
|     ''' | ||||
|     Single stream with multiple local consumer tasks using the | ||||
|     ``MsgStream.subscribe()`` api. | ||||
| 
 | ||||
|     Ensure all tasks receive all values after stream completes sending. | ||||
| 
 | ||||
|     ''' | ||||
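|     # the fanout pattern under test, in brief (a hedged sketch; see | ||||
|     # ``pull_and_count()`` below for the real thing): | ||||
|     # | ||||
|     #     async with stream.subscribe() as bcast_rx: | ||||
|     #         async for val in bcast_rx: | ||||
|     #             ...  # every subscriber task sees every value | ||||
| 
 | ||||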
|     consumers = 22 | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         counts = Counter() | ||||
| 
 | ||||
|         async with tractor.open_nursery() as tn: | ||||
|             p = await tn.start_actor( | ||||
|                 'inf_streamer', | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
|             async with ( | ||||
|                 p.open_context(inf_streamer) as (ctx, _), | ||||
|                 ctx.open_stream() as stream, | ||||
|             ): | ||||
| 
 | ||||
|                 async def pull_and_count(name: str): | ||||
|                     # name = trio.lowlevel.current_task().name | ||||
|                     async with stream.subscribe() as recver: | ||||
|                         assert isinstance( | ||||
|                             recver, | ||||
|                             tractor.trionics.BroadcastReceiver | ||||
|                         ) | ||||
|                         async for val in recver: | ||||
|                             # print(f'{name}: {val}') | ||||
|                             counts[name] += 1 | ||||
| 
 | ||||
|                         print(f'{name} bcaster ended') | ||||
| 
 | ||||
|                     print(f'{name} completed') | ||||
| 
 | ||||
|                 with trio.fail_after(3): | ||||
|                     async with trio.open_nursery() as nurse: | ||||
|                         for i in range(consumers): | ||||
|                             nurse.start_soon(pull_and_count, i) | ||||
| 
 | ||||
|                         await trio.sleep(0.5) | ||||
|                         print('\nterminating') | ||||
|                         await stream.send('done') | ||||
| 
 | ||||
|             print('closed stream connection') | ||||
| 
 | ||||
|             assert len(counts) == consumers | ||||
|             mx = max(counts.values()) | ||||
|             # make sure each task received all stream values | ||||
|             assert all(val == mx for val in counts.values()) | ||||
| 
 | ||||
|             await p.cancel_actor() | ||||
| 
 | ||||
|     trio.run(main) | ||||
|  | @ -1,6 +1,5 @@ | |||
| """ | ||||
| Cancellation and error propagation | ||||
| 
 | ||||
| """ | ||||
| import os | ||||
| import signal | ||||
|  | @ -8,10 +7,6 @@ import platform | |||
| import time | ||||
| from itertools import repeat | ||||
| 
 | ||||
| from exceptiongroup import ( | ||||
|     BaseExceptionGroup, | ||||
|     ExceptionGroup, | ||||
| ) | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
|  | @ -19,10 +14,6 @@ import tractor | |||
| from conftest import tractor_test, no_windows | ||||
| 
 | ||||
| 
 | ||||
| def is_win(): | ||||
|     return platform.system() == 'Windows' | ||||
| 
 | ||||
| 
 | ||||
| async def assert_err(delay=0): | ||||
|     await trio.sleep(delay) | ||||
|     assert 0 | ||||
|  | @ -56,57 +47,31 @@ def test_remote_error(arb_addr, args_err): | |||
|     args, errtype = args_err | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|         ) as nursery: | ||||
|         async with tractor.open_nursery() as nursery: | ||||
| 
 | ||||
|             # on a remote type error caused by bad input args | ||||
|             # this should raise directly which means we **don't** get | ||||
|             # an exception group outside the nursery since the error | ||||
|             # here and the far end task error are one and the same? | ||||
|             portal = await nursery.run_in_actor( | ||||
|                 assert_err, name='errorer', **args | ||||
|             ) | ||||
|             portal = await nursery.run_in_actor(assert_err, name='errorer', **args) | ||||
| 
 | ||||
|             # get result(s) from main task | ||||
|             try: | ||||
|                 # this means the root actor will also raise a local | ||||
|                 # parent task error and thus an exception group will | ||||
|                 # propagate out of this actor nursery. | ||||
|                 await portal.result() | ||||
|             except tractor.RemoteActorError as err: | ||||
|                 assert err.type == errtype | ||||
|                 print("Look Maa that actor failed hard, hehh") | ||||
|                 raise | ||||
| 
 | ||||
|     # ensure boxed errors | ||||
|     if args: | ||||
|     with pytest.raises(tractor.RemoteActorError) as excinfo: | ||||
|             trio.run(main) | ||||
|         tractor.run(main, arbiter_addr=arb_addr) | ||||
| 
 | ||||
|     # ensure boxed error is correct | ||||
|     assert excinfo.value.type == errtype | ||||
| 
 | ||||
|     else: | ||||
|         # the root task will also error on the `.result()` call | ||||
|         # so we expect an error from there AND the child. | ||||
|         with pytest.raises(BaseExceptionGroup) as excinfo: | ||||
|             trio.run(main) | ||||
| 
 | ||||
|         # ensure boxed errors | ||||
|         for exc in excinfo.value.exceptions: | ||||
|             assert exc.type == errtype | ||||
| 
 | ||||
| 
 | ||||
| def test_multierror(arb_addr): | ||||
|     ''' | ||||
|     Verify we raise a ``BaseExceptionGroup`` out of a nursery where | ||||
|     """Verify we raise a ``trio.MultiError`` out of a nursery where | ||||
|     more than one actor errors. | ||||
| 
 | ||||
|     ''' | ||||
|     """ | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|         ) as nursery: | ||||
|         async with tractor.open_nursery() as nursery: | ||||
| 
 | ||||
|             await nursery.run_in_actor(assert_err, name='errorer1') | ||||
|             portal2 = await nursery.run_in_actor(assert_err, name='errorer2') | ||||
|  | @ -119,11 +84,11 @@ def test_multierror(arb_addr): | |||
|                 print("Look Maa that first actor failed hard, hehh") | ||||
|                 raise | ||||
| 
 | ||||
|         # here we should get a ``BaseExceptionGroup`` containing exceptions | ||||
|         # here we should get a `trio.MultiError` containing exceptions | ||||
|         # from both subactors | ||||
| 
 | ||||
|     with pytest.raises(BaseExceptionGroup): | ||||
|         trio.run(main) | ||||
|     with pytest.raises(trio.MultiError): | ||||
|         tractor.run(main, arbiter_addr=arb_addr) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize('delay', (0, 0.5)) | ||||
|  | @ -131,15 +96,12 @@ def test_multierror(arb_addr): | |||
|     'num_subactors', range(25, 26), | ||||
| ) | ||||
| def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | ||||
|     """Verify we raise a ``BaseExceptionGroup`` out of a nursery where | ||||
|     """Verify we raise a ``trio.MultiError`` out of a nursery where | ||||
|     more than one actor errors and also with a delay before failure | ||||
|     to test failure during ongoing spawning. | ||||
|     """ | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|         ) as nursery: | ||||
| 
 | ||||
|         async with tractor.open_nursery() as nursery: | ||||
|             for i in range(num_subactors): | ||||
|                 await nursery.run_in_actor( | ||||
|                     assert_err, | ||||
|  | @ -147,30 +109,18 @@ def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | |||
|                     delay=delay | ||||
|                 ) | ||||
| 
 | ||||
|     # with pytest.raises(trio.MultiError) as exc_info: | ||||
|     with pytest.raises(BaseExceptionGroup) as exc_info: | ||||
|         trio.run(main) | ||||
|     with pytest.raises(trio.MultiError) as exc_info: | ||||
|         tractor.run(main, arbiter_addr=arb_addr) | ||||
| 
 | ||||
|     assert exc_info.type == ExceptionGroup | ||||
|     assert exc_info.type == tractor.MultiError | ||||
|     err = exc_info.value | ||||
|     exceptions = err.exceptions | ||||
| 
 | ||||
|     if len(exceptions) == 2: | ||||
|         # sometimes oddly now there's an embedded BrokenResourceError? | ||||
|         for exc in exceptions: | ||||
|             excs = getattr(exc, 'exceptions', None) | ||||
|             if excs: | ||||
|                 exceptions = excs | ||||
|                 break | ||||
| 
 | ||||
|     assert len(exceptions) == num_subactors | ||||
| 
 | ||||
|     for exc in exceptions: | ||||
|     assert len(err.exceptions) == num_subactors | ||||
|     for exc in err.exceptions: | ||||
|         assert isinstance(exc, tractor.RemoteActorError) | ||||
|         assert exc.type == AssertionError | ||||
| 
 | ||||
| 
 | ||||
| async def do_nothing(): | ||||
| def do_nothing(): | ||||
|     pass | ||||
| 
 | ||||
| 
 | ||||
|  | @ -182,12 +132,10 @@ def test_cancel_single_subactor(arb_addr, mechanism): | |||
|     async def spawn_actor(): | ||||
|         """Spawn an actor that blocks indefinitely. | ||||
|         """ | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|         ) as nursery: | ||||
|         async with tractor.open_nursery() as nursery: | ||||
| 
 | ||||
|             portal = await nursery.start_actor( | ||||
|                 'nothin', enable_modules=[__name__], | ||||
|                 'nothin', rpc_module_paths=[__name__], | ||||
|             ) | ||||
|             assert (await portal.run(do_nothing)) is None | ||||
| 
 | ||||
|  | @ -198,10 +146,10 @@ def test_cancel_single_subactor(arb_addr, mechanism): | |||
|                 raise mechanism | ||||
| 
 | ||||
|     if mechanism == 'nursery_cancel': | ||||
|         trio.run(spawn_actor) | ||||
|         tractor.run(spawn_actor, arbiter_addr=arb_addr) | ||||
|     else: | ||||
|         with pytest.raises(mechanism): | ||||
|             trio.run(spawn_actor) | ||||
|             tractor.run(spawn_actor, arbiter_addr=arb_addr) | ||||
| 
 | ||||
| 
 | ||||
| async def stream_forever(): | ||||
|  | @ -220,13 +168,12 @@ async def test_cancel_infinite_streamer(start_method): | |||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.start_actor( | ||||
|                 'donny', | ||||
|                 enable_modules=[__name__], | ||||
|                 rpc_module_paths=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             # this async for loop streams values from the above | ||||
|             # async generator running in a separate process | ||||
|             async with portal.open_stream_from(stream_forever) as stream: | ||||
|                 async for letter in stream: | ||||
|             async for letter in await portal.run(stream_forever): | ||||
|                 print(letter) | ||||
| 
 | ||||
|     # we support trio's cancellation system | ||||
|  | @ -239,8 +186,8 @@ async def test_cancel_infinite_streamer(start_method): | |||
|     [ | ||||
|         # daemon actors sit idle while single task actors error out | ||||
|         (1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None), | ||||
|         (2, BaseExceptionGroup, AssertionError, (assert_err, {}), None), | ||||
|         (3, BaseExceptionGroup, AssertionError, (assert_err, {}), None), | ||||
|         (2, tractor.MultiError, AssertionError, (assert_err, {}), None), | ||||
|         (3, tractor.MultiError, AssertionError, (assert_err, {}), None), | ||||
| 
 | ||||
|         # 1 daemon actor errors out while single task actors sleep forever | ||||
|         (3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}), | ||||
|  | @ -251,7 +198,7 @@ async def test_cancel_infinite_streamer(start_method): | |||
|          (do_nuthin, {}), (assert_err, {'delay': 1}, True)), | ||||
|         # daemon complete quickly delay while single task | ||||
|         # actors error after brief delay | ||||
|         (3, BaseExceptionGroup, AssertionError, | ||||
|         (3, tractor.MultiError, AssertionError, | ||||
|          (assert_err, {'delay': 1}), (do_nuthin, {}, False)), | ||||
|     ], | ||||
|     ids=[ | ||||
|  | @ -279,7 +226,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): | |||
|             for i in range(num_actors): | ||||
|                 dactor_portals.append(await n.start_actor( | ||||
|                     f'deamon_{i}', | ||||
|                     enable_modules=[__name__], | ||||
|                     rpc_module_paths=[__name__], | ||||
|                 )) | ||||
| 
 | ||||
|             func, kwargs = ria_func | ||||
|  | @ -318,7 +265,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): | |||
|         # should error here with a ``RemoteActorError`` or ``MultiError`` | ||||
| 
 | ||||
|     except first_err as err: | ||||
|         if isinstance(err, BaseExceptionGroup): | ||||
|         if isinstance(err, tractor.MultiError): | ||||
|             assert len(err.exceptions) == num_actors | ||||
|             for exc in err.exceptions: | ||||
|                 if isinstance(exc, tractor.RemoteActorError): | ||||
|  | @ -361,12 +308,10 @@ async def spawn_and_error(breadth, depth) -> None: | |||
| 
 | ||||
| @tractor_test | ||||
| async def test_nested_multierrors(loglevel, start_method): | ||||
|     ''' | ||||
|     Test that failed actor sets are wrapped in `BaseExceptionGroup`s. This | ||||
|     test goes only 2 nurseries deep but we should eventually have tests | ||||
|     """Test that failed actor sets are wrapped in `trio.MultiError`s. | ||||
|     This test goes only 2 nurseries deep but we should eventually have tests | ||||
|     for arbitrary n-depth actor trees. | ||||
| 
 | ||||
|     ''' | ||||
|     """ | ||||
|     if start_method == 'trio': | ||||
|         depth = 3 | ||||
|         subactor_breadth = 2 | ||||
|  | @ -390,36 +335,24 @@ async def test_nested_multierrors(loglevel, start_method): | |||
|                         breadth=subactor_breadth, | ||||
|                         depth=depth, | ||||
|                     ) | ||||
|         except BaseExceptionGroup as err: | ||||
|         except trio.MultiError as err: | ||||
|             assert len(err.exceptions) == subactor_breadth | ||||
|             for subexc in err.exceptions: | ||||
| 
 | ||||
|                 # verify first level actor errors are wrapped as remote | ||||
|                 if is_win(): | ||||
|                 if platform.system() == 'Windows': | ||||
| 
 | ||||
|                     # windows is often too slow and cancellation seems | ||||
|                     # to happen before an actor is spawned | ||||
|                     if isinstance(subexc, trio.Cancelled): | ||||
|                         continue | ||||
| 
 | ||||
|                     elif isinstance(subexc, tractor.RemoteActorError): | ||||
|                     else: | ||||
|                         # on windows it seems we can't exactly be sure wtf | ||||
|                         # will happen.. | ||||
|                         assert subexc.type in ( | ||||
|                             tractor.RemoteActorError, | ||||
|                             trio.Cancelled, | ||||
|                             BaseExceptionGroup, | ||||
|                         ) | ||||
| 
 | ||||
|                     elif isinstance(subexc, BaseExceptionGroup): | ||||
|                         for subsub in subexc.exceptions: | ||||
| 
 | ||||
|                             if isinstance(subsub, tractor.RemoteActorError): | ||||
|                                 subsub = subsub.type | ||||
| 
 | ||||
|                             assert type(subsub) in ( | ||||
|                                 trio.Cancelled, | ||||
|                                 BaseExceptionGroup, | ||||
|                             trio.MultiError | ||||
|                         ) | ||||
|                 else: | ||||
|                     assert isinstance(subexc, tractor.RemoteActorError) | ||||
|  | @ -428,21 +361,14 @@ async def test_nested_multierrors(loglevel, start_method): | |||
|                     # XXX not sure what's up with this.. | ||||
|                     # on windows sometimes spawning is just too slow and | ||||
|                     # we get back the (sent) cancel signal instead | ||||
|                     if is_win(): | ||||
|                         if isinstance(subexc, tractor.RemoteActorError): | ||||
|                             assert subexc.type in ( | ||||
|                                 BaseExceptionGroup, | ||||
|                                 tractor.RemoteActorError | ||||
|                             ) | ||||
|                     if platform.system() == 'Windows': | ||||
|                         assert (subexc.type is trio.MultiError) or ( | ||||
|                             subexc.type is tractor.RemoteActorError) | ||||
|                     else: | ||||
|                             assert isinstance(subexc, BaseExceptionGroup) | ||||
|                         assert subexc.type is trio.MultiError | ||||
|                 else: | ||||
|                         assert subexc.type is ExceptionGroup | ||||
|                 else: | ||||
|                     assert subexc.type in ( | ||||
|                         tractor.RemoteActorError, | ||||
|                         trio.Cancelled | ||||
|                     ) | ||||
|                     assert (subexc.type is tractor.RemoteActorError) or ( | ||||
|                         subexc.type is trio.Cancelled) | ||||
| 
 | ||||
| 
 | ||||
| @no_windows | ||||
|  | @ -460,13 +386,13 @@ def test_cancel_via_SIGINT( | |||
|         with trio.fail_after(2): | ||||
|             async with tractor.open_nursery() as tn: | ||||
|                 await tn.start_actor('sucka') | ||||
|                 if 'mp' in spawn_backend: | ||||
|                 if spawn_backend == 'mp': | ||||
|                     time.sleep(0.1) | ||||
|                 os.kill(pid, signal.SIGINT) | ||||
|                 await trio.sleep_forever() | ||||
| 
 | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run(main) | ||||
|         tractor.run(main) | ||||
| 
 | ||||
| 
 | ||||
| @no_windows | ||||
|  | @ -480,9 +406,6 @@ def test_cancel_via_SIGINT_other_task( | |||
|     from a separate ``trio`` child task. | ||||
|     """ | ||||
|     pid = os.getpid() | ||||
|     timeout: float = 2 | ||||
|     if is_win():  # smh | ||||
|         timeout += 1 | ||||
| 
 | ||||
|     async def spawn_and_sleep_forever(task_status=trio.TASK_STATUS_IGNORED): | ||||
|         async with tractor.open_nursery() as tn: | ||||
|  | @ -496,15 +419,16 @@ def test_cancel_via_SIGINT_other_task( | |||
| 
 | ||||
|     async def main(): | ||||
|         # should never timeout since SIGINT should cancel the current program | ||||
|         with trio.fail_after(timeout): | ||||
|         with trio.fail_after(2): | ||||
|             async with trio.open_nursery() as n: | ||||
|                 await n.start(spawn_and_sleep_forever) | ||||
|                 if 'mp' in spawn_backend: | ||||
|                 if spawn_backend == 'mp': | ||||
|                     time.sleep(0.1) | ||||
|                 os.kill(pid, signal.SIGINT) | ||||
| 
 | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run(main) | ||||
|         tractor.run(main) | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| async def spin_for(period=3): | ||||
|  | @ -514,7 +438,7 @@ async def spin_for(period=3): | |||
| 
 | ||||
| async def spawn(): | ||||
|     async with tractor.open_nursery() as tn: | ||||
|         await tn.run_in_actor( | ||||
|         portal = await tn.run_in_actor( | ||||
|             spin_for, | ||||
|             name='sleeper', | ||||
|         ) | ||||
|  | @ -536,7 +460,7 @@ def test_cancel_while_childs_child_in_sync_sleep( | |||
|     async def main(): | ||||
|         with trio.fail_after(2): | ||||
|             async with tractor.open_nursery() as tn: | ||||
|                 await tn.run_in_actor( | ||||
|                 portal = await tn.run_in_actor( | ||||
|                     spawn, | ||||
|                     name='spawn', | ||||
|                 ) | ||||
|  | @ -544,58 +468,4 @@ def test_cancel_while_childs_child_in_sync_sleep( | |||
|                 assert 0 | ||||
| 
 | ||||
|     with pytest.raises(AssertionError): | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_fast_graceful_cancel_when_spawn_task_in_soft_proc_wait_for_daemon( | ||||
|     start_method, | ||||
| ): | ||||
|     ''' | ||||
|     This is a very subtle test which demonstrates how cancellation | ||||
|     during process collection can result in non-optimal teardown | ||||
|     performance on daemon actors. The fix for this test was to handle | ||||
|     ``trio.Cancelled`` specially in the spawn task waiting in | ||||
|     `proc.wait()` such that ``Portal.cancel_actor()`` is called before | ||||
|     executing the "hard reap" sequence (which has an up to 3 second | ||||
|     delay currently). | ||||
| 
 | ||||
|     In other words, if we can cancel the actor using a graceful remote | ||||
|     cancellation, and it's faster, we might as well do it. | ||||
| 
 | ||||
|     ''' | ||||
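|     # the ``finally:`` block below asserts the total run time stays | ||||
|     # under the hard-reap delay, i.e. that the graceful remote cancel | ||||
|     # path actually ran. | ||||
| 
 | ||||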
|     kbi_delay = 0.5 | ||||
|     timeout: float = 2.9 | ||||
| 
 | ||||
|     if is_win():  # smh | ||||
|         timeout += 1 | ||||
| 
 | ||||
|     async def main(): | ||||
|         start = time.time() | ||||
|         try: | ||||
|             async with trio.open_nursery() as nurse: | ||||
|                 async with tractor.open_nursery() as tn: | ||||
|                     p = await tn.start_actor( | ||||
|                         'fast_boi', | ||||
|                         enable_modules=[__name__], | ||||
|                     ) | ||||
| 
 | ||||
|                     async def delayed_kbi(): | ||||
|                         await trio.sleep(kbi_delay) | ||||
|                         print(f'RAISING KBI after {kbi_delay} s') | ||||
|                         raise KeyboardInterrupt | ||||
| 
 | ||||
|                     # start task which raises a kbi **after** | ||||
|                     # the actor nursery ``__aexit__()`` has | ||||
|                     # been run. | ||||
|                     nurse.start_soon(delayed_kbi) | ||||
| 
 | ||||
|                     await p.run(do_nuthin) | ||||
|         finally: | ||||
|             duration = time.time() - start | ||||
|             if duration > timeout: | ||||
|                 raise trio.TooSlowError( | ||||
|                     'daemon cancel was slower then necessary..' | ||||
|                 ) | ||||
| 
 | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run(main) | ||||
|         tractor.run(main) | ||||
|  |  | |||
|  | @ -1,173 +0,0 @@ | |||
| ''' | ||||
| Test a service style daemon that maintains a nursery for spawning | ||||
| "remote async tasks" including both spawning other long living | ||||
| sub-sub-actor daemons. | ||||
| 
 | ||||
| ''' | ||||
| from typing import Optional | ||||
| import asyncio | ||||
| from contextlib import asynccontextmanager as acm | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| import tractor | ||||
| from tractor import RemoteActorError | ||||
| from async_generator import aclosing | ||||
| 
 | ||||
| 
 | ||||
| async def aio_streamer( | ||||
|     from_trio: asyncio.Queue, | ||||
|     to_trio: trio.abc.SendChannel, | ||||
| ) -> trio.abc.ReceiveChannel: | ||||
| 
 | ||||
|     # required first msg to sync caller | ||||
|     to_trio.send_nowait(None) | ||||
| 
 | ||||
|     from itertools import cycle | ||||
|     for i in cycle(range(10)): | ||||
|         to_trio.send_nowait(i) | ||||
|         await asyncio.sleep(0.01) | ||||
| 
 | ||||
| 
 | ||||
| async def trio_streamer(): | ||||
|     from itertools import cycle | ||||
|     for i in cycle(range(10)): | ||||
|         yield i | ||||
|         await trio.sleep(0.01) | ||||
| 
 | ||||
| 
 | ||||
| async def trio_sleep_and_err(delay: float = 0.5): | ||||
|     await trio.sleep(delay) | ||||
|     # name error | ||||
|     doggy()  # noqa | ||||
| 
 | ||||
| 
 | ||||
| _cached_stream: Optional[ | ||||
|     trio.abc.ReceiveChannel | ||||
| ] = None | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def wrapper_mngr( | ||||
| ): | ||||
|     from tractor.trionics import broadcast_receiver | ||||
|     global _cached_stream | ||||
|     in_aio = tractor.current_actor().is_infected_aio() | ||||
| 
 | ||||
|     if in_aio: | ||||
|         if _cached_stream: | ||||
| 
 | ||||
|             from_aio = _cached_stream | ||||
| 
 | ||||
|             # if we already have a cached feed deliver a rx side clone | ||||
|             # to consumer | ||||
|             async with broadcast_receiver(from_aio, 6) as from_aio: | ||||
|                 yield from_aio | ||||
|                 return | ||||
|         else: | ||||
|             async with tractor.to_asyncio.open_channel_from( | ||||
|                 aio_streamer, | ||||
|             ) as (first, from_aio): | ||||
|                 assert not first | ||||
| 
 | ||||
|                 # cache it so next task uses broadcast receiver | ||||
|                 _cached_stream = from_aio | ||||
| 
 | ||||
|                 yield from_aio | ||||
|     else: | ||||
|         async with aclosing(trio_streamer()) as stream: | ||||
|             # cache it so next task uses broadcast receiver | ||||
|             _cached_stream = stream | ||||
|             yield stream | ||||
| 
 | ||||
| 
 | ||||
| _nursery: trio.Nursery = None | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def trio_main( | ||||
|     ctx: tractor.Context, | ||||
| ): | ||||
|     # sync | ||||
|     await ctx.started() | ||||
| 
 | ||||
|     # stash a "service nursery" as "actor local" (aka a Python global) | ||||
|     global _nursery | ||||
|     n = _nursery | ||||
|     assert n | ||||
| 
 | ||||
|     async def consume_stream(): | ||||
|         async with wrapper_mngr() as stream: | ||||
|             async for msg in stream: | ||||
|                 print(msg) | ||||
| 
 | ||||
|     # run 2 tasks to ensure broadcaster chan use | ||||
|     n.start_soon(consume_stream) | ||||
|     n.start_soon(consume_stream) | ||||
| 
 | ||||
|     n.start_soon(trio_sleep_and_err) | ||||
| 
 | ||||
|     await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def open_actor_local_nursery( | ||||
|     ctx: tractor.Context, | ||||
| ): | ||||
|     global _nursery | ||||
|     async with trio.open_nursery() as n: | ||||
|         _nursery = n | ||||
|         await ctx.started() | ||||
|         await trio.sleep(10) | ||||
|         # await trio.sleep(1) | ||||
| 
 | ||||
|         # XXX: this causes the hang since | ||||
|         # the caller does not unblock from its own | ||||
|         # ``trio.sleep_forever()``. | ||||
| 
 | ||||
|         # TODO: we need to test a simple ctx task starting remote tasks | ||||
|         # that error and then blocking on a ``Nursery.start()`` which | ||||
|         # never yields back.. aka a scenario where the | ||||
|         # ``tractor.context`` task IS NOT in the service n's cancel | ||||
|         # scope. | ||||
|         n.cancel_scope.cancel() | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'asyncio_mode', | ||||
|     [True, False], | ||||
|     ids='asyncio_mode={}'.format, | ||||
| ) | ||||
| def test_actor_managed_trio_nursery_task_error_cancels_aio( | ||||
|     asyncio_mode: bool, | ||||
|     arb_addr | ||||
| ): | ||||
|     ''' | ||||
|     Verify that a ``trio`` nursery created and managed in a child actor | ||||
|     correctly relays errors to the parent actor when one of its spawned | ||||
|     tasks errors even when running in infected asyncio mode and using | ||||
|     broadcast receivers for multi-task-per-actor subscription. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
| 
 | ||||
|         # cancel the nursery shortly after boot | ||||
|         async with tractor.open_nursery() as n: | ||||
|             p = await n.start_actor( | ||||
|                 'nursery_mngr', | ||||
|                 infect_asyncio=asyncio_mode, | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
|             async with ( | ||||
|                 p.open_context(open_actor_local_nursery) as (ctx, first), | ||||
|                 p.open_context(trio_main) as (ctx, first), | ||||
|             ): | ||||
|                 await trio.sleep_forever() | ||||
| 
 | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     # verify boxed error | ||||
|     err = excinfo.value | ||||
|     assert isinstance(err.type(), NameError) | ||||
|  | @ -1,84 +0,0 @@ | |||
| import itertools | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| from tractor import open_actor_cluster | ||||
| from tractor.trionics import gather_contexts | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| MESSAGE = 'tractoring at full speed' | ||||
| 
 | ||||
| 
 | ||||
| def test_empty_mngrs_input_raises() -> None: | ||||
| 
 | ||||
|     async def main(): | ||||
|         with trio.fail_after(1): | ||||
|             async with ( | ||||
|                 open_actor_cluster( | ||||
|                     modules=[__name__], | ||||
| 
 | ||||
|                     # NOTE: ensure we can passthrough runtime opts | ||||
|                     loglevel='info', | ||||
|                     # debug_mode=True, | ||||
| 
 | ||||
|                 ) as portals, | ||||
| 
 | ||||
|                 gather_contexts( | ||||
|                     # NOTE: it's the use of inline-generator syntax | ||||
|                     # here that causes the empty input. | ||||
|                     mngrs=( | ||||
|                         p.open_context(worker) for p in portals.values() | ||||
|                     ), | ||||
|                 ), | ||||
|             ): | ||||
|                 assert 0 | ||||
| 
 | ||||
|     with pytest.raises(ValueError): | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def worker( | ||||
|     ctx: tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     await ctx.started() | ||||
| 
 | ||||
|     async with ctx.open_stream( | ||||
|         backpressure=True, | ||||
|     ) as stream: | ||||
| 
 | ||||
|         # TODO: using this timeout with the below assert causes a hang bug? | ||||
|         # with trio.move_on_after(1): | ||||
| 
 | ||||
|         async for msg in stream: | ||||
|             # do something with msg | ||||
|             print(msg) | ||||
|             assert msg == MESSAGE | ||||
| 
 | ||||
|         # TODO: does this ever cause a hang | ||||
|         # assert 0 | ||||
| 
 | ||||
| 
 | ||||
| @tractor_test | ||||
| async def test_streaming_to_actor_cluster() -> None: | ||||
| 
 | ||||
|     async with ( | ||||
|         open_actor_cluster(modules=[__name__]) as portals, | ||||
| 
 | ||||
|         gather_contexts( | ||||
|             mngrs=[p.open_context(worker) for p in portals.values()], | ||||
|         ) as contexts, | ||||
| 
 | ||||
|         gather_contexts( | ||||
|             mngrs=[ctx[0].open_stream() for ctx in contexts], | ||||
|         ) as streams, | ||||
| 
 | ||||
|     ): | ||||
|         with trio.move_on_after(1): | ||||
|             for stream in itertools.cycle(streams): | ||||
|                 await stream.send(MESSAGE) | ||||
|  | @ -1,798 +0,0 @@ | |||
| ''' | ||||
| ``async with ():`` inlined context-stream cancellation testing. | ||||
| 
 | ||||
| Verify that we raise errors when streams are opened prior to | ||||
| sync-opening a ``tractor.Context``. | ||||
| 
 | ||||
| ''' | ||||
| from contextlib import asynccontextmanager as acm | ||||
| from itertools import count | ||||
| import platform | ||||
| from typing import Optional | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| from tractor._exceptions import StreamOverrun | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| # ``Context`` semantics are as follows, | ||||
| #  ------------------------------------ | ||||
| 
 | ||||
| # - standard setup/teardown: | ||||
| #   ``Portal.open_context()`` starts a new | ||||
| #   remote task context in another actor. The target actor's task must | ||||
| #   call ``Context.started()`` to unblock this entry on the caller side. | ||||
| #   The callee task executes until complete and returns a final value | ||||
| #   which is delivered to the caller side and retrieved via | ||||
| #   ``Context.result()``. | ||||
| 
 | ||||
| # - cancel termination: | ||||
| #   context can be cancelled on either side where either end's task can | ||||
| #   call ``Context.cancel()`` which raises a local ``trio.Cancelled`` | ||||
| #   and sends a task cancel request to the remote task which in turn | ||||
| #   raises a ``trio.Cancelled`` in that scope, catches it, and re-raises | ||||
| #   as ``ContextCancelled``. This is then caught by | ||||
| #   ``Portal.open_context()``'s exit and we get a graceful termination | ||||
| #   of the linked tasks. | ||||
| 
 | ||||
| # - error termination: | ||||
| #   error is caught after all context-cancel-scope tasks are cancelled | ||||
| #   via regular ``trio`` cancel scope semantics, error is sent to other | ||||
| #   side and unpacked as a `RemoteActorError`. | ||||
| 
 | ||||
| 
 | ||||
| # ``Context.open_stream() as stream: MsgStream:`` msg semantics are: | ||||
| #  ----------------------------------------------------------------- | ||||
| 
 | ||||
| # - either side can ``.send()`` which emits a 'yield' msg and delivers | ||||
| #   a value to the far end's ``MsgStream.receive()`` call. | ||||
| 
 | ||||
| # - stream closure: one end relays a 'stop' message which terminates an | ||||
| #   ongoing ``MsgStream`` iteration. | ||||
| 
 | ||||
| # - cancel/error termination: as per the context semantics above but | ||||
| #   with implicit stream closure on the cancelling end. | ||||
| 
 | ||||
| 
 | ||||
| _state: bool = False | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def too_many_starteds( | ||||
|     ctx: tractor.Context, | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Call ``Context.started()`` more than once (an error). | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     try: | ||||
|         await ctx.started() | ||||
|     except RuntimeError: | ||||
|         raise | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def not_started_but_stream_opened( | ||||
|     ctx: tractor.Context, | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Enter ``Context.open_stream()`` without calling ``.started()``. | ||||
| 
 | ||||
|     ''' | ||||
|     try: | ||||
|         async with ctx.open_stream(): | ||||
|             assert 0 | ||||
|     except RuntimeError: | ||||
|         raise | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'target', | ||||
|     [too_many_starteds, not_started_but_stream_opened], | ||||
|     ids='misuse_type={}'.format, | ||||
| ) | ||||
| def test_started_misuse(target): | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.start_actor( | ||||
|                 target.__name__, | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             async with portal.open_context(target) as (ctx, sent): | ||||
|                 await trio.sleep(1) | ||||
| 
 | ||||
|     with pytest.raises(tractor.RemoteActorError): | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def simple_setup_teardown( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
|     data: int, | ||||
|     block_forever: bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     # startup phase | ||||
|     global _state | ||||
|     _state = True | ||||
| 
 | ||||
|     # signal to parent that we're up | ||||
|     await ctx.started(data + 1) | ||||
| 
 | ||||
|     try: | ||||
|         if block_forever: | ||||
|             # block until cancelled | ||||
|             await trio.sleep_forever() | ||||
|         else: | ||||
|             return 'yo' | ||||
|     finally: | ||||
|         _state = False | ||||
| 
 | ||||
| 
 | ||||
| async def assert_state(value: bool): | ||||
|     global _state | ||||
|     assert _state == value | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'error_parent', | ||||
|     [False, ValueError, KeyboardInterrupt], | ||||
| ) | ||||
| @pytest.mark.parametrize( | ||||
|     'callee_blocks_forever', | ||||
|     [False, True], | ||||
|     ids=lambda item: f'callee_blocks_forever={item}' | ||||
| ) | ||||
| @pytest.mark.parametrize( | ||||
|     'pointlessly_open_stream', | ||||
|     [False, True], | ||||
|     ids=lambda item: f'open_stream={item}' | ||||
| ) | ||||
| def test_simple_context( | ||||
|     error_parent, | ||||
|     callee_blocks_forever, | ||||
|     pointlessly_open_stream, | ||||
| ): | ||||
| 
 | ||||
|     timeout = 4 if platform.system() == 'Windows' else 1.5 | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         with trio.fail_after(timeout): | ||||
|             async with tractor.open_nursery() as nursery: | ||||
| 
 | ||||
|                 portal = await nursery.start_actor( | ||||
|                     'simple_context', | ||||
|                     enable_modules=[__name__], | ||||
|                 ) | ||||
| 
 | ||||
|                 try: | ||||
|                     async with portal.open_context( | ||||
|                         simple_setup_teardown, | ||||
|                         data=10, | ||||
|                         block_forever=callee_blocks_forever, | ||||
|                     ) as (ctx, sent): | ||||
| 
 | ||||
|                         assert sent == 11 | ||||
| 
 | ||||
|                         if callee_blocks_forever: | ||||
|                             await portal.run(assert_state, value=True) | ||||
|                         else: | ||||
|                             assert await ctx.result() == 'yo' | ||||
| 
 | ||||
|                         if not error_parent: | ||||
|                             await ctx.cancel() | ||||
| 
 | ||||
|                         if pointlessly_open_stream: | ||||
|                             async with ctx.open_stream(): | ||||
|                                 if error_parent: | ||||
|                                     raise error_parent | ||||
| 
 | ||||
|                                 if callee_blocks_forever: | ||||
|                                     await ctx.cancel() | ||||
|                                 else: | ||||
|                                     # in this case the stream will send a | ||||
|                                     # 'stop' msg to the far end which needs | ||||
|                                     # to be ignored | ||||
|                                     pass | ||||
|                         else: | ||||
|                             if error_parent: | ||||
|                                 raise error_parent | ||||
| 
 | ||||
|                 finally: | ||||
| 
 | ||||
|                     # after cancellation | ||||
|                     if not error_parent: | ||||
|                         await portal.run(assert_state, value=False) | ||||
| 
 | ||||
|                     # shut down daemon | ||||
|                     await portal.cancel_actor() | ||||
| 
 | ||||
|     if error_parent: | ||||
|         try: | ||||
|             trio.run(main) | ||||
|         except error_parent: | ||||
|             pass | ||||
|         except trio.MultiError as me: | ||||
|             # XXX: on windows it seems we may have to expect the group error | ||||
|             from tractor._exceptions import is_multi_cancelled | ||||
|             assert is_multi_cancelled(me) | ||||
|     else: | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| # basic stream terminations: | ||||
| # - callee context closes without using stream | ||||
| # - caller context closes without using stream | ||||
| # - caller context calls `Context.cancel()` while streaming | ||||
| #   is ongoing resulting in callee being cancelled | ||||
| # - callee calls `Context.cancel()` while streaming and caller | ||||
| #   sees stream terminated in `RemoteActorError` | ||||
| 
 | ||||
| # TODO: future possible features | ||||
| # - restart request: far end raises `ContextRestart` | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def close_ctx_immediately( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     await ctx.started() | ||||
|     global _state | ||||
| 
 | ||||
|     async with ctx.open_stream(): | ||||
|         pass | ||||
| 
 | ||||
| 
 | ||||
| @tractor_test | ||||
| async def test_callee_closes_ctx_after_stream_open(): | ||||
|     'callee context closes without using stream' | ||||
| 
 | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         portal = await n.start_actor( | ||||
|             'fast_stream_closer', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         with trio.fail_after(2): | ||||
|             async with portal.open_context( | ||||
|                 close_ctx_immediately, | ||||
| 
 | ||||
|                 # flag to avoid waiting the final result | ||||
|                 # cancel_on_exit=True, | ||||
| 
 | ||||
|             ) as (ctx, sent): | ||||
| 
 | ||||
|                 assert sent is None | ||||
| 
 | ||||
|                 with trio.fail_after(0.5): | ||||
|                     async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|                         # should fall through since ``StopAsyncIteration`` | ||||
|                         # should be raised through translation of | ||||
|                         # a ``trio.EndOfChannel`` by | ||||
|                         # ``trio.abc.ReceiveChannel.__anext__()`` | ||||
|                         async for _ in stream: | ||||
|                             assert 0 | ||||
|                         else: | ||||
| 
 | ||||
|                             # verify stream is now closed | ||||
|                             try: | ||||
|                                 await stream.receive() | ||||
|                             except trio.EndOfChannel: | ||||
|                                 pass | ||||
| 
 | ||||
|                 # TODO: should we just raise the closed resource err | ||||
|                 # directly here to enforce not allowing a re-open | ||||
|                 # of a stream to the context (at least until if/when | ||||
|                 # we decide that's a good idea?) | ||||
|                 try: | ||||
|                     with trio.fail_after(0.5): | ||||
|                         async with ctx.open_stream() as stream: | ||||
|                             pass | ||||
|                 except trio.ClosedResourceError: | ||||
|                     pass | ||||
| 
 | ||||
|         await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def expect_cancelled( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
|     global _state | ||||
|     _state = True | ||||
| 
 | ||||
|     await ctx.started() | ||||
| 
 | ||||
|     try: | ||||
|         async with ctx.open_stream() as stream: | ||||
|             async for msg in stream: | ||||
|                 await stream.send(msg)  # echo server | ||||
| 
 | ||||
|     except trio.Cancelled: | ||||
|         # expected case | ||||
|         _state = False | ||||
|         raise | ||||
| 
 | ||||
|     else: | ||||
|         assert 0, "Wasn't cancelled!?" | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'use_ctx_cancel_method', | ||||
|     [False, True], | ||||
| ) | ||||
| @tractor_test | ||||
| async def test_caller_closes_ctx_after_callee_opens_stream( | ||||
|     use_ctx_cancel_method: bool, | ||||
| ): | ||||
|     'caller context closes without using stream' | ||||
| 
 | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         portal = await n.start_actor( | ||||
|             'ctx_cancelled', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_context( | ||||
|             expect_cancelled, | ||||
|         ) as (ctx, sent): | ||||
|             await portal.run(assert_state, value=True) | ||||
| 
 | ||||
|             assert sent is None | ||||
| 
 | ||||
|             # call cancel explicitly | ||||
|             if use_ctx_cancel_method: | ||||
| 
 | ||||
|                 await ctx.cancel() | ||||
| 
 | ||||
|                 try: | ||||
|                     async with ctx.open_stream() as stream: | ||||
|                         async for msg in stream: | ||||
|                             pass | ||||
| 
 | ||||
|                 except tractor.ContextCancelled: | ||||
|                     raise  # XXX: must be propagated to __aexit__ | ||||
| 
 | ||||
|                 else: | ||||
|                     assert 0, "Should have context cancelled?" | ||||
| 
 | ||||
|                 # channel should still be up | ||||
|                 assert portal.channel.connected() | ||||
| 
 | ||||
|                 # ctx is closed here | ||||
|                 await portal.run(assert_state, value=False) | ||||
| 
 | ||||
|             else: | ||||
|                 try: | ||||
|                     with trio.fail_after(0.2): | ||||
|                         await ctx.result() | ||||
|                         assert 0, "Callee should have blocked!?" | ||||
|                 except trio.TooSlowError: | ||||
|                     await ctx.cancel() | ||||
|         try: | ||||
|             async with ctx.open_stream() as stream: | ||||
|                 async for msg in stream: | ||||
|                     pass | ||||
|         except tractor.ContextCancelled: | ||||
|             pass | ||||
|         else: | ||||
|             assert 0, "Should have received closed resource error?" | ||||
| 
 | ||||
|         # ctx is closed here | ||||
|         await portal.run(assert_state, value=False) | ||||
| 
 | ||||
|         # channel should not have been destroyed yet, only the | ||||
|         # inter-actor-task context | ||||
|         assert portal.channel.connected() | ||||
| 
 | ||||
|         # teardown the actor | ||||
|         await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| @tractor_test | ||||
| async def test_multitask_caller_cancels_from_nonroot_task(): | ||||
| 
 | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         portal = await n.start_actor( | ||||
|             'ctx_cancelled', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_context( | ||||
|             expect_cancelled, | ||||
|         ) as (ctx, sent): | ||||
| 
 | ||||
|             await portal.run(assert_state, value=True) | ||||
|             assert sent is None | ||||
| 
 | ||||
|             async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|                 async def send_msg_then_cancel(): | ||||
|                     await stream.send('yo') | ||||
|                     await portal.run(assert_state, value=True) | ||||
|                     await ctx.cancel() | ||||
|                     await portal.run(assert_state, value=False) | ||||
| 
 | ||||
|                 async with trio.open_nursery() as n: | ||||
|                     n.start_soon(send_msg_then_cancel) | ||||
| 
 | ||||
|                     try: | ||||
|                         async for msg in stream: | ||||
|                             assert msg == 'yo' | ||||
| 
 | ||||
|                     except tractor.ContextCancelled: | ||||
|                         raise  # XXX: must be propagated to __aexit__ | ||||
| 
 | ||||
|                 # channel should still be up | ||||
|                 assert portal.channel.connected() | ||||
| 
 | ||||
|                 # ctx is closed here | ||||
|                 await portal.run(assert_state, value=False) | ||||
| 
 | ||||
|         # channel should not have been destroyed yet, only the | ||||
|         # inter-actor-task context | ||||
|         assert portal.channel.connected() | ||||
| 
 | ||||
|         # teardown the actor | ||||
|         await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def cancel_self( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
|     global _state | ||||
|     _state = True | ||||
| 
 | ||||
|     await ctx.cancel() | ||||
| 
 | ||||
|     # should inline raise immediately | ||||
|     try: | ||||
|         async with ctx.open_stream(): | ||||
|             pass | ||||
|     except tractor.ContextCancelled: | ||||
|         # suppress for now so we can do checkpoint tests below | ||||
|         pass | ||||
|     else: | ||||
|         raise RuntimeError("Context didn't cancel itself?!") | ||||
| 
 | ||||
|     # check a real ``trio.Cancelled`` is raised on a checkpoint | ||||
|     try: | ||||
|         with trio.fail_after(0.1): | ||||
|             await trio.sleep_forever() | ||||
|     except trio.Cancelled: | ||||
|         raise | ||||
| 
 | ||||
|     except trio.TooSlowError: | ||||
|         # should never get here | ||||
|         assert 0 | ||||
| 
 | ||||
| 
 | ||||
| @tractor_test | ||||
| async def test_callee_cancels_before_started(): | ||||
|     ''' | ||||
|     Callee calls `Context.cancel()` while streaming and caller | ||||
|     sees stream terminated in `ContextCancelled`. | ||||
| 
 | ||||
|     ''' | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         portal = await n.start_actor( | ||||
|             'cancels_self', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
|         try: | ||||
| 
 | ||||
|             async with portal.open_context( | ||||
|                 cancel_self, | ||||
|             ) as (ctx, sent): | ||||
|                 async with ctx.open_stream(): | ||||
| 
 | ||||
|                     await trio.sleep_forever() | ||||
| 
 | ||||
|         # raises a special cancel signal | ||||
|         except tractor.ContextCancelled as ce: | ||||
|             assert ce.type == trio.Cancelled | ||||
| 
 | ||||
|             # the traceback should be informative | ||||
|             assert 'cancelled itself' in ce.msgdata['tb_str'] | ||||
| 
 | ||||
|         # teardown the actor | ||||
|         await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def never_open_stream( | ||||
| 
 | ||||
|     ctx:  tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Context which never opens a stream and blocks. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def keep_sending_from_callee( | ||||
| 
 | ||||
|     ctx:  tractor.Context, | ||||
|     msg_buffer_size: Optional[int] = None, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Send endlessly on the callee stream. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     async with ctx.open_stream( | ||||
|         msg_buffer_size=msg_buffer_size, | ||||
|     ) as stream: | ||||
|         for msg in count(): | ||||
|             print(f'callee sending {msg}') | ||||
|             await stream.send(msg) | ||||
|             await trio.sleep(0.01) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'overrun_by', | ||||
|     [ | ||||
|         ('caller', 1, never_open_stream), | ||||
|         ('cancel_caller_during_overrun', 1, never_open_stream), | ||||
|         ('callee', 0, keep_sending_from_callee), | ||||
|     ], | ||||
|     ids='overrun_condition={}'.format, | ||||
| ) | ||||
| def test_one_end_stream_not_opened(overrun_by): | ||||
|     ''' | ||||
|     This should exemplify the bug from: | ||||
|     https://github.com/goodboy/tractor/issues/265 | ||||
| 
 | ||||
|     ''' | ||||
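|     # in brief: one side pushes ``Actor.msg_buffer_size`` (+/- the | ||||
|     # increase below) msgs at a peer which never opens its stream end, | ||||
|     # so the overrun should be relayed back as a boxed error instead | ||||
|     # of silently hanging. | ||||
| 
 | ||||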
|     overrunner, buf_size_increase, entrypoint = overrun_by | ||||
|     from tractor._runtime import Actor | ||||
|     buf_size = buf_size_increase + Actor.msg_buffer_size | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.start_actor( | ||||
|                 entrypoint.__name__, | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             async with portal.open_context( | ||||
|                 entrypoint, | ||||
|             ) as (ctx, sent): | ||||
|                 assert sent is None | ||||
| 
 | ||||
|                 if 'caller' in overrunner: | ||||
| 
 | ||||
|                     async with ctx.open_stream() as stream: | ||||
|                         for i in range(buf_size): | ||||
|                             print(f'sending {i}') | ||||
|                             await stream.send(i) | ||||
| 
 | ||||
|                         if 'cancel' in overrunner: | ||||
|                             # without this we block waiting on the child side | ||||
|                             await ctx.cancel() | ||||
| 
 | ||||
|                         else: | ||||
|                             # expect overrun error to be relayed back | ||||
|                             # and this sleep interrupted | ||||
|                             await trio.sleep_forever() | ||||
| 
 | ||||
|                 else: | ||||
|                     # callee overruns caller case so we do nothing here | ||||
|                     await trio.sleep_forever() | ||||
| 
 | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
|     # 2 overrun cases and the no overrun case (which pushes right up to | ||||
|     # the msg limit) | ||||
|     if overrunner == 'caller' or 'cancel' in overrunner: | ||||
|         with pytest.raises(tractor.RemoteActorError) as excinfo: | ||||
|             trio.run(main) | ||||
| 
 | ||||
|         assert excinfo.value.type == StreamOverrun | ||||
| 
 | ||||
|     elif overrunner == 'callee': | ||||
|         with pytest.raises(tractor.RemoteActorError) as excinfo: | ||||
|             trio.run(main) | ||||
| 
 | ||||
|         # TODO: embedded remote errors so that we can verify the source | ||||
|         # error? the callee delivers an error which is an overrun | ||||
|         # wrapped in a remote actor error. | ||||
|         assert excinfo.value.type == tractor.RemoteActorError | ||||
| 
 | ||||
|     else: | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def echo_back_sequence( | ||||
| 
 | ||||
|     ctx:  tractor.Context, | ||||
|     seq: list[int], | ||||
|     msg_buffer_size: Optional[int] = None, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Echo back the received sequence on the callee stream. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     async with ctx.open_stream( | ||||
|         msg_buffer_size=msg_buffer_size, | ||||
|     ) as stream: | ||||
| 
 | ||||
|         seq = list(seq)  # bleh, `msgpack`... | ||||
|         count = 0 | ||||
|         while count < 3: | ||||
|             batch = [] | ||||
|             async for msg in stream: | ||||
|                 batch.append(msg) | ||||
|                 if batch == seq: | ||||
|                     break | ||||
| 
 | ||||
|             for msg in batch: | ||||
|                 print(f'callee sending {msg}') | ||||
|                 await stream.send(msg) | ||||
| 
 | ||||
|             count += 1 | ||||
| 
 | ||||
|         return 'yo' | ||||
| 
 | ||||
| 
 | ||||
| def test_stream_backpressure(): | ||||
|     ''' | ||||
|     Demonstrate that small overruns by each task, sent back and forth | ||||
|     on a stream, raise no errors by default. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.start_actor( | ||||
|                 'callee_sends_forever', | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
|             seq = list(range(3)) | ||||
|             async with portal.open_context( | ||||
|                 echo_back_sequence, | ||||
|                 seq=seq, | ||||
|                 msg_buffer_size=1, | ||||
|             ) as (ctx, sent): | ||||
|                 assert sent is None | ||||
| 
 | ||||
|                 async with ctx.open_stream(msg_buffer_size=1) as stream: | ||||
|                     count = 0 | ||||
|                     while count < 3: | ||||
|                         for msg in seq: | ||||
|                             print(f'caller sending {msg}') | ||||
|                             await stream.send(msg) | ||||
|                             await trio.sleep(0.1) | ||||
| 
 | ||||
|                         batch = [] | ||||
|                         async for msg in stream: | ||||
|                             batch.append(msg) | ||||
|                             if batch == seq: | ||||
|                                 break | ||||
| 
 | ||||
|                         count += 1 | ||||
| 
 | ||||
|             # here the context should return | ||||
|             assert await ctx.result() == 'yo' | ||||
| 
 | ||||
|             # cancel the daemon | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
|     trio.run(main) | ||||
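The backpressure exercised above comes from the bounded per-stream msg buffer: with ``msg_buffer_size=1`` a sender that gets ahead of its receiver simply waits on ``send()`` rather than erroring. A minimal plain-``trio`` analogy of that bounded-buffer behavior (a sketch only, not tractor's actual IPC transport):

    import trio

    async def demo_backpressure():
        # a buffer of 1 mimics ``msg_buffer_size=1`` above: ``send()``
        # blocks whenever the receiver hasn't drained the last msg yet.
        send, recv = trio.open_memory_channel(1)

        async def producer():
            for i in range(3):
                await send.send(i)  # waits here while the buffer is full
            await send.aclose()

        async with trio.open_nursery() as n:
            n.start_soon(producer)
            async for msg in recv:
                print(f'drained {msg}')

    trio.run(demo_backpressure)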
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def sleep_forever( | ||||
|     ctx: tractor.Context, | ||||
| ) -> None: | ||||
|     await ctx.started() | ||||
|     async with ctx.open_stream(): | ||||
|         await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def attach_to_sleep_forever(): | ||||
|     ''' | ||||
|     Cancel a context **before** any underlying error is raised in order | ||||
|     to trigger a local reception of a ``ContextCancelled`` which **should not** | ||||
|     be re-raised in the local surrounding ``Context`` *iff* the cancel was | ||||
|     requested by **this** side of the context. | ||||
| 
 | ||||
|     ''' | ||||
|     async with tractor.wait_for_actor('sleeper') as p2: | ||||
|         async with ( | ||||
|             p2.open_context(sleep_forever) as (peer_ctx, first), | ||||
|             peer_ctx.open_stream(), | ||||
|         ): | ||||
|             try: | ||||
|                 yield | ||||
|             finally: | ||||
|                 # XXX: previously this would trigger local | ||||
|                 # ``ContextCancelled`` to be received and raised in the | ||||
|                 # local context overriding any local error due to | ||||
|                 # logic inside ``_invoke()`` which checked for | ||||
|                 # an error set on ``Context._error`` and raised it in | ||||
|                 # under a cancellation scenario. | ||||
| 
 | ||||
|                 # The problem is you can have a remote cancellation | ||||
|                 # that is part of a local error and we shouldn't raise | ||||
|                 # ``ContextCancelled`` **iff** we weren't the side of | ||||
|                 # the context to initiate it, i.e. | ||||
|                 # ``Context._cancel_called`` should **NOT** have been | ||||
|                 # set. The special logic to handle this case is now | ||||
|                 # inside ``Context._may_raise_from_remote_msg()`` XD | ||||
|                 await peer_ctx.cancel() | ||||
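To make the comment above concrete, here is a minimal sketch of the described decision rule; the predicate name below is hypothetical and the real logic lives inside ``Context._may_raise_from_remote_msg()``:

    from typing import Optional

    def should_reraise_ctxc(
        cancel_called: bool,  # did *this* side request ``ctx.cancel()``?
        remote_exc: Optional[BaseException],
    ) -> bool:
        # a relayed ``ContextCancelled`` is only (re)raised locally when
        # the cancellation was *not* requested by this side of the context.
        if remote_exc is None:
            return False
        is_ctxc = type(remote_exc).__name__ == 'ContextCancelled'
        return is_ctxc and not cancel_called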
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def error_before_started( | ||||
|     ctx: tractor.Context, | ||||
| ) -> None: | ||||
|     ''' | ||||
|     This simulates exactly the original bug discovered in: | ||||
|     https://github.com/pikers/piker/issues/244 | ||||
| 
 | ||||
|     ''' | ||||
|     async with attach_to_sleep_forever(): | ||||
|         # send an unserializable type which should raise a type error | ||||
|         # here and **NOT BE SWALLOWED** by the surrounding acm!!?! | ||||
|         await ctx.started(object()) | ||||
| 
 | ||||
| 
 | ||||
| def test_do_not_swallow_error_before_started_by_remote_contextcancelled(): | ||||
|     ''' | ||||
|     Verify that an error raised in a remote context which itself opens another | ||||
|     remote context, which it cancels, does not override the original error that | ||||
|     caused the cancellation of the secondary context. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.start_actor( | ||||
|                 'errorer', | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
|             await n.start_actor( | ||||
|                 'sleeper', | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             async with ( | ||||
|                 portal.open_context( | ||||
|                     error_before_started | ||||
|                 ) as (ctx, sent), | ||||
|             ): | ||||
|                 await trio.sleep_forever() | ||||
| 
 | ||||
|     with pytest.raises(tractor.RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     assert excinfo.value.type == TypeError | ||||
										
											
												File diff suppressed because it is too large
											
										
									
								
							|  | @ -20,11 +20,8 @@ async def test_reg_then_unreg(arb_addr): | |||
|     assert actor.is_arbiter | ||||
|     assert len(actor._registry) == 1  # only self is registered | ||||
| 
 | ||||
|     async with tractor.open_nursery( | ||||
|         arbiter_addr=arb_addr, | ||||
|     ) as n: | ||||
| 
 | ||||
|         portal = await n.start_actor('actor', enable_modules=[__name__]) | ||||
|     async with tractor.open_nursery() as n: | ||||
|         portal = await n.start_actor('actor', rpc_module_paths=[__name__]) | ||||
|         uid = portal.channel.uid | ||||
| 
 | ||||
|         async with tractor.get_arbiter(*arb_addr) as aportal: | ||||
|  | @ -42,7 +39,7 @@ async def test_reg_then_unreg(arb_addr): | |||
| 
 | ||||
|         await trio.sleep(0.1) | ||||
|         assert uid not in aportal.actor._registry | ||||
|         sockaddrs = actor._registry.get(uid) | ||||
|         sockaddrs = actor._registry[uid] | ||||
|         assert not sockaddrs | ||||
| 
 | ||||
| 
 | ||||
|  | @ -69,7 +66,7 @@ async def say_hello_use_wait(other_actor): | |||
| 
 | ||||
| @tractor_test | ||||
| @pytest.mark.parametrize('func', [say_hello, say_hello_use_wait]) | ||||
| async def test_trynamic_trio(func, start_method, arb_addr): | ||||
| async def test_trynamic_trio(func, start_method): | ||||
|     """Main tractor entry point, the "master" process (for now | ||||
|     acts as the "director"). | ||||
|     """ | ||||
|  | @ -111,69 +108,49 @@ async def cancel(use_signal, delay=0): | |||
| 
 | ||||
| 
 | ||||
| async def stream_from(portal): | ||||
|     async with portal.open_stream_from(stream_forever) as stream: | ||||
|         async for value in stream: | ||||
|     async for value in await portal.result(): | ||||
|         print(value) | ||||
| 
 | ||||
| 
 | ||||
| async def unpack_reg(actor_or_portal): | ||||
|     ''' | ||||
|     Get and unpack a "registry" RPC request from the "arbiter" registry | ||||
|     system. | ||||
| 
 | ||||
|     ''' | ||||
|     if getattr(actor_or_portal, 'get_registry', None): | ||||
|         msg = await actor_or_portal.get_registry() | ||||
|     else: | ||||
|         msg = await actor_or_portal.run_from_ns('self', 'get_registry') | ||||
| 
 | ||||
|     return {tuple(key.split('.')): val for key, val in msg.items()} | ||||
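Registry msgs arrive keyed by dotted ``'name.uuid'`` strings over the wire; the dict comprehension above splits each key back into a ``(name, uuid)`` tuple. A tiny illustration with made-up values:

    msg = {'a0.deadbeef': ('127.0.0.1', 61351)}  # hypothetical wire payload
    registry = {tuple(key.split('.')): val for key, val in msg.items()}
    assert registry == {('a0', 'deadbeef'): ('127.0.0.1', 61351)}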
| 
 | ||||
| 
 | ||||
| async def spawn_and_check_registry( | ||||
|     arb_addr: tuple, | ||||
|     use_signal: bool, | ||||
|     remote_arbiter: bool = False, | ||||
|     with_streaming: bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     async with tractor.open_root_actor( | ||||
|         arbiter_addr=arb_addr, | ||||
|     ): | ||||
|         async with tractor.get_arbiter(*arb_addr) as portal: | ||||
|             # runtime needs to be up to call this | ||||
|     actor = tractor.current_actor() | ||||
| 
 | ||||
|     if remote_arbiter: | ||||
|         assert not actor.is_arbiter | ||||
| 
 | ||||
|             if actor.is_arbiter: | ||||
|                 extra = 1  # arbiter is local root actor | ||||
|                 get_reg = partial(unpack_reg, actor) | ||||
|     async with tractor.get_arbiter(*arb_addr) as portal: | ||||
| 
 | ||||
|         if actor.is_arbiter: | ||||
| 
 | ||||
|             async def get_reg(): | ||||
|                 return actor._registry | ||||
| 
 | ||||
|             extra = 1  # arbiter is local root actor | ||||
|         else: | ||||
|                 get_reg = partial(unpack_reg, portal) | ||||
|             get_reg = partial(portal.run_from_ns, 'self', 'get_registry') | ||||
|             extra = 2  # local root actor + remote arbiter | ||||
| 
 | ||||
|         # ensure current actor is registered | ||||
|         registry = await get_reg() | ||||
|         assert actor.uid in registry | ||||
| 
 | ||||
|         if with_streaming: | ||||
|             to_run = stream_forever | ||||
|         else: | ||||
|             to_run = trio.sleep_forever | ||||
| 
 | ||||
|         async with trio.open_nursery() as trion: | ||||
|             try: | ||||
|                 async with tractor.open_nursery() as n: | ||||
|                     async with trio.open_nursery() as trion: | ||||
| 
 | ||||
|                     portals = {} | ||||
|                     for i in range(3): | ||||
|                         name = f'a{i}' | ||||
|                             if with_streaming: | ||||
|                                 portals[name] = await n.start_actor( | ||||
|                                     name=name, enable_modules=[__name__]) | ||||
| 
 | ||||
|                             else:  # no streaming | ||||
|                                 portals[name] = await n.run_in_actor( | ||||
|                                     trio.sleep_forever, name=name) | ||||
|                         portals[name] = await n.run_in_actor(to_run, name=name) | ||||
| 
 | ||||
|                     # wait on last actor to come up | ||||
|                     async with tractor.wait_for_actor(name): | ||||
|  | @ -194,12 +171,13 @@ async def spawn_and_check_registry( | |||
|                         trion.start_soon(cancel, use_signal, 1) | ||||
| 
 | ||||
|                         last_p = pts[-1] | ||||
|                             await stream_from(last_p) | ||||
| 
 | ||||
|                         async for value in await last_p.result(): | ||||
|                             print(value) | ||||
|                     else: | ||||
|                         await cancel(use_signal) | ||||
| 
 | ||||
|             finally: | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     await trio.sleep(0.5) | ||||
| 
 | ||||
|                     # all subactors should have de-registered | ||||
|  | @ -220,7 +198,7 @@ def test_subactors_unregister_on_cancel( | |||
|     deregistering themselves with the arbiter. | ||||
|     """ | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run( | ||||
|         tractor.run( | ||||
|             partial( | ||||
|                 spawn_and_check_registry, | ||||
|                 arb_addr, | ||||
|  | @ -228,6 +206,7 @@ def test_subactors_unregister_on_cancel( | |||
|                 remote_arbiter=False, | ||||
|                 with_streaming=with_streaming, | ||||
|             ), | ||||
|             arbiter_addr=arb_addr | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -245,7 +224,7 @@ def test_subactors_unregister_on_cancel_remote_daemon( | |||
|     tree) arbiter. | ||||
|     """ | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run( | ||||
|         tractor.run( | ||||
|             partial( | ||||
|                 spawn_and_check_registry, | ||||
|                 arb_addr, | ||||
|  | @ -253,6 +232,8 @@ def test_subactors_unregister_on_cancel_remote_daemon( | |||
|                 remote_arbiter=True, | ||||
|                 with_streaming=with_streaming, | ||||
|             ), | ||||
|             # XXX: required to use remote daemon! | ||||
|             arbiter_addr=arb_addr | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -274,27 +255,20 @@ async def close_chans_before_nursery( | |||
|     else: | ||||
|         entries_at_end = 1 | ||||
| 
 | ||||
|     async with tractor.open_root_actor( | ||||
|         arbiter_addr=arb_addr, | ||||
|     ): | ||||
|     async with tractor.get_arbiter(*arb_addr) as aportal: | ||||
|         try: | ||||
|                 get_reg = partial(unpack_reg, aportal) | ||||
|             get_reg = partial(aportal.run_from_ns, 'self', 'get_registry') | ||||
| 
 | ||||
|             async with tractor.open_nursery() as tn: | ||||
|                     portal1 = await tn.start_actor( | ||||
|                         name='consumer1', enable_modules=[__name__]) | ||||
|                     portal2 = await tn.start_actor( | ||||
|                         'consumer2', enable_modules=[__name__]) | ||||
|                 portal1 = await tn.run_in_actor( | ||||
|                     stream_forever, | ||||
|                     name='consumer1', | ||||
|                 ) | ||||
|                 agen1 = await portal1.result() | ||||
| 
 | ||||
|                 portal2 = await tn.start_actor('consumer2', rpc_module_paths=[__name__]) | ||||
|                 agen2 = await portal2.run(stream_forever) | ||||
| 
 | ||||
|                     # TODO: compact this back as was in last commit once | ||||
|                     # 3.9+, see https://github.com/goodboy/tractor/issues/207 | ||||
|                     async with portal1.open_stream_from( | ||||
|                         stream_forever | ||||
|                     ) as agen1: | ||||
|                         async with portal2.open_stream_from( | ||||
|                             stream_forever | ||||
|                         ) as agen2: | ||||
|                 async with trio.open_nursery() as n: | ||||
|                     n.start_soon(streamer, agen1) | ||||
|                     n.start_soon(cancel, use_signal, .5) | ||||
|  | @ -307,16 +281,15 @@ async def close_chans_before_nursery( | |||
|                         # reliably triggered by an external SIGINT. | ||||
|                         # tractor.current_actor()._root_nursery.cancel_scope.cancel() | ||||
| 
 | ||||
|                                     # XXX: THIS IS THE KEY THING that | ||||
|                                     # happens **before** exiting the | ||||
|                                     # actor nursery block | ||||
|                         # XXX: THIS IS THE KEY THING that happens | ||||
|                         # **before** exiting the actor nursery block | ||||
| 
 | ||||
|                         # also kill off channels cuz why not | ||||
|                         await agen1.aclose() | ||||
|                         await agen2.aclose() | ||||
|         finally: | ||||
|             with trio.CancelScope(shield=True): | ||||
|                     await trio.sleep(1) | ||||
|                 await trio.sleep(.5) | ||||
| 
 | ||||
|                 # all subactors should have de-registered | ||||
|                 registry = await get_reg() | ||||
|  | @ -336,13 +309,15 @@ def test_close_channel_explicit( | |||
|     results in subactor(s) deregistering from the arbiter. | ||||
|     """ | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run( | ||||
|         tractor.run( | ||||
|             partial( | ||||
|                 close_chans_before_nursery, | ||||
|                 arb_addr, | ||||
|                 use_signal, | ||||
|                 remote_arbiter=False, | ||||
|             ), | ||||
|             # XXX: required to use remote daemon! | ||||
|             arbiter_addr=arb_addr | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -358,11 +333,13 @@ def test_close_channel_explicit_remote_arbiter( | |||
|     results in subactor(s) deregistering from the arbiter. | ||||
|     """ | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run( | ||||
|         tractor.run( | ||||
|             partial( | ||||
|                 close_chans_before_nursery, | ||||
|                 arb_addr, | ||||
|                 use_signal, | ||||
|                 remote_arbiter=True, | ||||
|             ), | ||||
|             # XXX: required to use remote daemon! | ||||
|             arbiter_addr=arb_addr | ||||
|         ) | ||||
|  |  | |||
|  | @ -1,7 +1,6 @@ | |||
| ''' | ||||
| """ | ||||
| Let's make sure them docs work yah? | ||||
| 
 | ||||
| ''' | ||||
| """ | ||||
| from contextlib import contextmanager | ||||
| import itertools | ||||
| import os | ||||
|  | @ -12,17 +11,17 @@ import shutil | |||
| 
 | ||||
| import pytest | ||||
| 
 | ||||
| from conftest import ( | ||||
|     examples_dir, | ||||
| ) | ||||
| from conftest import repodir | ||||
| 
 | ||||
| 
 | ||||
| def examples_dir(): | ||||
|     """Return the abspath to the examples directory. | ||||
|     """ | ||||
|     return os.path.join(repodir(), 'examples') | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture | ||||
| def run_example_in_subproc( | ||||
|     loglevel: str, | ||||
|     testdir, | ||||
|     arb_addr: tuple[str, int], | ||||
| ): | ||||
| def run_example_in_subproc(loglevel, testdir, arb_addr): | ||||
| 
 | ||||
|     @contextmanager | ||||
|     def run(script_code): | ||||
|  | @ -32,8 +31,8 @@ def run_example_in_subproc( | |||
|             # on windows we need to create a special __main__.py which will | ||||
|             # be executed with ``python -m <modulename>`` on windows.. | ||||
|             shutil.copyfile( | ||||
|                 examples_dir() / '__main__.py', | ||||
|                 str(testdir / '__main__.py'), | ||||
|                 os.path.join(examples_dir(), '__main__.py'), | ||||
|                 os.path.join(str(testdir), '__main__.py') | ||||
|             ) | ||||
| 
 | ||||
|             # drop the ``if __name__ == '__main__'`` guard onwards from | ||||
|  | @ -81,16 +80,12 @@ def run_example_in_subproc( | |||
|     'example_script', | ||||
| 
 | ||||
|     # walk yields: (dirpath, dirnames, filenames) | ||||
|     [ | ||||
|         (p[0], f) for p in os.walk(examples_dir()) for f in p[2] | ||||
|     [(p[0], f) for p in os.walk(examples_dir()) for f in p[2] | ||||
| 
 | ||||
|         if '__' not in f | ||||
|         and f[0] != '_' | ||||
|         and 'debugging' not in p[0] | ||||
|         and 'integration' not in p[0] | ||||
|         and 'advanced_faults' not in p[0] | ||||
|     ], | ||||
| 
 | ||||
|     ids=lambda t: t[1], | ||||
| ) | ||||
| def test_example(run_example_in_subproc, example_script): | ||||
|  | @ -103,10 +98,6 @@ def test_example(run_example_in_subproc, example_script): | |||
|     test_example``. | ||||
|     """ | ||||
|     ex_file = os.path.join(*example_script) | ||||
| 
 | ||||
|     if 'rpc_bidir_streaming' in ex_file and sys.version_info < (3, 9): | ||||
|         pytest.skip("2-way streaming example requires py3.9 async with syntax") | ||||
| 
 | ||||
|     with open(ex_file, 'r') as ex: | ||||
|         code = ex.read() | ||||
| 
 | ||||
|  | @ -117,19 +108,9 @@ def test_example(run_example_in_subproc, example_script): | |||
|             # print(f'STDOUT: {out}') | ||||
| 
 | ||||
|             # if we get some gnarly output let's aggregate and raise | ||||
|             if err: | ||||
|             errmsg = err.decode() | ||||
|             errlines = errmsg.splitlines() | ||||
|                 last_error = errlines[-1] | ||||
|                 if ( | ||||
|                     'Error' in last_error | ||||
| 
 | ||||
|                     # XXX: currently we print this to console, but maybe | ||||
|                     # shouldn't eventually once we figure out what's | ||||
|                     # a better way to be explicit about aio side | ||||
|                     # cancels? | ||||
|                     and 'asyncio.exceptions.CancelledError' not in last_error | ||||
|                 ): | ||||
|             if err and 'Error' in errlines[-1]: | ||||
|                 raise Exception(errmsg) | ||||
| 
 | ||||
|             assert proc.returncode == 0 | ||||
|  |  | |||
|  | @ -1,564 +0,0 @@ | |||
| ''' | ||||
| The hipster way to force SC onto the stdlib's "async": 'infection mode'. | ||||
| 
 | ||||
| ''' | ||||
| from typing import Optional, Iterable, Union | ||||
| import asyncio | ||||
| import builtins | ||||
| import itertools | ||||
| import importlib | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| from tractor import ( | ||||
|     to_asyncio, | ||||
|     RemoteActorError, | ||||
| ) | ||||
| from tractor.trionics import BroadcastReceiver | ||||
| 
 | ||||
| 
 | ||||
| async def sleep_and_err( | ||||
|     sleep_for: float = 0.1, | ||||
| 
 | ||||
|     # just signature placeholders for compat with | ||||
|     # ``to_asyncio.open_channel_from()`` | ||||
|     to_trio: Optional[trio.MemorySendChannel] = None, | ||||
|     from_trio: Optional[asyncio.Queue] = None, | ||||
| 
 | ||||
| ): | ||||
|     if to_trio: | ||||
|         to_trio.send_nowait('start') | ||||
| 
 | ||||
|     await asyncio.sleep(sleep_for) | ||||
|     assert 0 | ||||
| 
 | ||||
| 
 | ||||
| async def sleep_forever(): | ||||
|     await asyncio.sleep(float('inf')) | ||||
| 
 | ||||
| 
 | ||||
| async def trio_cancels_single_aio_task(): | ||||
| 
 | ||||
|     # spawn an ``asyncio`` task to run a func and return result | ||||
|     with trio.move_on_after(.2): | ||||
|         await tractor.to_asyncio.run_task(sleep_forever) | ||||
| 
 | ||||
| 
 | ||||
| def test_trio_cancels_aio_on_actor_side(arb_addr): | ||||
|     ''' | ||||
|     Spawn an infected actor that is cancelled by the ``trio`` side | ||||
|     task using std cancel scope apis. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr | ||||
|         ) as n: | ||||
|             await n.run_in_actor( | ||||
|                 trio_cancels_single_aio_task, | ||||
|                 infect_asyncio=True, | ||||
|             ) | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| async def asyncio_actor( | ||||
| 
 | ||||
|     target: str, | ||||
|     expect_err: Optional[Exception] = None | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     assert tractor.current_actor().is_infected_aio() | ||||
|     target = globals()[target] | ||||
| 
 | ||||
|     if expect_err and '.' in expect_err: | ||||
|         modpath, _, name = expect_err.rpartition('.') | ||||
|         mod = importlib.import_module(modpath) | ||||
|         error_type = getattr(mod, name) | ||||
| 
 | ||||
|     else:  # toplevel builtin error type | ||||
|         error_type = builtins.__dict__.get(expect_err) | ||||
| 
 | ||||
|     try: | ||||
|         # spawn an ``asyncio`` task to run a func and return result | ||||
|         await tractor.to_asyncio.run_task(target) | ||||
| 
 | ||||
|     except BaseException as err: | ||||
|         if expect_err: | ||||
|             assert isinstance(err, error_type) | ||||
| 
 | ||||
|         raise | ||||
| 
 | ||||
| 
 | ||||
| def test_aio_simple_error(arb_addr): | ||||
|     ''' | ||||
|     Verify a simple remote asyncio error propagates back through trio | ||||
|     to the parent actor. | ||||
| 
 | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr | ||||
|         ) as n: | ||||
|             await n.run_in_actor( | ||||
|                 asyncio_actor, | ||||
|                 target='sleep_and_err', | ||||
|                 expect_err='AssertionError', | ||||
|                 infect_asyncio=True, | ||||
|             ) | ||||
| 
 | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     err = excinfo.value | ||||
|     assert isinstance(err, RemoteActorError) | ||||
|     assert err.type == AssertionError | ||||
| 
 | ||||
| 
 | ||||
| def test_tractor_cancels_aio(arb_addr): | ||||
|     ''' | ||||
|     Verify we can cancel a spawned asyncio task gracefully. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.run_in_actor( | ||||
|                 asyncio_actor, | ||||
|                 target='sleep_forever', | ||||
|                 expect_err='trio.Cancelled', | ||||
|                 infect_asyncio=True, | ||||
|             ) | ||||
|             # cancel the entire remote runtime | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_trio_cancels_aio(arb_addr): | ||||
|     ''' | ||||
|     Much like the above test with ``tractor.Portal.cancel_actor()`` | ||||
|     except we just use a standard ``trio`` cancellation api. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
| 
 | ||||
|         with trio.move_on_after(1): | ||||
|             # cancel the nursery shortly after boot | ||||
| 
 | ||||
|             async with tractor.open_nursery() as n: | ||||
|                 await n.run_in_actor( | ||||
|                     asyncio_actor, | ||||
|                     target='sleep_forever', | ||||
|                     expect_err='trio.Cancelled', | ||||
|                     infect_asyncio=True, | ||||
|                 ) | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def trio_ctx( | ||||
|     ctx: tractor.Context, | ||||
| ): | ||||
| 
 | ||||
|     await ctx.started('start') | ||||
| 
 | ||||
|     # this will block until the ``asyncio`` task sends a "first" | ||||
|     # message. | ||||
|     with trio.fail_after(2): | ||||
|         async with ( | ||||
|             trio.open_nursery() as n, | ||||
| 
 | ||||
|             tractor.to_asyncio.open_channel_from( | ||||
|                 sleep_and_err, | ||||
|             ) as (first, chan), | ||||
|         ): | ||||
| 
 | ||||
|             assert first == 'start' | ||||
| 
 | ||||
|             # spawn another asyncio task for the heck of it. | ||||
|             n.start_soon( | ||||
|                 tractor.to_asyncio.run_task, | ||||
|                 sleep_forever, | ||||
|             ) | ||||
|             await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'parent_cancels', [False, True], | ||||
|     ids='parent_actor_cancels_child={}'.format | ||||
| ) | ||||
| def test_context_spawns_aio_task_that_errors( | ||||
|     arb_addr, | ||||
|     parent_cancels: bool, | ||||
| ): | ||||
|     ''' | ||||
|     Verify that when a task spawned via an inter-task channel ctx mngr | ||||
|     errors, the error propagates back correctly from the | ||||
|     `asyncio`-side task. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
| 
 | ||||
|         with trio.fail_after(2): | ||||
|             async with tractor.open_nursery() as n: | ||||
|                 p = await n.start_actor( | ||||
|                     'aio_daemon', | ||||
|                     enable_modules=[__name__], | ||||
|                     infect_asyncio=True, | ||||
|                     # debug_mode=True, | ||||
|                     loglevel='cancel', | ||||
|                 ) | ||||
|                 async with p.open_context( | ||||
|                     trio_ctx, | ||||
|                 ) as (ctx, first): | ||||
| 
 | ||||
|                     assert first == 'start' | ||||
| 
 | ||||
|                     if parent_cancels: | ||||
|                         await p.cancel_actor() | ||||
| 
 | ||||
|                     await trio.sleep_forever() | ||||
| 
 | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     err = excinfo.value | ||||
|     assert isinstance(err, RemoteActorError) | ||||
|     if parent_cancels: | ||||
|         assert err.type == trio.Cancelled | ||||
|     else: | ||||
|         assert err.type == AssertionError | ||||
| 
 | ||||
| 
 | ||||
| async def aio_cancel(): | ||||
|     ''' | ||||
|     Cancel urself boi. | ||||
| 
 | ||||
|     ''' | ||||
|     await asyncio.sleep(0.5) | ||||
|     task = asyncio.current_task() | ||||
| 
 | ||||
|     # cancel and enter sleep | ||||
|     task.cancel() | ||||
|     await sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| def test_aio_cancelled_from_aio_causes_trio_cancelled(arb_addr): | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             await n.run_in_actor( | ||||
|                 asyncio_actor, | ||||
|                 target='aio_cancel', | ||||
|                 expect_err='tractor.to_asyncio.AsyncioCancelled', | ||||
|                 infect_asyncio=True, | ||||
|             ) | ||||
| 
 | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     # ensure boxed error is correct | ||||
|     assert excinfo.value.type == to_asyncio.AsyncioCancelled | ||||
| 
 | ||||
| 
 | ||||
| # TODO: verify open_channel_from will fail on this.. | ||||
| async def no_to_trio_in_args(): | ||||
|     pass | ||||
| 
 | ||||
| 
 | ||||
| async def push_from_aio_task( | ||||
| 
 | ||||
|     sequence: Iterable, | ||||
|     to_trio: trio.abc.SendChannel, | ||||
|     expect_cancel: bool, | ||||
|     fail_early: bool, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     try: | ||||
|         # sync caller ctx manager | ||||
|         to_trio.send_nowait(True) | ||||
| 
 | ||||
|         for i in sequence: | ||||
|             print(f'asyncio sending {i}') | ||||
|             to_trio.send_nowait(i) | ||||
|             await asyncio.sleep(0.001) | ||||
| 
 | ||||
|             if i == 50 and fail_early: | ||||
|                 raise Exception | ||||
| 
 | ||||
|         print('asyncio streamer complete!') | ||||
| 
 | ||||
|     except asyncio.CancelledError: | ||||
|         if not expect_cancel: | ||||
|             pytest.fail("aio task was cancelled unexpectedly") | ||||
|         raise | ||||
|     else: | ||||
|         if expect_cancel: | ||||
|             pytest.fail("aio task wasn't cancelled as expected!?") | ||||
| 
 | ||||
| 
 | ||||
| async def stream_from_aio( | ||||
| 
 | ||||
|     exit_early: bool = False, | ||||
|     raise_err: bool = False, | ||||
|     aio_raise_err: bool = False, | ||||
|     fan_out: bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
|     seq = range(100) | ||||
|     expect = list(seq) | ||||
| 
 | ||||
|     try: | ||||
|         pulled = [] | ||||
| 
 | ||||
|         async with to_asyncio.open_channel_from( | ||||
|             push_from_aio_task, | ||||
|             sequence=seq, | ||||
|             expect_cancel=raise_err or exit_early, | ||||
|             fail_early=aio_raise_err, | ||||
|         ) as (first, chan): | ||||
| 
 | ||||
|             assert first is True | ||||
| 
 | ||||
|             async def consume( | ||||
|                 chan: Union[ | ||||
|                     to_asyncio.LinkedTaskChannel, | ||||
|                     BroadcastReceiver, | ||||
|                 ], | ||||
|             ): | ||||
|                 async for value in chan: | ||||
|                     print(f'trio received {value}') | ||||
|                     pulled.append(value) | ||||
| 
 | ||||
|                     if value == 50: | ||||
|                         if raise_err: | ||||
|                             raise Exception | ||||
|                         elif exit_early: | ||||
|                             break | ||||
| 
 | ||||
|             if fan_out: | ||||
|                 # start a second task that gets the same stream value set. | ||||
|                 async with ( | ||||
| 
 | ||||
|                     # NOTE: this has to come first to avoid | ||||
|                     # the channel being closed before the nursery | ||||
|                     # tasks are joined.. | ||||
|                     chan.subscribe() as br, | ||||
| 
 | ||||
|                     trio.open_nursery() as n, | ||||
|                 ): | ||||
|                     n.start_soon(consume, br) | ||||
|                     await consume(chan) | ||||
| 
 | ||||
|             else: | ||||
|                 await consume(chan) | ||||
|     finally: | ||||
| 
 | ||||
|         if ( | ||||
|             not raise_err and | ||||
|             not exit_early and | ||||
|             not aio_raise_err | ||||
|         ): | ||||
|             if fan_out: | ||||
|                 # we get double the pulled values in the | ||||
|                 # ``.subscribe()`` fan out case. | ||||
|                 doubled = list(itertools.chain(*zip(expect, expect))) | ||||
|                 expect = doubled[:len(pulled)] | ||||
|                 assert list(sorted(pulled)) == expect | ||||
| 
 | ||||
|             else: | ||||
|                 assert pulled == expect | ||||
|         else: | ||||
|             assert not fan_out | ||||
|             assert pulled == expect[:51] | ||||
| 
 | ||||
|         print('trio guest mode task completed!') | ||||
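For reference, the ``doubled`` expectation above interleaves the sequence with itself, since each value is consumed once by the direct channel reader and once by the broadcast subscriber:

    from itertools import chain

    # zip pairs each element with itself, chain flattens the pairs
    assert list(chain(*zip([0, 1, 2], [0, 1, 2]))) == [0, 0, 1, 1, 2, 2]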
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'fan_out', [False, True], | ||||
|     ids='fan_out_w_chan_subscribe={}'.format | ||||
| ) | ||||
| def test_basic_interloop_channel_stream(arb_addr, fan_out): | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.run_in_actor( | ||||
|                 stream_from_aio, | ||||
|                 infect_asyncio=True, | ||||
|                 fan_out=fan_out, | ||||
|             ) | ||||
|             await portal.result() | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| # TODO: parametrize the above test and avoid the duplication here? | ||||
| def test_trio_error_cancels_intertask_chan(arb_addr): | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.run_in_actor( | ||||
|                 stream_from_aio, | ||||
|                 raise_err=True, | ||||
|                 infect_asyncio=True, | ||||
|             ) | ||||
|             # should trigger remote actor error | ||||
|             await portal.result() | ||||
| 
 | ||||
|     with pytest.raises(BaseExceptionGroup) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     # ensure boxed errors | ||||
|     for exc in excinfo.value.exceptions: | ||||
|         assert exc.type == Exception | ||||
| 
 | ||||
| 
 | ||||
| def test_trio_closes_early_and_channel_exits(arb_addr): | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.run_in_actor( | ||||
|                 stream_from_aio, | ||||
|                 exit_early=True, | ||||
|                 infect_asyncio=True, | ||||
|             ) | ||||
|             # should trigger remote actor error | ||||
|             await portal.result() | ||||
| 
 | ||||
|     # should be a quiet exit on a simple channel exit | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_aio_errors_and_channel_propagates_and_closes(arb_addr): | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.run_in_actor( | ||||
|                 stream_from_aio, | ||||
|                 aio_raise_err=True, | ||||
|                 infect_asyncio=True, | ||||
|             ) | ||||
|             # should trigger remote actor error | ||||
|             await portal.result() | ||||
| 
 | ||||
|     with pytest.raises(BaseExceptionGroup) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     # ensure boxed errors | ||||
|     for exc in excinfo.value.exceptions: | ||||
|         assert exc.type == Exception | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def trio_to_aio_echo_server( | ||||
|     ctx: tractor.Context, | ||||
| ): | ||||
| 
 | ||||
|     async def aio_echo_server( | ||||
|         to_trio: trio.MemorySendChannel, | ||||
|         from_trio: asyncio.Queue, | ||||
|     ) -> None: | ||||
| 
 | ||||
|         to_trio.send_nowait('start') | ||||
| 
 | ||||
|         while True: | ||||
|             msg = await from_trio.get() | ||||
| 
 | ||||
|             # echo the msg back | ||||
|             to_trio.send_nowait(msg) | ||||
| 
 | ||||
|             # if we get the terminate sentinel | ||||
|             # break the echo loop | ||||
|             if msg is None: | ||||
|                 print('breaking aio echo loop') | ||||
|                 break | ||||
| 
 | ||||
|         print('exiting asyncio task') | ||||
| 
 | ||||
|     async with to_asyncio.open_channel_from( | ||||
|         aio_echo_server, | ||||
|     ) as (first, chan): | ||||
| 
 | ||||
|         assert first == 'start' | ||||
|         await ctx.started(first) | ||||
| 
 | ||||
|         async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|             async for msg in stream: | ||||
|                 print(f'asyncio echoing {msg}') | ||||
|                 await chan.send(msg) | ||||
| 
 | ||||
|                 out = await chan.receive() | ||||
|                 # echo back to parent actor-task | ||||
|                 await stream.send(out) | ||||
| 
 | ||||
|                 if out is None: | ||||
|                     try: | ||||
|                         out = await chan.receive() | ||||
|                     except trio.EndOfChannel: | ||||
|                         break | ||||
|                     else: | ||||
|                         raise RuntimeError('aio channel never stopped?') | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'raise_error_mid_stream', | ||||
|     [False, Exception, KeyboardInterrupt], | ||||
|     ids='raise_error={}'.format, | ||||
| ) | ||||
| def test_echoserver_detailed_mechanics( | ||||
|     arb_addr, | ||||
|     raise_error_mid_stream, | ||||
| ): | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             p = await n.start_actor( | ||||
|                 'aio_server', | ||||
|                 enable_modules=[__name__], | ||||
|                 infect_asyncio=True, | ||||
|             ) | ||||
|             async with p.open_context( | ||||
|                 trio_to_aio_echo_server, | ||||
|             ) as (ctx, first): | ||||
| 
 | ||||
|                 assert first == 'start' | ||||
| 
 | ||||
|                 async with ctx.open_stream() as stream: | ||||
|                     for i in range(100): | ||||
|                         await stream.send(i) | ||||
|                         out = await stream.receive() | ||||
|                         assert i == out | ||||
| 
 | ||||
|                         if raise_error_mid_stream and i == 50: | ||||
|                             raise raise_error_mid_stream | ||||
| 
 | ||||
|                     # send terminate msg | ||||
|                     await stream.send(None) | ||||
|                     out = await stream.receive() | ||||
|                     assert out is None | ||||
| 
 | ||||
|                     if out is None: | ||||
|                         # ensure the stream is stopped | ||||
|                         # with trio.fail_after(0.1): | ||||
|                         try: | ||||
|                             await stream.receive() | ||||
|                         except trio.EndOfChannel: | ||||
|                             pass | ||||
|                         else: | ||||
|                             pytest.fail( | ||||
|                                 "stream wasn't stopped after sentinel?!") | ||||
| 
 | ||||
|             # TODO: the case where this blocks and | ||||
|             # is cancelled by kbi or out of task cancellation | ||||
|             await p.cancel_actor() | ||||
| 
 | ||||
|     if raise_error_mid_stream: | ||||
|         with pytest.raises(raise_error_mid_stream): | ||||
|             trio.run(main) | ||||
| 
 | ||||
|     else: | ||||
|         trio.run(main) | ||||
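The echo-server test above is the fullest demonstration of the ``to_asyncio.open_channel_from()`` pattern in this module. Distilled, it looks roughly like the sketch below; this assumes the same APIs used above and, like everything in this file, must run inside an actor spawned with ``infect_asyncio=True``:

    import asyncio
    import trio
    from tractor import to_asyncio

    async def aio_doubler(
        to_trio: trio.MemorySendChannel,
        from_trio: asyncio.Queue,
    ) -> None:
        # asyncio side: send a "first" msg to sync up, then serve requests
        to_trio.send_nowait('ready')
        while True:
            msg = await from_trio.get()
            if msg is None:  # terminate sentinel, mirroring the test above
                break
            to_trio.send_nowait(msg * 2)

    async def trio_side() -> None:
        async with to_asyncio.open_channel_from(aio_doubler) as (first, chan):
            assert first == 'ready'
            await chan.send(21)
            assert await chan.receive() == 42
            await chan.send(None)  # tell the asyncio task to exit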
|  | @ -11,18 +11,25 @@ from conftest import tractor_test | |||
| 
 | ||||
| 
 | ||||
| @pytest.mark.trio | ||||
| async def test_no_runtime(): | ||||
| async def test_no_arbitter(): | ||||
|     """An arbitter must be established before any nurseries | ||||
|     can be created. | ||||
| 
 | ||||
|     (In other words ``tractor.open_root_actor()`` must be engaged at | ||||
|     some point?) | ||||
|     (In other words ``tractor.run`` must be used instead of ``trio.run`` as is | ||||
|     done by the ``pytest-trio`` plugin.) | ||||
|     """ | ||||
|     with pytest.raises(RuntimeError) : | ||||
|         async with tractor.find_actor('doggy'): | ||||
|     with pytest.raises(RuntimeError): | ||||
|         with tractor.open_nursery(): | ||||
|             pass | ||||
| 
 | ||||
| 
 | ||||
| def test_no_main(): | ||||
|     """An async function **must** be passed to ``tractor.run()``. | ||||
|     """ | ||||
|     with pytest.raises(TypeError): | ||||
|         tractor.run(None) | ||||
| 
 | ||||
| 
 | ||||
| @tractor_test | ||||
| async def test_self_is_registered(arb_addr): | ||||
|     "Verify waiting on the arbiter to register itself using the standard api." | ||||
|  | @ -42,8 +49,7 @@ async def test_self_is_registered_localportal(arb_addr): | |||
|         assert isinstance(portal, tractor._portal.LocalPortal) | ||||
| 
 | ||||
|         with trio.fail_after(0.2): | ||||
|             sockaddr = await portal.run_from_ns( | ||||
|                     'self', 'wait_for_actor', name='root') | ||||
|             sockaddr = await portal.run_from_ns('self', 'wait_for_actor', name='root') | ||||
|             assert sockaddr[0] == arb_addr | ||||
| 
 | ||||
| 
 | ||||
|  | @ -53,10 +59,6 @@ def test_local_actor_async_func(arb_addr): | |||
|     nums = [] | ||||
| 
 | ||||
|     async def print_loop(): | ||||
| 
 | ||||
|         async with tractor.open_root_actor( | ||||
|             arbiter_addr=arb_addr, | ||||
|         ): | ||||
|         # arbiter is started in-proc if dne | ||||
|         assert tractor.current_actor().is_arbiter | ||||
| 
 | ||||
|  | @ -65,7 +67,7 @@ def test_local_actor_async_func(arb_addr): | |||
|             await trio.sleep(0.1) | ||||
| 
 | ||||
|     start = time.time() | ||||
|     trio.run(print_loop) | ||||
|     tractor.run(print_loop, arbiter_addr=arb_addr) | ||||
| 
 | ||||
|     # ensure the sleeps were actually awaited | ||||
|     assert time.time() - start >= 1 | ||||
|  |  | |||
|  | @ -1,11 +1,10 @@ | |||
| """ | ||||
| Multiple python programs invoking the runtime. | ||||
| Multiple python programs invoking ``tractor.run()`` | ||||
| """ | ||||
| import platform | ||||
| import time | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| from conftest import ( | ||||
|     tractor_test, | ||||
|  | @ -46,13 +45,8 @@ async def test_cancel_remote_arbiter(daemon, arb_addr): | |||
| def test_register_duplicate_name(daemon, arb_addr): | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|         ) as n: | ||||
| 
 | ||||
|         assert not tractor.current_actor().is_arbiter | ||||
| 
 | ||||
|         async with tractor.open_nursery() as n: | ||||
|             p1 = await n.start_actor('doggy') | ||||
|             p2 = await n.start_actor('doggy') | ||||
| 
 | ||||
|  | @ -63,4 +57,4 @@ def test_register_duplicate_name(daemon, arb_addr): | |||
| 
 | ||||
|     # run it manually since we want to start **after** | ||||
|     # the other "daemon" program | ||||
|     trio.run(main) | ||||
|     tractor.run(main, arbiter_addr=arb_addr) | ||||
|  |  | |||
|  | @ -4,22 +4,20 @@ from itertools import cycle | |||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| from tractor.experimental import msgpub | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| from tractor.testing import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| def test_type_checks(): | ||||
| 
 | ||||
|     with pytest.raises(TypeError) as err: | ||||
|         @msgpub | ||||
|         @tractor.msg.pub | ||||
|         async def no_get_topics(yo): | ||||
|             yield | ||||
| 
 | ||||
|     assert "must define a `get_topics`" in str(err.value) | ||||
| 
 | ||||
|     with pytest.raises(TypeError) as err: | ||||
|         @msgpub | ||||
|         @tractor.msg.pub | ||||
|         def not_async_gen(yo): | ||||
|             pass | ||||
| 
 | ||||
|  | @ -34,7 +32,7 @@ def is_even(i): | |||
| _get_topics = None | ||||
| 
 | ||||
| 
 | ||||
| @msgpub | ||||
| @tractor.msg.pub | ||||
| async def pubber(get_topics, seed=10): | ||||
| 
 | ||||
|     # ensure topic subscriptions are as expected | ||||
|  | @ -48,9 +46,8 @@ async def pubber(get_topics, seed=10): | |||
| 
 | ||||
| 
 | ||||
| async def subs( | ||||
|     which, | ||||
|     pub_actor_name, | ||||
|     seed=10, | ||||
|     which, pub_actor_name, seed=10, | ||||
|     portal=None, | ||||
|     task_status=trio.TASK_STATUS_IGNORED, | ||||
| ): | ||||
|     if len(which) == 1: | ||||
|  | @ -63,15 +60,12 @@ async def subs( | |||
|         def pred(i): | ||||
|             return isinstance(i, int) | ||||
| 
 | ||||
|     # TODO: https://github.com/goodboy/tractor/issues/207 | ||||
|     async with tractor.wait_for_actor(pub_actor_name) as portal: | ||||
|         assert portal | ||||
| 
 | ||||
|         async with portal.open_stream_from( | ||||
|     async with tractor.find_actor(pub_actor_name) as portal: | ||||
|         stream = await portal.run( | ||||
|             pubber, | ||||
|             topics=which, | ||||
|             seed=seed, | ||||
|         ) as stream: | ||||
|         ) | ||||
|         task_status.started(stream) | ||||
|         times = 10 | ||||
|         count = 0 | ||||
|  | @ -85,11 +79,12 @@ async def subs( | |||
| 
 | ||||
|         await stream.aclose() | ||||
| 
 | ||||
|         async with portal.open_stream_from( | ||||
|         stream = await portal.run( | ||||
|             pubber, | ||||
|             topics=['odd'], | ||||
|             seed=seed, | ||||
|         ) as stream: | ||||
|         ) | ||||
| 
 | ||||
|         await stream.__anext__() | ||||
|         count = 0 | ||||
|         # async with aclosing(stream) as stream: | ||||
|  | @ -105,7 +100,7 @@ async def subs( | |||
|             await stream.aclose() | ||||
| 
 | ||||
| 
 | ||||
| @msgpub(tasks=['one', 'two']) | ||||
| @tractor.msg.pub(tasks=['one', 'two']) | ||||
| async def multilock_pubber(get_topics): | ||||
|     yield {'doggy': 10} | ||||
| 
 | ||||
|  | @ -133,10 +128,11 @@ async def test_required_args(callwith_expecterror): | |||
|             await func(**kwargs) | ||||
|     else: | ||||
|         async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|             portal = await n.start_actor( | ||||
|             # await func(**kwargs) | ||||
|             portal = await n.run_in_actor( | ||||
|                 multilock_pubber, | ||||
|                 name='pubber', | ||||
|                 enable_modules=[__name__], | ||||
|                 **kwargs | ||||
|             ) | ||||
| 
 | ||||
|             async with tractor.wait_for_actor('pubber'): | ||||
|  | @ -144,15 +140,9 @@ async def test_required_args(callwith_expecterror): | |||
| 
 | ||||
|             await trio.sleep(0.5) | ||||
| 
 | ||||
|             async with portal.open_stream_from( | ||||
|                 multilock_pubber, | ||||
|                 **kwargs | ||||
|             ) as stream: | ||||
|                 async for val in stream: | ||||
|             async for val in await portal.result(): | ||||
|                 assert val == {'doggy': 10} | ||||
| 
 | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'pub_actor', | ||||
|  | @ -169,10 +159,7 @@ def test_multi_actor_subs_arbiter_pub( | |||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|             enable_modules=[__name__], | ||||
|         ) as n: | ||||
|         async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|             name = 'root' | ||||
| 
 | ||||
|  | @ -180,9 +167,8 @@ def test_multi_actor_subs_arbiter_pub( | |||
|                 # start the publisher as a daemon | ||||
|                 master_portal = await n.start_actor( | ||||
|                     'streamer', | ||||
|                     enable_modules=[__name__], | ||||
|                     rpc_module_paths=[__name__], | ||||
|                 ) | ||||
|                 name = 'streamer' | ||||
| 
 | ||||
|             even_portal = await n.run_in_actor( | ||||
|                 subs, | ||||
|  | @ -241,6 +227,7 @@ def test_multi_actor_subs_arbiter_pub( | |||
|                 assert 'even' not in get_topics() | ||||
| 
 | ||||
|             await odd_portal.cancel_actor() | ||||
|             await trio.sleep(2) | ||||
| 
 | ||||
|             if pub_actor == 'arbiter': | ||||
|                 while get_topics(): | ||||
|  | @ -250,7 +237,11 @@ def test_multi_actor_subs_arbiter_pub( | |||
|             else: | ||||
|                 await master_portal.cancel_actor() | ||||
| 
 | ||||
|     trio.run(main) | ||||
|     tractor.run( | ||||
|         main, | ||||
|         arbiter_addr=arb_addr, | ||||
|         rpc_module_paths=[__name__], | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| def test_single_subactor_pub_multitask_subs( | ||||
|  | @ -259,14 +250,11 @@ def test_single_subactor_pub_multitask_subs( | |||
| ): | ||||
|     async def main(): | ||||
| 
 | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|             enable_modules=[__name__], | ||||
|         ) as n: | ||||
|         async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|             portal = await n.start_actor( | ||||
|                 'streamer', | ||||
|                 enable_modules=[__name__], | ||||
|                 rpc_module_paths=[__name__], | ||||
|             ) | ||||
|             async with tractor.wait_for_actor('streamer'): | ||||
|                 # block until 2nd actor is initialized | ||||
|  | @ -290,4 +278,8 @@ def test_single_subactor_pub_multitask_subs( | |||
| 
 | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
|     trio.run(main) | ||||
|     tractor.run( | ||||
|         main, | ||||
|         arbiter_addr=arb_addr, | ||||
|         rpc_module_paths=[__name__], | ||||
|     ) | ||||
|  |  | |||
|  | @ -1,182 +0,0 @@ | |||
| ''' | ||||
| Async context manager cache api testing: ``trionics.maybe_open_context():`` | ||||
| 
 | ||||
| ''' | ||||
| from contextlib import asynccontextmanager as acm | ||||
| import platform | ||||
| from typing import Awaitable | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| _resource: int = 0 | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def maybe_increment_counter(task_name: str): | ||||
|     global _resource | ||||
| 
 | ||||
|     _resource += 1 | ||||
|     await trio.lowlevel.checkpoint() | ||||
|     yield _resource | ||||
|     await trio.lowlevel.checkpoint() | ||||
|     _resource -= 1 | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'key_on', | ||||
|     ['key_value', 'kwargs'], | ||||
|     ids="key_on={}".format, | ||||
| ) | ||||
| def test_resource_only_entered_once(key_on): | ||||
|     global _resource | ||||
|     _resource = 0 | ||||
| 
 | ||||
|     kwargs = {} | ||||
|     key = None | ||||
|     if key_on == 'key_value': | ||||
|         key = 'some_common_key' | ||||
| 
 | ||||
|     async def main(): | ||||
|         cache_active: bool = False | ||||
| 
 | ||||
|         async def enter_cached_mngr(name: str): | ||||
|             nonlocal cache_active | ||||
| 
 | ||||
|             if key_on == 'kwargs': | ||||
|                 # make a common kwargs input to key on it | ||||
|                 kwargs = {'task_name': 'same_task_name'} | ||||
|                 assert key is None | ||||
|             else: | ||||
|                 # different task names per task will be used | ||||
|                 kwargs = {'task_name': name} | ||||
| 
 | ||||
|             async with tractor.trionics.maybe_open_context( | ||||
|                 maybe_increment_counter, | ||||
|                 kwargs=kwargs, | ||||
|                 key=key, | ||||
| 
 | ||||
|             ) as (cache_hit, resource): | ||||
|                 if cache_hit: | ||||
|                     try: | ||||
|                         cache_active = True | ||||
|                         assert resource == 1 | ||||
|                         await trio.sleep_forever() | ||||
|                     finally: | ||||
|                         cache_active = False | ||||
|                 else: | ||||
|                     assert resource == 1 | ||||
|                     await trio.sleep_forever() | ||||
| 
 | ||||
|         with trio.move_on_after(0.5): | ||||
|             async with ( | ||||
|                 tractor.open_root_actor(), | ||||
|                 trio.open_nursery() as n, | ||||
|             ): | ||||
| 
 | ||||
|                 for i in range(10): | ||||
|                     n.start_soon(enter_cached_mngr, f'task_{i}') | ||||
|                     await trio.sleep(0.001) | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def streamer( | ||||
|     ctx: tractor.Context, | ||||
|     seq: list[int] = list(range(1000)), | ||||
| ) -> None: | ||||
| 
 | ||||
|     await ctx.started() | ||||
|     async with ctx.open_stream() as stream: | ||||
|         for val in seq: | ||||
|             await stream.send(val) | ||||
|             await trio.sleep(0.001) | ||||
| 
 | ||||
|     print('producer finished') | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def open_stream() -> Awaitable[tractor.MsgStream]: | ||||
| 
 | ||||
|     async with tractor.open_nursery() as tn: | ||||
|         portal = await tn.start_actor('streamer', enable_modules=[__name__]) | ||||
|         async with ( | ||||
|             portal.open_context(streamer) as (ctx, first), | ||||
|             ctx.open_stream() as stream, | ||||
|         ): | ||||
|             yield stream | ||||
| 
 | ||||
|         await portal.cancel_actor() | ||||
|     print('CANCELLED STREAMER') | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def maybe_open_stream(taskname: str): | ||||
|     async with tractor.trionics.maybe_open_context( | ||||
|         # NOTE: all secondary tasks should cache hit on the same key | ||||
|         acm_func=open_stream, | ||||
|     ) as (cache_hit, stream): | ||||
| 
 | ||||
|         if cache_hit: | ||||
|             print(f'{taskname} loaded from cache') | ||||
| 
 | ||||
|             # add a new broadcast subscription for the quote stream | ||||
|             # if this feed is already allocated by the first | ||||
|             # task that entered | ||||
|             async with stream.subscribe() as bstream: | ||||
|                 yield bstream | ||||
|         else: | ||||
|             # yield the actual stream | ||||
|             yield stream | ||||
| 
 | ||||
| 
 | ||||
| def test_open_local_sub_to_stream(): | ||||
|     ''' | ||||
|     Verify a single inter-actor stream can be fanned-out and shared to | ||||
|     N local tasks using ``trionics.maybe_open_context():``. | ||||
| 
 | ||||
|     ''' | ||||
|     timeout = 3 if platform.system() != "Windows" else 10 | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         full = list(range(1000)) | ||||
| 
 | ||||
|         async def get_sub_and_pull(taskname: str): | ||||
|             async with ( | ||||
|                 maybe_open_stream(taskname) as stream, | ||||
|             ): | ||||
|                 if '0' in taskname: | ||||
|                     assert isinstance(stream, tractor.MsgStream) | ||||
|                 else: | ||||
|                     assert isinstance( | ||||
|                         stream, | ||||
|                         tractor.trionics.BroadcastReceiver | ||||
|                     ) | ||||
| 
 | ||||
|                 first = await stream.receive() | ||||
|                 print(f'{taskname} started with value {first}') | ||||
|                 seq = [] | ||||
|                 async for msg in stream: | ||||
|                     seq.append(msg) | ||||
| 
 | ||||
|                 assert set(seq).issubset(set(full)) | ||||
|             print(f'{taskname} finished') | ||||
| 
 | ||||
|         with trio.fail_after(timeout): | ||||
|             # TODO: turns out this isn't multi-task entrant XD | ||||
|             # We probably need an idempotent entry semantic? | ||||
|             async with tractor.open_root_actor(): | ||||
|                 async with ( | ||||
|                     trio.open_nursery() as nurse, | ||||
|                 ): | ||||
|                     for i in range(10): | ||||
|                         nurse.start_soon(get_sub_and_pull, f'task_{i}') | ||||
|                         await trio.sleep(0.001) | ||||
| 
 | ||||
|                 print('all consumer tasks finished') | ||||
| 
 | ||||
|     trio.run(main) | ||||
|  | @ -74,15 +74,11 @@ def test_rpc_errors(arb_addr, to_call, testdir): | |||
|         remote_err = inside_err | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         # spawn a subactor which calls us back | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|             enable_modules=exposed_mods.copy(), | ||||
|         ) as n: | ||||
| 
 | ||||
|         actor = tractor.current_actor() | ||||
|         assert actor.is_arbiter | ||||
| 
 | ||||
|         # spawn a subactor which calls us back | ||||
|         async with tractor.open_nursery() as n: | ||||
|             await n.run_in_actor( | ||||
|                 sleep_back_actor, | ||||
|                 actor_name=subactor_requests_to, | ||||
|  | @ -94,11 +90,15 @@ def test_rpc_errors(arb_addr, to_call, testdir): | |||
|                 func_name=funcname, | ||||
|                 exposed_mods=exposed_mods, | ||||
|                 func_defined=True if func_defined else False, | ||||
|                 enable_modules=subactor_exposed_mods, | ||||
|                 rpc_module_paths=subactor_exposed_mods, | ||||
|             ) | ||||
| 
 | ||||
|     def run(): | ||||
|         trio.run(main) | ||||
|         tractor.run( | ||||
|             main, | ||||
|             arbiter_addr=arb_addr, | ||||
|             rpc_module_paths=exposed_mods.copy(), | ||||
|         ) | ||||
| 
 | ||||
|     # handle both parameterized cases | ||||
|     if exposed_mods and func_defined: | ||||
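This hunk is one instance of the refactor most of this compare boils down to: the removed `tractor.run()` entrypoint (right-hand side), which accepted runtime config such as `arbiter_addr` and `rpc_module_paths` directly, versus plain `trio.run()` plus an explicit nursery open (left-hand side). A hedged before/after sketch using a placeholder address:

```python
import trio
import tractor


async def main() -> None:
    # newer style: runtime kwargs that used to be handed to
    # ``tractor.run()`` now go to the opener directly.
    async with tractor.open_nursery(
        arbiter_addr=('127.0.0.1', 1616),  # placeholder address
        enable_modules=[__name__],
    ) as an:
        ...


if __name__ == '__main__':
    trio.run(main)

    # older (removed) style, roughly equivalent:
    #   tractor.run(
    #       main,
    #       arbiter_addr=('127.0.0.1', 1616),
    #       rpc_module_paths=[__name__],
    #   )
```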
|  |  | |||
|  | @ -1,73 +0,0 @@ | |||
| """ | ||||
| Verifying internal runtime state and undocumented extras. | ||||
| 
 | ||||
| """ | ||||
| import os | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| _file_path: str = '' | ||||
| 
 | ||||
| 
 | ||||
| def unlink_file(): | ||||
|     print('Removing tmp file!') | ||||
|     os.remove(_file_path) | ||||
| 
 | ||||
| 
 | ||||
| async def crash_and_clean_tmpdir( | ||||
|     tmp_file_path: str, | ||||
|     error: bool = True, | ||||
| ): | ||||
|     global _file_path | ||||
|     _file_path = tmp_file_path | ||||
| 
 | ||||
|     actor = tractor.current_actor() | ||||
|     actor.lifetime_stack.callback(unlink_file) | ||||
| 
 | ||||
|     assert os.path.isfile(tmp_file_path) | ||||
|     await trio.sleep(0.1) | ||||
|     if error: | ||||
|         assert 0 | ||||
|     else: | ||||
|         actor.cancel_soon() | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'error_in_child', | ||||
|     [True, False], | ||||
| ) | ||||
| @tractor_test | ||||
| async def test_lifetime_stack_wipes_tmpfile( | ||||
|     tmp_path, | ||||
|     error_in_child: bool, | ||||
| ): | ||||
|     child_tmp_file = tmp_path / "child.txt" | ||||
|     child_tmp_file.touch() | ||||
|     assert child_tmp_file.exists() | ||||
|     path = str(child_tmp_file) | ||||
| 
 | ||||
|     try: | ||||
|         with trio.move_on_after(0.5): | ||||
|             async with tractor.open_nursery() as n: | ||||
|                     await (  # inlined portal | ||||
|                         await n.run_in_actor( | ||||
|                             crash_and_clean_tmpdir, | ||||
|                             tmp_file_path=path, | ||||
|                             error=error_in_child, | ||||
|                         ) | ||||
|                     ).result() | ||||
| 
 | ||||
|     except ( | ||||
|         tractor.RemoteActorError, | ||||
|         tractor.BaseExceptionGroup, | ||||
|     ): | ||||
|         pass | ||||
| 
 | ||||
|     # tmp file should have been wiped by | ||||
|     # teardown stack. | ||||
|     assert not child_tmp_file.exists() | ||||
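A hedged distillation of the guarantee this test verifies: `Actor.lifetime_stack` acts like a `contextlib.ExitStack` which the runtime unwinds at actor teardown, so cleanup registered on it runs whether the actor's task errors, is cancelled, or exits cleanly. The `open_scratch_file()` helper below is hypothetical and is meant to run inside an actor (e.g. via `run_in_actor()` as above):

```python
import os

import tractor


async def open_scratch_file(path: str) -> None:
    '''
    Write a scratch file which is guaranteed to be unlinked at actor
    teardown via the runtime's ``lifetime_stack``.

    '''
    with open(path, 'w') as f:
        f.write('scratch state')

    actor = tractor.current_actor()
    # ``ExitStack.callback()``-style registration; fires on teardown
    # even when this actor crashes (as the parametrized test shows).
    actor.lifetime_stack.callback(lambda: os.remove(path))
```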
|  | @ -1,8 +1,7 @@ | |||
| """ | ||||
| Spawning basics | ||||
| 
 | ||||
| """ | ||||
| from typing import Optional | ||||
| from functools import partial | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
|  | @ -13,36 +12,23 @@ from conftest import tractor_test | |||
| data_to_pass_down = {'doggy': 10, 'kitty': 4} | ||||
| 
 | ||||
| 
 | ||||
| async def spawn( | ||||
|     is_arbiter: bool, | ||||
|     data: dict, | ||||
|     arb_addr: tuple[str, int], | ||||
| ): | ||||
| async def spawn(is_arbiter, data): | ||||
|     namespaces = [__name__] | ||||
| 
 | ||||
|     await trio.sleep(0.1) | ||||
| 
 | ||||
|     async with tractor.open_root_actor( | ||||
|         arbiter_addr=arb_addr, | ||||
|     ): | ||||
| 
 | ||||
|     actor = tractor.current_actor() | ||||
|     assert actor.is_arbiter == is_arbiter | ||||
|         data = data_to_pass_down | ||||
|     assert data == data_to_pass_down | ||||
| 
 | ||||
|     if actor.is_arbiter: | ||||
| 
 | ||||
|             async with tractor.open_nursery( | ||||
|             ) as nursery: | ||||
| 
 | ||||
|         async with tractor.open_nursery() as nursery: | ||||
|             # forks here | ||||
|             portal = await nursery.run_in_actor( | ||||
|                 spawn, | ||||
|                 is_arbiter=False, | ||||
|                 name='sub-actor', | ||||
|                 data=data, | ||||
|                     arb_addr=arb_addr, | ||||
|                     enable_modules=namespaces, | ||||
|                 rpc_module_paths=namespaces, | ||||
|             ) | ||||
| 
 | ||||
|             assert len(nursery._children) == 1 | ||||
|  | @ -56,16 +42,16 @@ async def spawn( | |||
| 
 | ||||
| 
 | ||||
| def test_local_arbiter_subactor_global_state(arb_addr): | ||||
|     result = trio.run( | ||||
|         spawn, | ||||
|     result = tractor.run( | ||||
|         partial(spawn, data=data_to_pass_down), | ||||
|         True, | ||||
|         data_to_pass_down, | ||||
|         arb_addr, | ||||
|         name='arbiter', | ||||
|         arbiter_addr=arb_addr, | ||||
|     ) | ||||
|     assert result == 10 | ||||
| 
 | ||||
| 
 | ||||
| async def movie_theatre_question(): | ||||
| def movie_theatre_question(): | ||||
|     """A question asked in a dark theatre, in a tangent | ||||
|     (errr, I mean different) process. | ||||
|     """ | ||||
|  | @ -81,7 +67,7 @@ async def test_movie_theatre_convo(start_method): | |||
|         portal = await n.start_actor( | ||||
|             'frank', | ||||
|             # enable the actor to run funcs from this current module | ||||
|             enable_modules=[__name__], | ||||
|             rpc_module_paths=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         print(await portal.run(movie_theatre_question)) | ||||
|  | @ -94,38 +80,24 @@ async def test_movie_theatre_convo(start_method): | |||
|         await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| async def cellar_door(return_value: Optional[str]): | ||||
|     return return_value | ||||
| def cellar_door(): | ||||
|     return "Dang that's beautiful" | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'return_value', ["Dang that's beautiful", None], | ||||
|     ids=['return_str', 'return_None'], | ||||
| ) | ||||
| @tractor_test | ||||
| async def test_most_beautiful_word( | ||||
|     start_method, | ||||
|     return_value | ||||
| ): | ||||
|     ''' | ||||
|     The main ``tractor`` routine. | ||||
| 
 | ||||
|     ''' | ||||
|     with trio.fail_after(1): | ||||
| async def test_most_beautiful_word(start_method): | ||||
|     """The main ``tractor`` routine. | ||||
|     """ | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         portal = await n.run_in_actor( | ||||
|             cellar_door, | ||||
|                 return_value=return_value, | ||||
|             name='some_linguist', | ||||
|         ) | ||||
| 
 | ||||
|             print(await portal.result()) | ||||
|     # The ``async with`` will unblock here since the 'some_linguist' | ||||
|     # actor has completed its main task ``cellar_door``. | ||||
| 
 | ||||
|     # this should pull the cached final result already captured during | ||||
|     # the nursery block exit. | ||||
|     print(await portal.result()) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -142,27 +114,26 @@ def test_loglevel_propagated_to_subactor( | |||
|     capfd, | ||||
|     arb_addr, | ||||
| ): | ||||
|     if start_method == 'mp_forkserver': | ||||
|     if start_method == 'forkserver': | ||||
|         pytest.skip( | ||||
|             "a bug with `capfd` seems to make forkserver capture not work?") | ||||
| 
 | ||||
|     level = 'critical' | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery( | ||||
|             name='arbiter', | ||||
|             start_method=start_method, | ||||
|             arbiter_addr=arb_addr, | ||||
| 
 | ||||
|         ) as tn: | ||||
|         async with tractor.open_nursery() as tn: | ||||
|             await tn.run_in_actor( | ||||
|                 check_loglevel, | ||||
|                 loglevel=level, | ||||
|                 level=level, | ||||
|             ) | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
|     tractor.run( | ||||
|         main, | ||||
|         name='arbiter', | ||||
|         loglevel=level, | ||||
|         start_method=start_method, | ||||
|         arbiter_addr=arb_addr, | ||||
|     ) | ||||
|     # ensure subactor spits log message on stderr | ||||
|     captured = capfd.readouterr() | ||||
|     assert 'yoyoyo' in captured.err | ||||
|  |  | |||
|  | @ -7,10 +7,9 @@ import platform | |||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| from tractor.testing import tractor_test | ||||
| import pytest | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| def test_must_define_ctx(): | ||||
| 
 | ||||
|  | @ -33,16 +32,13 @@ async def async_gen_stream(sequence): | |||
| 
 | ||||
|     # block indefinitely waiting to be cancelled by ``aclose()`` call | ||||
|     with trio.CancelScope() as cs: | ||||
|         await trio.sleep_forever() | ||||
|         await trio.sleep(float('inf')) | ||||
|         assert 0 | ||||
|     assert cs.cancelled_caught | ||||
| 
 | ||||
| 
 | ||||
| @tractor.stream | ||||
| async def context_stream( | ||||
|     ctx: tractor.Context, | ||||
|     sequence | ||||
| ): | ||||
| async def context_stream(ctx, sequence): | ||||
|     for i in sequence: | ||||
|         await ctx.send_yield(i) | ||||
|         await trio.sleep(0.1) | ||||
|  | @ -54,38 +50,26 @@ async def context_stream( | |||
|     assert cs.cancelled_caught | ||||
| 
 | ||||
| 
 | ||||
| async def stream_from_single_subactor( | ||||
|     arb_addr, | ||||
|     start_method, | ||||
|     stream_func, | ||||
| ): | ||||
| async def stream_from_single_subactor(stream_func): | ||||
|     """Verify we can spawn a daemon actor and retrieve streamed data. | ||||
|     """ | ||||
|     # only one per host address, spawns an actor if None | ||||
| 
 | ||||
|     async with tractor.open_nursery( | ||||
|         arbiter_addr=arb_addr, | ||||
|         start_method=start_method, | ||||
|     ) as nursery: | ||||
| 
 | ||||
|     async with tractor.find_actor('streamerd') as portals: | ||||
| 
 | ||||
|         if not portals: | ||||
| 
 | ||||
|             # only one per host address, spawns an actor if None | ||||
|             async with tractor.open_nursery() as nursery: | ||||
|                 # no brokerd actor found | ||||
|                 portal = await nursery.start_actor( | ||||
|                     'streamerd', | ||||
|                     enable_modules=[__name__], | ||||
|                     rpc_module_paths=[__name__], | ||||
|                 ) | ||||
| 
 | ||||
|                 seq = range(10) | ||||
| 
 | ||||
|                 with trio.fail_after(5): | ||||
|                     async with portal.open_stream_from( | ||||
|                         stream_func, | ||||
|                 stream = await portal.run( | ||||
|                     stream_func,  # one of the funcs above | ||||
|                     sequence=list(seq),  # has to be msgpack serializable | ||||
|                     ) as stream: | ||||
| 
 | ||||
|                 ) | ||||
|                 # it'd sure be nice to have an asyncitertools here... | ||||
|                 iseq = iter(seq) | ||||
|                 ival = next(iseq) | ||||
|  | @ -102,14 +86,12 @@ async def stream_from_single_subactor( | |||
| 
 | ||||
|                 await trio.sleep(0.3) | ||||
| 
 | ||||
|                         # ensure EOC signalled-state translates | ||||
|                         # XXX: not really sure this is correct, | ||||
|                         # shouldn't it be a `ClosedResourceError`? | ||||
|                 try: | ||||
|                     await stream.__anext__() | ||||
|                 except StopAsyncIteration: | ||||
|                     # stop all spawned subactors | ||||
|                     await portal.cancel_actor() | ||||
|                 # await nursery.cancel() | ||||
| 
 | ||||
| 
 | ||||
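The left-hand side of the hunks above is the newer streaming API; a self-contained sketch of its shape, assuming (as this file shows elsewhere) that `Portal.open_stream_from()` accepts a plain async-generator function and signals end-of-stream by raising `StopAsyncIteration`:

```python
import trio
import tractor


async def counter(sequence):
    # a plain async generator exposed to the spawning actor
    for i in sequence:
        yield i


async def main() -> None:
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'streamerd',
            enable_modules=[__name__],
        )
        async with portal.open_stream_from(
            counter,
            sequence=list(range(10)),
        ) as stream:
            async for value in stream:
                print(value)

        # the stream hit EOC above, so tear down the daemon
        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)
```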
| @pytest.mark.parametrize( | ||||
|  | @ -118,13 +100,13 @@ async def stream_from_single_subactor( | |||
| def test_stream_from_single_subactor(arb_addr, start_method, stream_func): | ||||
|     """Verify streaming from a spawned async generator. | ||||
|     """ | ||||
|     trio.run( | ||||
|     tractor.run( | ||||
|         partial( | ||||
|             stream_from_single_subactor, | ||||
|             arb_addr, | ||||
|             start_method, | ||||
|             stream_func=stream_func, | ||||
|         ), | ||||
|         arbiter_addr=arb_addr, | ||||
|         start_method=start_method, | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -136,7 +118,7 @@ async def stream_data(seed): | |||
|         yield i | ||||
| 
 | ||||
|         # trigger scheduler to simulate practical usage | ||||
|         await trio.sleep(0.0001) | ||||
|         await trio.sleep(0) | ||||
| 
 | ||||
| 
 | ||||
| # this is the third actor; the aggregator | ||||
|  | @ -150,7 +132,7 @@ async def aggregate(seed): | |||
|             # fork point | ||||
|             portal = await nursery.start_actor( | ||||
|                 name=f'streamer_{i}', | ||||
|                 enable_modules=[__name__], | ||||
|                 rpc_module_paths=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             portals.append(portal) | ||||
|  | @ -159,12 +141,9 @@ async def aggregate(seed): | |||
| 
 | ||||
|         async def push_to_chan(portal, send_chan): | ||||
|             async with send_chan: | ||||
| 
 | ||||
|                 async with portal.open_stream_from( | ||||
|                     stream_data, seed=seed, | ||||
|                 ) as stream: | ||||
| 
 | ||||
|                     async for value in stream: | ||||
|                 async for value in await portal.run( | ||||
|                     __name__, 'stream_data', seed=seed | ||||
|                 ): | ||||
|                     # leverage trio's built-in backpressure | ||||
|                     await send_chan.send(value) | ||||
| 
 | ||||
|  | @ -204,29 +183,26 @@ async def a_quadruple_example(): | |||
|         seed = int(1e3) | ||||
|         pre_start = time.time() | ||||
| 
 | ||||
|         portal = await nursery.start_actor( | ||||
|         portal = await nursery.run_in_actor( | ||||
|             aggregate, | ||||
|             seed=seed, | ||||
|             name='aggregator', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         start = time.time() | ||||
|         # the portal call returns exactly what you'd expect | ||||
|         # as if the remote "aggregate" function was called locally | ||||
|         result_stream = [] | ||||
| 
 | ||||
|         async with portal.open_stream_from(aggregate, seed=seed) as stream: | ||||
|             async for value in stream: | ||||
|         async for value in await portal.result(): | ||||
|             result_stream.append(value) | ||||
| 
 | ||||
|         print(f"STREAM TIME = {time.time() - start}") | ||||
|         print(f"STREAM + SPAWN TIME = {time.time() - pre_start}") | ||||
|         assert result_stream == list(range(seed)) | ||||
|         await portal.cancel_actor() | ||||
|         return result_stream | ||||
| 
 | ||||
| 
 | ||||
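The `aggregate()` actor above leans on a pure-`trio` fan-in idiom worth isolating: clone the send side of a memory channel once per producer, close the parent handle, and the receive side then yields the merged stream (with built-in backpressure) until every clone has closed. A minimal sketch:

```python
import trio


async def producer(
    send_chan: trio.MemorySendChannel,
    offset: int,
) -> None:
    async with send_chan:  # closes this clone when done
        for i in range(3):
            await send_chan.send(offset + i)


async def main() -> None:
    send_chan, recv_chan = trio.open_memory_channel(4)

    async with trio.open_nursery() as n:
        async with send_chan:  # close the parent handle after spawning
            for offset in (0, 100):
                n.start_soon(producer, send_chan.clone(), offset)

        async with recv_chan:
            # iteration ends once *all* send-side clones have closed
            async for value in recv_chan:
                print(value)


if __name__ == '__main__':
    trio.run(main)
```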
| async def cancel_after(wait, arb_addr): | ||||
|     async with tractor.open_root_actor(arbiter_addr=arb_addr): | ||||
| async def cancel_after(wait): | ||||
|     with trio.move_on_after(wait): | ||||
|         return await a_quadruple_example() | ||||
| 
 | ||||
|  | @ -240,7 +216,7 @@ def time_quad_ex(arb_addr, ci_env, spawn_backend): | |||
| 
 | ||||
|     timeout = 7 if platform.system() in ('Windows', 'Darwin') else 4 | ||||
|     start = time.time() | ||||
|     results = trio.run(cancel_after, timeout, arb_addr) | ||||
|     results = tractor.run(cancel_after, timeout, arbiter_addr=arb_addr) | ||||
|     diff = time.time() - start | ||||
|     assert results | ||||
|     return results, diff | ||||
|  | @ -251,7 +227,7 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend): | |||
| 
 | ||||
|     results, diff = time_quad_ex | ||||
|     assert results | ||||
|     this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 3 | ||||
|     this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 2.5 | ||||
|     assert diff < this_fast | ||||
| 
 | ||||
| 
 | ||||
|  | @ -267,7 +243,7 @@ def test_not_fast_enough_quad( | |||
|     """ | ||||
|     results, diff = time_quad_ex | ||||
|     delay = max(diff - cancel_delay, 0) | ||||
|     results = trio.run(cancel_after, delay, arb_addr) | ||||
|     results = tractor.run(cancel_after, delay, arbiter_addr=arb_addr) | ||||
|     system = platform.system() | ||||
|     if system in ('Windows', 'Darwin') and results is not None: | ||||
|         # In CI environments it seems later runs are quicker than the first | ||||
|  | @ -296,14 +272,11 @@ async def test_respawn_consumer_task( | |||
| 
 | ||||
|     async with tractor.open_nursery() as n: | ||||
| 
 | ||||
|         portal = await n.start_actor( | ||||
|             name='streamer', | ||||
|             enable_modules=[__name__] | ||||
|         ) | ||||
|         async with portal.open_stream_from( | ||||
|         stream = await(await n.run_in_actor( | ||||
|             stream_data, | ||||
|             seed=11, | ||||
|         ) as stream: | ||||
|             name='streamer', | ||||
|         )).result() | ||||
| 
 | ||||
|         expect = set(range(11)) | ||||
|         received = [] | ||||
|  | @ -317,7 +290,7 @@ async def test_respawn_consumer_task( | |||
|                 task_status.started(cs) | ||||
| 
 | ||||
|                 # shield stream's underlying channel from cancellation | ||||
|                     # with stream.shield(): | ||||
|                 with stream.shield(): | ||||
| 
 | ||||
|                     async for v in stream: | ||||
|                         print(f'from stream: {v}') | ||||
|  | @ -344,9 +317,3 @@ async def test_respawn_consumer_task( | |||
|                 if not expect: | ||||
|                     print("all values streamed, BREAKING") | ||||
|                     break | ||||
| 
 | ||||
|                 cs.cancel() | ||||
| 
 | ||||
|         # TODO: this is justification for a | ||||
|         # ``ActorNursery.stream_from_actor()`` helper? | ||||
|         await portal.cancel_actor() | ||||
|  | @ -1,514 +0,0 @@ | |||
| """ | ||||
| Broadcast channels for fan-out to local tasks. | ||||
| 
 | ||||
| """ | ||||
| from contextlib import asynccontextmanager | ||||
| from functools import partial | ||||
| from itertools import cycle | ||||
| import time | ||||
| from typing import Optional | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| from trio.lowlevel import current_task | ||||
| import tractor | ||||
| from tractor.trionics import ( | ||||
|     broadcast_receiver, | ||||
|     Lagged, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def echo_sequences( | ||||
| 
 | ||||
|     ctx:  tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
|     '''Bidir streaming endpoint which will stream | ||||
|     back any sequence it is sent item-wise. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
| 
 | ||||
|     async with ctx.open_stream() as stream: | ||||
|         async for sequence in stream: | ||||
|             seq = list(sequence) | ||||
|             for value in seq: | ||||
|                 await stream.send(value) | ||||
|                 print(f'producer sent {value}') | ||||
| 
 | ||||
| 
 | ||||
| async def ensure_sequence( | ||||
| 
 | ||||
|     stream: tractor.MsgStream, | ||||
|     sequence: list, | ||||
|     delay: Optional[float] = None, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     name = current_task().name | ||||
|     async with stream.subscribe() as bcaster: | ||||
|         assert not isinstance(bcaster, type(stream)) | ||||
|         async for value in bcaster: | ||||
|             print(f'{name} rx: {value}') | ||||
|             assert value == sequence[0] | ||||
|             sequence.remove(value) | ||||
| 
 | ||||
|             if delay: | ||||
|                 await trio.sleep(delay) | ||||
| 
 | ||||
|             if not sequence: | ||||
|                 # fully consumed | ||||
|                 break | ||||
| 
 | ||||
| 
 | ||||
| @asynccontextmanager | ||||
| async def open_sequence_streamer( | ||||
| 
 | ||||
|     sequence: list[int], | ||||
|     arb_addr: tuple[str, int], | ||||
|     start_method: str, | ||||
| 
 | ||||
| ) -> tractor.MsgStream: | ||||
| 
 | ||||
|     async with tractor.open_nursery( | ||||
|         arbiter_addr=arb_addr, | ||||
|         start_method=start_method, | ||||
|     ) as tn: | ||||
| 
 | ||||
|         portal = await tn.start_actor( | ||||
|             'sequence_echoer', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_context( | ||||
|             echo_sequences, | ||||
|         ) as (ctx, first): | ||||
| 
 | ||||
|             assert first is None | ||||
|             async with ctx.open_stream(backpressure=True) as stream: | ||||
|                 yield stream | ||||
| 
 | ||||
|         await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| def test_stream_fan_out_to_local_subscriptions( | ||||
|     arb_addr, | ||||
|     start_method, | ||||
| ): | ||||
| 
 | ||||
|     sequence = list(range(1000)) | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         async with open_sequence_streamer( | ||||
|             sequence, | ||||
|             arb_addr, | ||||
|             start_method, | ||||
|         ) as stream: | ||||
| 
 | ||||
|             async with trio.open_nursery() as n: | ||||
|                 for i in range(10): | ||||
|                     n.start_soon( | ||||
|                         ensure_sequence, | ||||
|                         stream, | ||||
|                         sequence.copy(), | ||||
|                         name=f'consumer_{i}', | ||||
|                     ) | ||||
| 
 | ||||
|                 await stream.send(tuple(sequence)) | ||||
| 
 | ||||
|                 async for value in stream: | ||||
|                     print(f'source stream rx: {value}') | ||||
|                     assert value == sequence[0] | ||||
|                     sequence.remove(value) | ||||
| 
 | ||||
|                     if not sequence: | ||||
|                         # fully consumed | ||||
|                         break | ||||
| 
 | ||||
|     trio.run(main) | ||||
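Distilled from `open_sequence_streamer()` above, the minimal bidir-context handshake this file relies on, assuming the `@tractor.context` + `ctx.started()` + `open_stream()` protocol shown here; `echo()` is a hypothetical endpoint:

```python
import trio
import tractor


@tractor.context
async def echo(ctx: tractor.Context) -> None:
    await ctx.started()  # unblocks the opener's ``first`` value
    async with ctx.open_stream() as stream:
        async for msg in stream:
            await stream.send(msg)


async def main() -> None:
    async with tractor.open_nursery() as an:
        portal = await an.start_actor('echoer', enable_modules=[__name__])
        async with (
            portal.open_context(echo) as (ctx, first),
            ctx.open_stream() as stream,
        ):
            assert first is None  # no value was passed to ``started()``
            await stream.send('ping')
            assert await stream.receive() == 'ping'

        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)
```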
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'task_delays', | ||||
|     [ | ||||
|         (0.01, 0.001), | ||||
|         (0.001, 0.01), | ||||
|     ] | ||||
| ) | ||||
| def test_consumer_and_parent_maybe_lag( | ||||
|     arb_addr, | ||||
|     start_method, | ||||
|     task_delays, | ||||
| ): | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         sequence = list(range(300)) | ||||
|         parent_delay, sub_delay = task_delays | ||||
| 
 | ||||
|         async with open_sequence_streamer( | ||||
|             sequence, | ||||
|             arb_addr, | ||||
|             start_method, | ||||
|         ) as stream: | ||||
| 
 | ||||
|             try: | ||||
|                 async with trio.open_nursery() as n: | ||||
| 
 | ||||
|                     n.start_soon( | ||||
|                         ensure_sequence, | ||||
|                         stream, | ||||
|                         sequence.copy(), | ||||
|                         sub_delay, | ||||
|                         name='consumer_task', | ||||
|                     ) | ||||
| 
 | ||||
|                     await stream.send(tuple(sequence)) | ||||
| 
 | ||||
|                     # async for value in stream: | ||||
|                     lagged = False | ||||
|                     lag_count = 0 | ||||
| 
 | ||||
|                     while True: | ||||
|                         try: | ||||
|                             value = await stream.receive() | ||||
|                             print(f'source stream rx: {value}') | ||||
| 
 | ||||
|                             if lagged: | ||||
|                                 # re-set the sequence starting | ||||
|                                 # at our last received value | ||||
|                                 sequence = sequence[sequence.index(value) + 1:] | ||||
|                             else: | ||||
|                                 assert value == sequence[0] | ||||
|                                 sequence.remove(value) | ||||
| 
 | ||||
|                             lagged = False | ||||
| 
 | ||||
|                         except Lagged: | ||||
|                             lagged = True | ||||
|                             print(f'source stream lagged after {value}') | ||||
|                             lag_count += 1 | ||||
|                             continue | ||||
| 
 | ||||
|                         # lag the parent | ||||
|                         await trio.sleep(parent_delay) | ||||
| 
 | ||||
|                         if not sequence: | ||||
|                             # fully consumed | ||||
|                             break | ||||
|                     print(f'parent + source stream lagged: {lag_count}') | ||||
| 
 | ||||
|                     if parent_delay > sub_delay: | ||||
|                         assert lag_count > 0 | ||||
| 
 | ||||
|             except Lagged: | ||||
|                 # child was lagged | ||||
|                 assert parent_delay < sub_delay | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_faster_task_to_recv_is_cancelled_by_slower( | ||||
|     arb_addr, | ||||
|     start_method, | ||||
| ): | ||||
|     ''' | ||||
|     Ensure that if a faster task consuming from a stream is cancelled | ||||
|     the slower task can continue to receive all expected values. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
| 
 | ||||
|         sequence = list(range(1000)) | ||||
| 
 | ||||
|         async with open_sequence_streamer( | ||||
|             sequence, | ||||
|             arb_addr, | ||||
|             start_method, | ||||
| 
 | ||||
|         ) as stream: | ||||
| 
 | ||||
|             async with trio.open_nursery() as n: | ||||
|                 n.start_soon( | ||||
|                     ensure_sequence, | ||||
|                     stream, | ||||
|                     sequence.copy(), | ||||
|                     0, | ||||
|                     name='consumer_task', | ||||
|                 ) | ||||
| 
 | ||||
|                 await stream.send(tuple(sequence)) | ||||
| 
 | ||||
|                 # pull 3 values, cancel the subtask, then | ||||
|                 # expect to be able to pull all values still | ||||
|                 for i in range(20): | ||||
|                     try: | ||||
|                         value = await stream.receive() | ||||
|                         print(f'source stream rx: {value}') | ||||
|                         await trio.sleep(0.01) | ||||
|                     except Lagged: | ||||
|                         print(f'parent overrun after {value}') | ||||
|                         continue | ||||
| 
 | ||||
|                 print('cancelling faster subtask') | ||||
|                 n.cancel_scope.cancel() | ||||
| 
 | ||||
|             try: | ||||
|                 value = await stream.receive() | ||||
|                 print(f'source stream after cancel: {value}') | ||||
|             except Lagged: | ||||
|                 print(f'parent overrun after {value}') | ||||
| 
 | ||||
|             # expect to see all remaining values | ||||
|             with trio.fail_after(0.5): | ||||
|                 async for value in stream: | ||||
|                     assert stream._broadcaster._state.recv_ready is None | ||||
|                     print(f'source stream rx: {value}') | ||||
|                     if value == 999: | ||||
|                         # fully consumed and we missed no values once | ||||
|                         # the faster subtask was cancelled | ||||
|                         break | ||||
| 
 | ||||
|                 # await tractor.breakpoint() | ||||
|                 # await stream.receive() | ||||
|                 print(f'final value: {value}') | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_subscribe_errors_after_close(): | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         size = 1 | ||||
|         tx, rx = trio.open_memory_channel(size) | ||||
|         async with broadcast_receiver(rx, size) as brx: | ||||
|             pass | ||||
| 
 | ||||
|         try: | ||||
|             # open and close | ||||
|             async with brx.subscribe(): | ||||
|                 pass | ||||
| 
 | ||||
|         except trio.ClosedResourceError: | ||||
|             assert brx.key not in brx._state.subs | ||||
| 
 | ||||
|         else: | ||||
|             assert 0 | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_ensure_slow_consumers_lag_out( | ||||
|     arb_addr, | ||||
|     start_method, | ||||
| ): | ||||
|     '''This is a pure local task test; no tractor | ||||
|     machinery is really required. | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
| 
 | ||||
|         # make sure it all works within the runtime | ||||
|         async with tractor.open_root_actor(): | ||||
| 
 | ||||
|             num_laggers = 4 | ||||
|             laggers: dict[str, int] = {} | ||||
|             retries = 3 | ||||
|             size = 100 | ||||
|             tx, rx = trio.open_memory_channel(size) | ||||
|             brx = broadcast_receiver(rx, size) | ||||
| 
 | ||||
|             async def sub_and_print( | ||||
|                 delay: float, | ||||
|             ) -> None: | ||||
| 
 | ||||
|                 task = current_task() | ||||
|                 start = time.time() | ||||
| 
 | ||||
|                 async with brx.subscribe() as lbrx: | ||||
|                     while True: | ||||
|                         print(f'{task.name}: starting consume loop') | ||||
|                         try: | ||||
|                             async for value in lbrx: | ||||
|                                 print(f'{task.name}: {value}') | ||||
|                                 await trio.sleep(delay) | ||||
| 
 | ||||
|                             if task.name == 'sub_1': | ||||
|                                 # trigger checkpoint to clean out other subs | ||||
|                                 await trio.sleep(0.01) | ||||
| 
 | ||||
|                                 # the non-lagger got | ||||
|                                 # a ``trio.EndOfChannel`` | ||||
|                                 # because the ``tx`` below was closed | ||||
|                                 assert len(lbrx._state.subs) == 1 | ||||
| 
 | ||||
|                                 await lbrx.aclose() | ||||
| 
 | ||||
|                                 assert len(lbrx._state.subs) == 0 | ||||
| 
 | ||||
|                         except trio.ClosedResourceError: | ||||
|                             # only the fast sub will try to re-enter | ||||
|                             # iteration on the now closed bcaster | ||||
|                             assert task.name == 'sub_1' | ||||
|                             return | ||||
| 
 | ||||
|                         except Lagged: | ||||
|                             lag_time = time.time() - start | ||||
|                             lags = laggers[task.name] | ||||
|                             print( | ||||
|                                 f'restarting slow task {task.name} ' | ||||
|                                 f'that bailed out on {lags}:{value} ' | ||||
|                                 f'after {lag_time:.3f}') | ||||
|                             if lags <= retries: | ||||
|                                 laggers[task.name] += 1 | ||||
|                                 continue | ||||
|                             else: | ||||
|                                 print( | ||||
|                                     f'{task.name} was too slow and terminated ' | ||||
|                                     f'on {lags}:{value}') | ||||
|                                 return | ||||
| 
 | ||||
|             async with trio.open_nursery() as nursery: | ||||
| 
 | ||||
|                 for i in range(1, num_laggers): | ||||
| 
 | ||||
|                     task_name = f'sub_{i}' | ||||
|                     laggers[task_name] = 0 | ||||
|                     nursery.start_soon( | ||||
|                         partial( | ||||
|                             sub_and_print, | ||||
|                             delay=i*0.001, | ||||
|                         ), | ||||
|                         name=task_name, | ||||
|                     ) | ||||
| 
 | ||||
|                 # allow subs to sched | ||||
|                 await trio.sleep(0.1) | ||||
| 
 | ||||
|                 async with tx: | ||||
|                     for i in cycle(range(size)): | ||||
|                         await tx.send(i) | ||||
|                         if len(brx._state.subs) == 2: | ||||
|                             # only the non-lagger sub plus the root receiver remain | ||||
|                             break | ||||
| 
 | ||||
|                 # the non-lagger | ||||
|                 assert laggers.pop('sub_1') == 0 | ||||
| 
 | ||||
|                 for n, v in laggers.items(): | ||||
|                     assert v == 4 | ||||
| 
 | ||||
|                 assert tx._closed | ||||
|                 assert not tx._state.open_send_channels | ||||
| 
 | ||||
|                 # check that "first" bcaster that we created | ||||
|                 # above, never was iterated and is thus overrun | ||||
|                 try: | ||||
|                     await brx.receive() | ||||
|                 except Lagged: | ||||
|                     # expect tokio style index truncation | ||||
|                     seq = brx._state.subs[brx.key] | ||||
|                     assert seq == len(brx._state.queue) - 1 | ||||
| 
 | ||||
|                 # all backpressured entries in the underlying | ||||
|                 # channel should have been copied into the caster | ||||
|                 # queue trailing-window | ||||
|                 async for i in rx: | ||||
|                     print(f'bped: {i}') | ||||
|                     assert i in brx._state.queue | ||||
| 
 | ||||
|                 # should be noop | ||||
|                 await brx.aclose() | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_first_recver_is_cancelled(): | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         # make sure it all works within the runtime | ||||
|         async with tractor.open_root_actor(): | ||||
| 
 | ||||
|             tx, rx = trio.open_memory_channel(1) | ||||
|             brx = broadcast_receiver(rx, 1) | ||||
|             cs = trio.CancelScope() | ||||
| 
 | ||||
|             async def sub_and_recv(): | ||||
|                 with cs: | ||||
|                     async with brx.subscribe() as bc: | ||||
|                         async for value in bc: | ||||
|                             print(value) | ||||
| 
 | ||||
|             async def cancel_and_send(): | ||||
|                 await trio.sleep(0.2) | ||||
|                 cs.cancel() | ||||
|                 await tx.send(1) | ||||
| 
 | ||||
|             async with trio.open_nursery() as n: | ||||
| 
 | ||||
|                 n.start_soon(sub_and_recv) | ||||
|                 await trio.sleep(0.1) | ||||
|                 assert brx._state.recv_ready | ||||
| 
 | ||||
|                 n.start_soon(cancel_and_send) | ||||
| 
 | ||||
|                 # ensure that we don't hang because no-task is now | ||||
|                 # waiting on the underlying receive.. | ||||
|                 with trio.fail_after(0.5): | ||||
|                     value = await brx.receive() | ||||
|                     print(f'parent: {value}') | ||||
|                     assert value == 1 | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_no_raise_on_lag(): | ||||
|     ''' | ||||
|     Run a simple 2-task broadcast where one task is slow but configured | ||||
|     so that it does not raise `Lagged` on overruns using | ||||
|     `raise_on_lag=False` and verify that the task does not raise. | ||||
| 
 | ||||
|     ''' | ||||
|     size = 100 | ||||
|     tx, rx = trio.open_memory_channel(size) | ||||
|     brx = broadcast_receiver(rx, size) | ||||
| 
 | ||||
|     async def slow(): | ||||
|         async with brx.subscribe( | ||||
|             raise_on_lag=False, | ||||
|         ) as br: | ||||
|             async for msg in br: | ||||
|                 print(f'slow task got: {msg}') | ||||
|                 await trio.sleep(0.1) | ||||
| 
 | ||||
|     async def fast(): | ||||
|         async with brx.subscribe() as br: | ||||
|             async for msg in br: | ||||
|                 print(f'fast task got: {msg}') | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with ( | ||||
|             tractor.open_root_actor( | ||||
|                 # NOTE: so we see the warning msg emitted by the bcaster | ||||
|                 # internals when the no raise flag is set. | ||||
|                 loglevel='warning', | ||||
|             ), | ||||
|             trio.open_nursery() as n, | ||||
|         ): | ||||
|             n.start_soon(slow) | ||||
|             n.start_soon(fast) | ||||
| 
 | ||||
|             for i in range(1000): | ||||
|                 await tx.send(i) | ||||
| 
 | ||||
|             # simulate user nailing ctl-c after realizing | ||||
|             # there's a lag in the slow task. | ||||
|             await trio.sleep(1) | ||||
|             raise KeyboardInterrupt | ||||
| 
 | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run(main) | ||||
|  | @ -1,82 +0,0 @@ | |||
| ''' | ||||
| Reminders for oddities in `trio` that we need to stay aware of and/or | ||||
| want to see changed. | ||||
| 
 | ||||
| ''' | ||||
| import pytest | ||||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'use_start_soon', [ | ||||
|         pytest.param( | ||||
|             True, | ||||
|             marks=pytest.mark.xfail(reason="see python-trio/trio#2258") | ||||
|         ), | ||||
|         False, | ||||
|     ] | ||||
| ) | ||||
| def test_stashed_child_nursery(use_start_soon): | ||||
| 
 | ||||
|     _child_nursery = None | ||||
| 
 | ||||
|     async def waits_on_signal( | ||||
|         ev: trio.Event, | ||||
|         task_status: TaskStatus[trio.Nursery] = trio.TASK_STATUS_IGNORED, | ||||
|     ): | ||||
|         ''' | ||||
|         Do some stuff, then signal other tasks, then yield back to the "starter". | ||||
| 
 | ||||
|         ''' | ||||
|         await ev.wait() | ||||
|         task_status.started() | ||||
| 
 | ||||
|     async def mk_child_nursery( | ||||
|         task_status: TaskStatus = trio.TASK_STATUS_IGNORED, | ||||
|     ): | ||||
|         ''' | ||||
|         Allocate a child sub-nursery and stash it as a global. | ||||
| 
 | ||||
|         ''' | ||||
|         nonlocal _child_nursery | ||||
| 
 | ||||
|         async with trio.open_nursery() as cn: | ||||
|             _child_nursery = cn | ||||
|             task_status.started(cn) | ||||
| 
 | ||||
|             # block until cancelled by parent. | ||||
|             await trio.sleep_forever() | ||||
| 
 | ||||
|     async def sleep_and_err( | ||||
|         ev: trio.Event, | ||||
|         task_status: TaskStatus = trio.TASK_STATUS_IGNORED, | ||||
|     ): | ||||
|         await trio.sleep(0.5) | ||||
|         doggy()  # noqa | ||||
|         ev.set() | ||||
|         task_status.started() | ||||
| 
 | ||||
|     async def main(): | ||||
| 
 | ||||
|         async with ( | ||||
|             trio.open_nursery() as pn, | ||||
|         ): | ||||
|             cn = await pn.start(mk_child_nursery) | ||||
|             assert cn | ||||
| 
 | ||||
|             ev = trio.Event() | ||||
| 
 | ||||
|             if use_start_soon: | ||||
|                 # this causes inf hang | ||||
|                 # this causes an infinite hang | ||||
| 
 | ||||
|             else: | ||||
|                 # this does not. | ||||
|                 await cn.start(sleep_and_err, ev) | ||||
| 
 | ||||
|             with trio.fail_after(1): | ||||
|                 await cn.start(waits_on_signal, ev) | ||||
| 
 | ||||
|     with pytest.raises(NameError): | ||||
|         trio.run(main) | ||||
|  | @ -1,86 +1,39 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| tractor: structured concurrent "actors". | ||||
| 
 | ||||
| tractor: An actor model micro-framework built on | ||||
|          ``trio`` and ``multiprocessing``. | ||||
| """ | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| from trio import MultiError | ||||
| 
 | ||||
| from ._clustering import open_actor_cluster | ||||
| from ._ipc import Channel | ||||
| from ._streaming import ( | ||||
|     Context, | ||||
|     MsgStream, | ||||
|     stream, | ||||
|     context, | ||||
| ) | ||||
| from ._discovery import ( | ||||
|     get_arbiter, | ||||
|     find_actor, | ||||
|     wait_for_actor, | ||||
|     query_actor, | ||||
| ) | ||||
| from ._supervise import open_nursery | ||||
| from ._state import ( | ||||
|     current_actor, | ||||
|     is_root_process, | ||||
| ) | ||||
| from ._exceptions import ( | ||||
|     RemoteActorError, | ||||
|     ModuleNotExposed, | ||||
|     ContextCancelled, | ||||
| ) | ||||
| from ._debug import ( | ||||
|     breakpoint, | ||||
|     post_mortem, | ||||
| ) | ||||
| from ._streaming import Context, stream | ||||
| from ._discovery import get_arbiter, find_actor, wait_for_actor | ||||
| from ._trionics import open_nursery | ||||
| from ._state import current_actor, is_root_process | ||||
| from ._exceptions import RemoteActorError, ModuleNotExposed | ||||
| from ._debug import breakpoint, post_mortem | ||||
| from . import msg | ||||
| from ._root import ( | ||||
|     run_daemon, | ||||
|     open_root_actor, | ||||
| ) | ||||
| from ._portal import Portal | ||||
| from ._runtime import Actor | ||||
| from ._root import run, run_daemon, open_root_actor | ||||
| 
 | ||||
| 
 | ||||
| __all__ = [ | ||||
|     'Actor', | ||||
|     'Channel', | ||||
|     'Context', | ||||
|     'ContextCancelled', | ||||
|     'ModuleNotExposed', | ||||
|     'MsgStream', | ||||
|     'BaseExceptionGroup', | ||||
|     'Portal', | ||||
|     'MultiError', | ||||
|     'RemoteActorError', | ||||
|     'breakpoint', | ||||
|     'context', | ||||
|     'current_actor', | ||||
|     'find_actor', | ||||
|     'get_arbiter', | ||||
|     'is_root_process', | ||||
|     'msg', | ||||
|     'open_actor_cluster', | ||||
|     'open_nursery', | ||||
|     'open_root_actor', | ||||
|     'post_mortem', | ||||
|     'query_actor', | ||||
|     'run', | ||||
|     'run_daemon', | ||||
|     'stream', | ||||
|     'wait_for_actor', | ||||
|     'to_asyncio', | ||||
|     'wait_for_actor', | ||||
| ] | ||||
|  |  | |||
										
											
File diff suppressed because it is too large
|  | @ -1,22 +1,4 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| This is the "bootloader" for actors started using the native trio backend. | ||||
| 
 | ||||
| """This is the "bootloader" for actors started using the native trio backend. | ||||
| """ | ||||
| import sys | ||||
| import trio | ||||
|  | @ -24,7 +6,7 @@ import argparse | |||
| 
 | ||||
| from ast import literal_eval | ||||
| 
 | ||||
| from ._runtime import Actor | ||||
| from ._actor import Actor | ||||
| from ._entry import _trio_main | ||||
| 
 | ||||
| 
 | ||||
|  | @ -37,15 +19,12 @@ def parse_ipaddr(arg): | |||
|     return (str(host), int(port)) | ||||
| 
 | ||||
| 
 | ||||
| from ._entry import _trio_main | ||||
| 
 | ||||
| if __name__ == "__main__": | ||||
| 
 | ||||
|     parser = argparse.ArgumentParser() | ||||
|     parser.add_argument("--uid", type=parse_uid) | ||||
|     parser.add_argument("--loglevel", type=str) | ||||
|     parser.add_argument("--parent_addr", type=parse_ipaddr) | ||||
|     parser.add_argument("--asyncio", action='store_true') | ||||
|     args = parser.parse_args() | ||||
| 
 | ||||
|     subactor = Actor( | ||||
|  | @ -57,6 +36,5 @@ if __name__ == "__main__": | |||
| 
 | ||||
|     _trio_main( | ||||
|         subactor, | ||||
|         parent_addr=args.parent_addr, | ||||
|         infect_asyncio=args.asyncio, | ||||
|         parent_addr=args.parent_addr | ||||
|     ) | ||||
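For context on the argument plumbing above: the parent encodes its socket address as a literal tuple string on the child's command line and `parse_ipaddr()` decodes it with `ast.literal_eval`. A hedged round-trip sketch (the body is reconstructed from the visible `return` line and may differ from the elided original):

```python
from ast import literal_eval


def parse_ipaddr(arg: str):
    # e.g. "('127.0.0.1', 1616)" -> ('127.0.0.1', 1616)
    host, port = literal_eval(arg)
    return (str(host), int(port))


assert parse_ipaddr("('127.0.0.1', 1616)") == ('127.0.0.1', 1616)
```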
|  | @ -1,74 +0,0 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| ''' | ||||
| Actor cluster helpers. | ||||
| 
 | ||||
| ''' | ||||
| from __future__ import annotations | ||||
| 
 | ||||
| from contextlib import asynccontextmanager as acm | ||||
| from multiprocessing import cpu_count | ||||
| from typing import AsyncGenerator, Optional | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def open_actor_cluster( | ||||
|     modules: list[str], | ||||
|     count: int = cpu_count(), | ||||
|     names: list[str] | None = None, | ||||
|     hard_kill: bool = False, | ||||
| 
 | ||||
|     # passed through verbatim to ``open_root_actor()`` | ||||
|     **runtime_kwargs, | ||||
| 
 | ||||
| ) -> AsyncGenerator[ | ||||
|     dict[str, tractor.Portal], | ||||
|     None, | ||||
| ]: | ||||
| 
 | ||||
|     portals: dict[str, tractor.Portal] = {} | ||||
| 
 | ||||
|     if not names: | ||||
|         names = [f'worker_{i}' for i in range(count)] | ||||
| 
 | ||||
|     if not len(names) == count: | ||||
|         raise ValueError( | ||||
|             f'Number of names is {len(names)} but count is {count}') | ||||
| 
 | ||||
|     async with tractor.open_nursery( | ||||
|         **runtime_kwargs, | ||||
|     ) as an: | ||||
|         async with trio.open_nursery() as n: | ||||
|             uid = tractor.current_actor().uid | ||||
| 
 | ||||
|             async def _start(name: str) -> None: | ||||
|                 name = f'{uid[0]}.{name}' | ||||
|                 portals[name] = await an.start_actor( | ||||
|                     enable_modules=modules, | ||||
|                     name=name, | ||||
|                 ) | ||||
| 
 | ||||
|             for name in names: | ||||
|                 n.start_soon(_start, name) | ||||
| 
 | ||||
|         assert len(portals) == count | ||||
|         yield portals | ||||
| 
 | ||||
|         await an.cancel(hard_kill=hard_kill) | ||||
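A hedged usage sketch for `open_actor_cluster()` as defined above: it yields a `{name: Portal}` map of the spawned workers and cancels the whole cluster on exit; `greet()` is a hypothetical module-level function the workers expose via `modules=[__name__]`:

```python
import trio
import tractor


async def greet() -> str:
    return f'hello from {tractor.current_actor().name}'


async def main() -> None:
    async with tractor.open_actor_cluster(
        modules=[__name__],
        count=2,
    ) as portals:
        for name, portal in portals.items():
            print(name, '->', await portal.run(greet))


if __name__ == '__main__':
    trio.run(main)
```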
										
											
File diff suppressed because it is too large
|  | @ -1,29 +1,9 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| Actor discovery API. | ||||
| 
 | ||||
| """ | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     Union, | ||||
|     AsyncGenerator, | ||||
| ) | ||||
| from contextlib import asynccontextmanager as acm | ||||
| import typing | ||||
| from typing import Tuple, Optional, Union | ||||
| from async_generator import asynccontextmanager | ||||
| 
 | ||||
| from ._ipc import _connect_chan, Channel | ||||
| from ._portal import ( | ||||
|  | @ -34,16 +14,14 @@ from ._portal import ( | |||
| from ._state import current_actor, _runtime_vars | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| @asynccontextmanager | ||||
| async def get_arbiter( | ||||
| 
 | ||||
|     host: str, | ||||
|     port: int, | ||||
| 
 | ||||
| ) -> AsyncGenerator[Union[Portal, LocalPortal], None]: | ||||
|     '''Return a portal instance connected to a local or remote | ||||
| ) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]: | ||||
|     """Return a portal instance connected to a local or remote | ||||
|     arbiter. | ||||
|     ''' | ||||
|     """ | ||||
|     actor = current_actor() | ||||
| 
 | ||||
|     if not actor: | ||||
|  | @ -55,75 +33,39 @@ async def get_arbiter( | |||
|         yield LocalPortal(actor, Channel((host, port))) | ||||
|     else: | ||||
|         async with _connect_chan(host, port) as chan: | ||||
| 
 | ||||
|             async with open_portal(chan) as arb_portal: | ||||
| 
 | ||||
|                 yield arb_portal | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| @asynccontextmanager | ||||
| async def get_root( | ||||
|     **kwargs, | ||||
| ) -> AsyncGenerator[Portal, None]: | ||||
| 
 | ||||
| **kwargs, | ||||
| ) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]: | ||||
|     host, port = _runtime_vars['_root_mailbox'] | ||||
|     assert host is not None | ||||
| 
 | ||||
|     async with _connect_chan(host, port) as chan: | ||||
|         async with open_portal(chan, **kwargs) as portal: | ||||
|             yield portal | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def query_actor( | ||||
| @asynccontextmanager | ||||
| async def find_actor( | ||||
|     name: str, | ||||
|     arbiter_sockaddr: Optional[tuple[str, int]] = None, | ||||
|     arbiter_sockaddr: Tuple[str, int] = None | ||||
| ) -> typing.AsyncGenerator[Optional[Portal], None]: | ||||
|     """Ask the arbiter to find actor(s) by name. | ||||
| 
 | ||||
| ) -> AsyncGenerator[tuple[str, int], None]: | ||||
|     ''' | ||||
|     Simple address lookup for a given actor name. | ||||
| 
 | ||||
|     Returns the (socket) address or ``None``. | ||||
| 
 | ||||
|     ''' | ||||
|     Returns a connected portal to the last registered matching actor | ||||
|     known to the arbiter. | ||||
|     """ | ||||
|     actor = current_actor() | ||||
|     async with get_arbiter( | ||||
|         *arbiter_sockaddr or actor._arb_addr | ||||
|     ) as arb_portal: | ||||
| 
 | ||||
|         sockaddr = await arb_portal.run_from_ns( | ||||
|             'self', | ||||
|             'find_actor', | ||||
|             name=name, | ||||
|         ) | ||||
| 
 | ||||
|     async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal: | ||||
|         sockaddr = await arb_portal.run_from_ns('self', 'find_actor', name=name) | ||||
|         # TODO: return portals to all available actors - for now just | ||||
|         # the last one that registered | ||||
|         if name == 'arbiter' and actor.is_arbiter: | ||||
|             raise RuntimeError("The current actor is the arbiter") | ||||
| 
 | ||||
|         yield sockaddr if sockaddr else None | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def find_actor( | ||||
|     name: str, | ||||
|     arbiter_sockaddr: tuple[str, int] | None = None | ||||
| 
 | ||||
| ) -> AsyncGenerator[Optional[Portal], None]: | ||||
|     ''' | ||||
|     Ask the arbiter to find actor(s) by name. | ||||
| 
 | ||||
|     Returns a connected portal to the last registered matching actor | ||||
|     known to the arbiter. | ||||
| 
 | ||||
|     ''' | ||||
|     async with query_actor( | ||||
|         name=name, | ||||
|         arbiter_sockaddr=arbiter_sockaddr, | ||||
|     ) as sockaddr: | ||||
| 
 | ||||
|         if sockaddr: | ||||
|         elif sockaddr: | ||||
|             async with _connect_chan(*sockaddr) as chan: | ||||
|                 async with open_portal(chan) as portal: | ||||
|                     yield portal | ||||
|  | @ -131,27 +73,19 @@ async def find_actor( | |||
|             yield None | ||||
| 
 | ||||
| 
 | ||||
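For reference, a typical lookup with this API might read as below; this is a sketch only, it assumes it runs inside an already-opened actor runtime, and the actor name 'logger' is made up:

import tractor

async def lookup():
    # find_actor() yields a connected Portal when a matching name
    # is registered with the arbiter, else None (per the docstring above)
    async with tractor.find_actor('logger') as portal:
        if portal is None:
            print('no actor named "logger" is registered')
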
| @acm | ||||
| @asynccontextmanager | ||||
| async def wait_for_actor( | ||||
|     name: str, | ||||
|     arbiter_sockaddr: tuple[str, int] | None = None | ||||
| ) -> AsyncGenerator[Portal, None]: | ||||
|     arbiter_sockaddr: Tuple[str, int] = None | ||||
| ) -> typing.AsyncGenerator[Portal, None]: | ||||
|     """Wait on an actor to register with the arbiter. | ||||
| 
 | ||||
|     A portal to the first registered actor is returned. | ||||
|     """ | ||||
|     actor = current_actor() | ||||
| 
 | ||||
|     async with get_arbiter( | ||||
|         *arbiter_sockaddr or actor._arb_addr, | ||||
|     ) as arb_portal: | ||||
|         sockaddrs = await arb_portal.run_from_ns( | ||||
|             'self', | ||||
|             'wait_for_actor', | ||||
|             name=name, | ||||
|         ) | ||||
|     async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal: | ||||
|         sockaddrs = await arb_portal.run_from_ns('self', 'wait_for_actor', name=name) | ||||
|         sockaddr = sockaddrs[-1] | ||||
| 
 | ||||
|         async with _connect_chan(*sockaddr) as chan: | ||||
|             async with open_portal(chan) as portal: | ||||
|                 yield portal | ||||
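Unlike ``find_actor()``, ``wait_for_actor()`` blocks until a registration exists, which makes it useful as a startup rendezvous; a rough sketch under the same assumptions (running actor tree, hypothetical 'logger' name):

import tractor

async def rendezvous():
    # blocks until some actor registers under the given name,
    # then yields a portal to a registered match
    async with tractor.wait_for_actor('logger') as portal:
        assert portal.channel.uid is not None
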
|  |  | |||
|  | @ -1,64 +1,28 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| Sub-process entry points. | ||||
| 
 | ||||
| """ | ||||
| from __future__ import annotations | ||||
| from functools import partial | ||||
| from typing import ( | ||||
|     Any, | ||||
|     TYPE_CHECKING, | ||||
| ) | ||||
| from typing import Tuple, Any | ||||
| import signal | ||||
| 
 | ||||
| import trio  # type: ignore | ||||
| 
 | ||||
| from .log import ( | ||||
|     get_console_log, | ||||
|     get_logger, | ||||
| ) | ||||
| from .log import get_console_log, get_logger | ||||
| from . import _state | ||||
| from .to_asyncio import run_as_asyncio_guest | ||||
| from ._runtime import ( | ||||
|     async_main, | ||||
|     Actor, | ||||
| ) | ||||
| 
 | ||||
| if TYPE_CHECKING: | ||||
|     from ._spawn import SpawnMethodKey | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| def _mp_main( | ||||
| 
 | ||||
|     actor: Actor,  # type: ignore | ||||
|     accept_addr: tuple[str, int], | ||||
|     forkserver_info: tuple[Any, Any, Any, Any, Any], | ||||
|     start_method: SpawnMethodKey, | ||||
|     parent_addr: tuple[str, int] | None = None, | ||||
|     infect_asyncio: bool = False, | ||||
| 
 | ||||
|     actor: 'Actor',  # type: ignore | ||||
|     accept_addr: Tuple[str, int], | ||||
|     forkserver_info: Tuple[Any, Any, Any, Any, Any], | ||||
|     start_method: str, | ||||
|     parent_addr: Tuple[str, int] = None, | ||||
| ) -> None: | ||||
|     ''' | ||||
|     The routine called *after fork* which invokes a fresh ``trio.run`` | ||||
| 
 | ||||
|     ''' | ||||
|     """The routine called *after fork* which invokes a fresh ``trio.run`` | ||||
|     """ | ||||
|     actor._forkserver_info = forkserver_info | ||||
|     from ._spawn import try_set_start_method | ||||
|     spawn_ctx = try_set_start_method(start_method) | ||||
|  | @ -76,16 +40,11 @@ def _mp_main( | |||
| 
 | ||||
|     log.debug(f"parent_addr is {parent_addr}") | ||||
|     trio_main = partial( | ||||
|         async_main, | ||||
|         actor, | ||||
|         actor._async_main, | ||||
|         accept_addr, | ||||
|         parent_addr=parent_addr | ||||
|     ) | ||||
|     try: | ||||
|         if infect_asyncio: | ||||
|             actor._infected_aio = True | ||||
|             run_as_asyncio_guest(trio_main) | ||||
|         else: | ||||
|         trio.run(trio_main) | ||||
|     except KeyboardInterrupt: | ||||
|         pass  # handle it the same way trio does? | ||||
|  | @ -95,17 +54,16 @@ def _mp_main( | |||
| 
 | ||||
| 
 | ||||
| def _trio_main( | ||||
| 
 | ||||
|     actor: Actor,  # type: ignore | ||||
|     actor: 'Actor',  # type: ignore | ||||
|     *, | ||||
|     parent_addr: tuple[str, int] | None = None, | ||||
|     infect_asyncio: bool = False, | ||||
| 
 | ||||
|     parent_addr: Tuple[str, int] = None, | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Entry point for a `trio_run_in_process` subactor. | ||||
|     """Entry point for a `trio_run_in_process` subactor. | ||||
|     """ | ||||
|     # Disable sigint handling in children; | ||||
|     # we don't need it thanks to our cancellation machinery. | ||||
|     signal.signal(signal.SIGINT, signal.SIG_IGN) | ||||
| 
 | ||||
|     ''' | ||||
|     log.info(f"Started new trio process for {actor.uid}") | ||||
| 
 | ||||
|     if actor.loglevel is not None: | ||||
|  | @ -120,16 +78,11 @@ def _trio_main( | |||
| 
 | ||||
|     log.debug(f"parent_addr is {parent_addr}") | ||||
|     trio_main = partial( | ||||
|         async_main, | ||||
|         actor, | ||||
|         actor._async_main, | ||||
|         parent_addr=parent_addr | ||||
|     ) | ||||
| 
 | ||||
|     try: | ||||
|         if infect_asyncio: | ||||
|             actor._infected_aio = True | ||||
|             run_as_asyncio_guest(trio_main) | ||||
|         else: | ||||
|         trio.run(trio_main) | ||||
|     except KeyboardInterrupt: | ||||
|         log.warning(f"Actor {actor.uid} received KBI") | ||||
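Both entry points end in the same dispatch: run the actor's main coroutine under a plain ``trio.run()``, or as a guest inside an ``asyncio`` loop when ``infect_asyncio`` is set. Isolated as a sketch (using the names imported at the top of this module):

import trio
from tractor.to_asyncio import run_as_asyncio_guest

def run_entry(trio_main, infect_asyncio: bool = False) -> None:
    # mirrors the final branch in _mp_main()/_trio_main() above
    if infect_asyncio:
        run_as_asyncio_guest(trio_main)
    else:
        trio.run(trio_main)
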
|  |  | |||
|  | @ -1,58 +1,36 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| Our classy exception set. | ||||
| 
 | ||||
| """ | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Optional, | ||||
|     Type, | ||||
| ) | ||||
| from typing import Dict, Any | ||||
| import importlib | ||||
| import builtins | ||||
| import traceback | ||||
| 
 | ||||
| import exceptiongroup as eg | ||||
| import trio | ||||
| 
 | ||||
| 
 | ||||
| _this_mod = importlib.import_module(__name__) | ||||
| 
 | ||||
| 
 | ||||
| class ActorFailure(Exception): | ||||
|     "General actor failure" | ||||
| 
 | ||||
| 
 | ||||
| class RemoteActorError(Exception): | ||||
|     # TODO: local reconstruction of remote exception details | ||||
|     "Remote actor exception bundled locally" | ||||
|     def __init__( | ||||
|         self, | ||||
|         message: str, | ||||
|         suberror_type: Optional[Type[BaseException]] = None, | ||||
|         **msgdata | ||||
| 
 | ||||
|     ) -> None: | ||||
|     def __init__(self, message, type_str, **msgdata) -> None: | ||||
|         super().__init__(message) | ||||
|         for ns in [builtins, _this_mod, trio]: | ||||
|             try: | ||||
|                 self.type = getattr(ns, type_str) | ||||
|                 break | ||||
|             except AttributeError: | ||||
|                 continue | ||||
|         else: | ||||
|             self.type = Exception | ||||
| 
 | ||||
|         self.type = suberror_type | ||||
|         self.msgdata = msgdata | ||||
| 
 | ||||
|     # TODO: a trio.MultiError.catch like context manager | ||||
|     # for catching underlying remote errors of a particular type | ||||
| 
 | ||||
| 
 | ||||
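The namespace walk in the (old-side) ``__init__`` above resolves a remote exception's type name back to a local class; isolated as a sketch with the namespace list abbreviated:

import builtins
import trio

def resolve_type(type_str: str) -> type:
    # search builtins then trio for a class matching the remote
    # type name, falling back to plain Exception (mirrors the
    # for/else loop above)
    for ns in (builtins, trio):
        if hasattr(ns, type_str):
            return getattr(ns, type_str)
    return Exception

assert resolve_type('ValueError') is ValueError
assert resolve_type('Cancelled') is trio.Cancelled
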
| class InternalActorError(RemoteActorError): | ||||
|     """Remote internal ``tractor`` error indicating | ||||
|  | @ -60,14 +38,6 @@ class InternalActorError(RemoteActorError): | |||
|     """ | ||||
| 
 | ||||
| 
 | ||||
| class TransportClosed(trio.ClosedResourceError): | ||||
|     "Underlying channel transport was closed prior to use" | ||||
| 
 | ||||
| 
 | ||||
| class ContextCancelled(RemoteActorError): | ||||
|     "Inter-actor task context cancelled itself on the callee side." | ||||
| 
 | ||||
| 
 | ||||
| class NoResult(RuntimeError): | ||||
|     "No final result is expected for this actor" | ||||
| 
 | ||||
|  | @ -76,102 +46,40 @@ class ModuleNotExposed(ModuleNotFoundError): | |||
|     "The requested module is not exposed for RPC" | ||||
| 
 | ||||
| 
 | ||||
| class NoRuntime(RuntimeError): | ||||
|     "The root actor has not been initialized yet" | ||||
| 
 | ||||
| 
 | ||||
| class StreamOverrun(trio.TooSlowError): | ||||
|     "This stream was overrun by sender" | ||||
| 
 | ||||
| 
 | ||||
| class AsyncioCancelled(Exception): | ||||
|     ''' | ||||
|     Asyncio cancelled translation (non-base) error | ||||
|     for use with the ``to_asyncio`` module | ||||
|     to be raised in the ``trio`` side task | ||||
| 
 | ||||
|     ''' | ||||
| 
 | ||||
| 
 | ||||
| def pack_error( | ||||
|     exc: BaseException, | ||||
|     tb=None, | ||||
| 
 | ||||
| ) -> dict[str, Any]: | ||||
| def pack_error(exc: BaseException) -> Dict[str, Any]: | ||||
|     """Create an "error message" for tranmission over | ||||
|     a channel (aka the wire). | ||||
|     """ | ||||
|     if tb: | ||||
|         tb_str = ''.join(traceback.format_tb(tb)) | ||||
|     else: | ||||
|         tb_str = traceback.format_exc() | ||||
| 
 | ||||
|     return { | ||||
|         'error': { | ||||
|             'tb_str': tb_str, | ||||
|             'tb_str': traceback.format_exc(), | ||||
|             'type_str': type(exc).__name__, | ||||
|         } | ||||
|     } | ||||
| 
 | ||||
| 
 | ||||
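Concretely, the message produced by ``pack_error()`` is a plain dict; for a ``ValueError`` raised remotely it would look roughly like (traceback truncated for illustration):

msg = {
    'error': {
        'tb_str': 'Traceback (most recent call last):\n  ...\nValueError: boom\n',
        'type_str': 'ValueError',
    },
}
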
| def unpack_error( | ||||
| 
 | ||||
|     msg: dict[str, Any], | ||||
|     msg: Dict[str, Any], | ||||
|     chan=None, | ||||
|     err_type=RemoteActorError | ||||
| 
 | ||||
| ) -> Exception: | ||||
|     ''' | ||||
|     Unpack an 'error' message from the wire | ||||
|     """Unpack an 'error' message from the wire | ||||
|     into a local ``RemoteActorError``. | ||||
| 
 | ||||
|     ''' | ||||
|     __tracebackhide__ = True | ||||
|     error = msg['error'] | ||||
| 
 | ||||
|     tb_str = error.get('tb_str', '') | ||||
|     message = f"{chan.uid}\n" + tb_str | ||||
|     type_name = error['type_str'] | ||||
|     suberror_type: Type[BaseException] = Exception | ||||
| 
 | ||||
|     if type_name == 'ContextCancelled': | ||||
|         err_type = ContextCancelled | ||||
|         suberror_type = trio.Cancelled | ||||
| 
 | ||||
|     else:  # try to lookup a suitable local error type | ||||
|         for ns in [ | ||||
|             builtins, | ||||
|             _this_mod, | ||||
|             eg, | ||||
|             trio, | ||||
|         ]: | ||||
|             try: | ||||
|                 suberror_type = getattr(ns, type_name) | ||||
|                 break | ||||
|             except AttributeError: | ||||
|                 continue | ||||
| 
 | ||||
|     exc = err_type( | ||||
|         message, | ||||
|         suberror_type=suberror_type, | ||||
| 
 | ||||
|         # unpack other fields into error type init | ||||
|     """ | ||||
|     tb_str = msg['error'].get('tb_str', '') | ||||
|     return err_type( | ||||
|         f"{chan.uid}\n" + tb_str, | ||||
|         **msg['error'], | ||||
|     ) | ||||
| 
 | ||||
|     return exc | ||||
| 
 | ||||
| 
 | ||||
| def is_multi_cancelled(exc: BaseException) -> bool: | ||||
|     ''' | ||||
|     Predicate to determine if a possible ``eg.BaseExceptionGroup`` contains | ||||
|     only ``trio.Cancelled`` sub-exceptions (and is likely the result of | ||||
|     """Predicate to determine if a ``trio.MultiError`` contains only | ||||
|     ``trio.Cancelled`` sub-exceptions (and is likely the result of | ||||
|     cancelling a collection of subtasks). | ||||
| 
 | ||||
|     ''' | ||||
|     if isinstance(exc, eg.BaseExceptionGroup): | ||||
|         return exc.subgroup( | ||||
|             lambda exc: isinstance(exc, trio.Cancelled) | ||||
|         ) is not None | ||||
| 
 | ||||
|     return False | ||||
|     """ | ||||
|     return not trio.MultiError.filter( | ||||
|         lambda exc: exc if not isinstance(exc, trio.Cancelled) else None, | ||||
|         exc, | ||||
|     ) | ||||
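A caller would typically use this predicate to swallow pure-cancellation groups while propagating real errors; a hedged sketch (the task-spawning coroutine is hypothetical):

import exceptiongroup as eg
from tractor._exceptions import is_multi_cancelled

async def supervise(run_subtasks):
    # run_subtasks is a hypothetical coroutine fn spawning subtasks
    try:
        await run_subtasks()
    except eg.BaseExceptionGroup as berr:
        # swallow groups made up purely of cancellations,
        # re-raise anything containing a real error
        if not is_multi_cancelled(berr):
            raise
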
|  |  | |||
|  | @ -1,19 +1,3 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| This is a near-copy of the 3.8 stdlib's ``multiprocessing.forkserver.py`` | ||||
| with some hackery to prevent any more than a single forkserver and | ||||
|  |  | |||
tractor/_ipc.py (477 lines)
							|  | @ -1,240 +1,84 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| Inter-process comms abstractions | ||||
| 
 | ||||
| """ | ||||
| from __future__ import annotations | ||||
| import platform | ||||
| import struct | ||||
| import typing | ||||
| from collections.abc import ( | ||||
|     AsyncGenerator, | ||||
|     AsyncIterator, | ||||
| ) | ||||
| from typing import ( | ||||
|     Any, | ||||
|     runtime_checkable, | ||||
|     Optional, | ||||
|     Protocol, | ||||
|     Type, | ||||
|     TypeVar, | ||||
| ) | ||||
| from typing import Any, Tuple, Optional | ||||
| from functools import partial | ||||
| import inspect | ||||
| 
 | ||||
| from tricycle import BufferedReceiveStream | ||||
| import msgspec | ||||
| import msgpack | ||||
| import trio | ||||
| from async_generator import asynccontextmanager | ||||
| 
 | ||||
| from .log import get_logger | ||||
| from ._exceptions import TransportClosed | ||||
| log = get_logger(__name__) | ||||
| log = get_logger('ipc') | ||||
| 
 | ||||
| # :eyeroll: | ||||
| try: | ||||
|     import msgpack_numpy | ||||
|     Unpacker = msgpack_numpy.Unpacker | ||||
| except ImportError: | ||||
|     # just plain ``msgpack`` requires tweaking key settings | ||||
|     Unpacker = partial(msgpack.Unpacker, strict_map_key=False) | ||||
| 
 | ||||
| 
 | ||||
| _is_windows = platform.system() == 'Windows' | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| def get_stream_addrs(stream: trio.SocketStream) -> tuple: | ||||
|     # should both be IP sockets | ||||
|     lsockname = stream.socket.getsockname() | ||||
|     rsockname = stream.socket.getpeername() | ||||
|     return ( | ||||
|         tuple(lsockname[:2]), | ||||
|         tuple(rsockname[:2]), | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| MsgType = TypeVar("MsgType") | ||||
| 
 | ||||
| # TODO: consider using a generic def and indexing with our eventual | ||||
| # msg definition/types? | ||||
| # - https://docs.python.org/3/library/typing.html#typing.Protocol | ||||
| # - https://jcristharif.com/msgspec/usage.html#structs | ||||
| 
 | ||||
| 
 | ||||
| @runtime_checkable | ||||
| class MsgTransport(Protocol[MsgType]): | ||||
| 
 | ||||
|     stream: trio.SocketStream | ||||
|     drained: list[MsgType] | ||||
| 
 | ||||
| class MsgpackStream: | ||||
|     """A ``trio.SocketStream`` delivering ``msgpack`` formatted data. | ||||
|     """ | ||||
|     def __init__(self, stream: trio.SocketStream) -> None: | ||||
|         ... | ||||
| 
 | ||||
|     # XXX: should this instead be called `.sendall()`? | ||||
|     async def send(self, msg: MsgType) -> None: | ||||
|         ... | ||||
| 
 | ||||
|     async def recv(self) -> MsgType: | ||||
|         ... | ||||
| 
 | ||||
|     def __aiter__(self) -> MsgType: | ||||
|         ... | ||||
| 
 | ||||
|     def connected(self) -> bool: | ||||
|         ... | ||||
| 
 | ||||
|     # defining this sync otherwise it causes a mypy error because it | ||||
|     # can't figure out it's a generator i guess?..? | ||||
|     def drain(self) -> AsyncIterator[dict]: | ||||
|         ... | ||||
| 
 | ||||
|     @property | ||||
|     def laddr(self) -> tuple[str, int]: | ||||
|         ... | ||||
| 
 | ||||
|     @property | ||||
|     def raddr(self) -> tuple[str, int]: | ||||
|         ... | ||||
| 
 | ||||
| 
 | ||||
| # TODO: not sure why we have to inherit here, but it seems to be an | ||||
| # issue with ``get_msg_transport()`` returning a ``Type[Protocol]``; | ||||
| # probably should make a `mypy` issue? | ||||
| class MsgpackTCPStream(MsgTransport): | ||||
|     ''' | ||||
|     A ``trio.SocketStream`` delivering ``msgpack`` formatted data | ||||
|     using the ``msgspec`` codec lib. | ||||
| 
 | ||||
|     ''' | ||||
|     def __init__( | ||||
|         self, | ||||
|         stream: trio.SocketStream, | ||||
|         prefix_size: int = 4, | ||||
| 
 | ||||
|     ) -> None: | ||||
| 
 | ||||
|         self.stream = stream | ||||
|         assert self.stream.socket | ||||
| 
 | ||||
|         # should both be IP sockets | ||||
|         self._laddr, self._raddr = get_stream_addrs(stream) | ||||
|         lsockname = stream.socket.getsockname() | ||||
|         assert isinstance(lsockname, tuple) | ||||
|         self._laddr = lsockname[:2] | ||||
|         rsockname = stream.socket.getpeername() | ||||
|         assert isinstance(rsockname, tuple) | ||||
|         self._raddr = rsockname[:2] | ||||
| 
 | ||||
|         # create read loop instance | ||||
|         self._agen = self._iter_packets() | ||||
|         self._send_lock = trio.StrictFIFOLock() | ||||
| 
 | ||||
|         # public i guess? | ||||
|         self.drained: list[dict] = [] | ||||
| 
 | ||||
|         self.recv_stream = BufferedReceiveStream(transport_stream=stream) | ||||
|         self.prefix_size = prefix_size | ||||
| 
 | ||||
|         # TODO: struct aware messaging coders | ||||
|         self.encode = msgspec.msgpack.Encoder().encode | ||||
|         self.decode = msgspec.msgpack.Decoder().decode  # dict[str, Any]) | ||||
| 
 | ||||
|     async def _iter_packets(self) -> AsyncGenerator[dict, None]: | ||||
|         '''Yield packets from the underlying stream. | ||||
| 
 | ||||
|         ''' | ||||
|         import msgspec  # noqa | ||||
|         decodes_failed: int = 0 | ||||
| 
 | ||||
|     async def _iter_packets(self) -> typing.AsyncGenerator[dict, None]: | ||||
|         """Yield packets from the underlying stream. | ||||
|         """ | ||||
|         unpacker = Unpacker( | ||||
|             raw=False, | ||||
|             use_list=False, | ||||
|         ) | ||||
|         while True: | ||||
|             try: | ||||
|                 header = await self.recv_stream.receive_exactly(4) | ||||
|                 data = await self.stream.receive_some(2**10) | ||||
|                 log.trace(f"received {data}")  # type: ignore | ||||
|             except trio.BrokenResourceError: | ||||
|                 log.warning(f"Stream connection {self.raddr} broke") | ||||
|                 return | ||||
| 
 | ||||
|             except ( | ||||
|                 ValueError, | ||||
|                 ConnectionResetError, | ||||
|             if data == b'': | ||||
|                 log.debug(f"Stream connection {self.raddr} was closed") | ||||
|                 return | ||||
| 
 | ||||
|                 # not sure entirely why we need this but without it we | ||||
|                 # seem to be getting racy failures here on | ||||
|                 # arbiter/registry name subs.. | ||||
|                 trio.BrokenResourceError, | ||||
|             ): | ||||
|                 raise TransportClosed( | ||||
|                     f'transport {self} was already closed prior to read' | ||||
|                 ) | ||||
| 
 | ||||
|             if header == b'': | ||||
|                 raise TransportClosed( | ||||
|                     f'transport {self} was already closed prior to read' | ||||
|                 ) | ||||
| 
 | ||||
|             size, = struct.unpack("<I", header) | ||||
| 
 | ||||
|             log.transport(f'received header {size}')  # type: ignore | ||||
| 
 | ||||
|             msg_bytes = await self.recv_stream.receive_exactly(size) | ||||
| 
 | ||||
|             log.transport(f"received {msg_bytes}")  # type: ignore | ||||
|             try: | ||||
|                 yield self.decode(msg_bytes) | ||||
|             except ( | ||||
|                 msgspec.DecodeError, | ||||
|                 UnicodeDecodeError, | ||||
|             ): | ||||
|                 if decodes_failed < 4: | ||||
|                     # ignore decoding errors for now and assume they have to | ||||
|                     # do with a channel drop - hope that receiving from the | ||||
|                     # channel will raise an expected error and bubble up. | ||||
|                     try: | ||||
|                         msg_str: str | bytes = msg_bytes.decode() | ||||
|                     except UnicodeDecodeError: | ||||
|                         msg_str = msg_bytes | ||||
| 
 | ||||
|                     log.error( | ||||
|                         '`msgspec` failed to decode!?\n' | ||||
|                         'dumping bytes:\n' | ||||
|                         f'{msg_str!r}' | ||||
|                     ) | ||||
|                     decodes_failed += 1 | ||||
|                 else: | ||||
|                     raise | ||||
| 
 | ||||
|     async def send(self, msg: Any) -> None: | ||||
|         async with self._send_lock: | ||||
| 
 | ||||
|             bytes_data: bytes = self.encode(msg) | ||||
| 
 | ||||
|             # supposedly the fastest approach, per: | ||||
|             # https://stackoverflow.com/a/54027962 | ||||
|             size: bytes = struct.pack("<I", len(bytes_data)) | ||||
| 
 | ||||
|             return await self.stream.send_all(size + bytes_data) | ||||
|             unpacker.feed(data) | ||||
|             for packet in unpacker: | ||||
|                 yield packet | ||||
| 
 | ||||
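The new ``msgspec`` codepath above uses simple length-prefixed framing: a 4-byte little-endian size header followed by the msgpack payload (hence ``receive_exactly(4)`` on read and ``struct.pack("<I", ...)`` on send). A standalone sketch of just the framing:

import struct

def frame(payload: bytes) -> bytes:
    # 4-byte little-endian length header + body, as in send() above
    return struct.pack("<I", len(payload)) + payload

def unframe(buf: bytes) -> bytes:
    # inverse of frame(); mirrors the header + receive_exactly() logic
    size, = struct.unpack("<I", buf[:4])
    return buf[4:4 + size]

assert unframe(frame(b'hello')) == b'hello'
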
|     @property | ||||
|     def laddr(self) -> tuple[str, int]: | ||||
|     def laddr(self) -> Tuple[Any, ...]: | ||||
|         return self._laddr | ||||
| 
 | ||||
|     @property | ||||
|     def raddr(self) -> tuple[str, int]: | ||||
|     def raddr(self) -> Tuple[Any, ...]: | ||||
|         return self._raddr | ||||
| 
 | ||||
|     # XXX: should this instead be called `.sendall()`? | ||||
|     async def send(self, data: Any) -> None: | ||||
|         async with self._send_lock: | ||||
|             return await self.stream.send_all( | ||||
|                 msgpack.dumps(data, use_bin_type=True)) | ||||
| 
 | ||||
|     async def recv(self) -> Any: | ||||
|         return await self._agen.asend(None) | ||||
| 
 | ||||
|     async def drain(self) -> AsyncIterator[dict]: | ||||
|         ''' | ||||
|         Drain the stream's remaining messages sent from | ||||
|         the far end until the connection is closed by | ||||
|         the peer. | ||||
| 
 | ||||
|         ''' | ||||
|         try: | ||||
|             async for msg in self._iter_packets(): | ||||
|                 self.drained.append(msg) | ||||
|         except TransportClosed: | ||||
|             for msg in self.drained: | ||||
|                 yield msg | ||||
| 
 | ||||
|     def __aiter__(self): | ||||
|         return self._agen | ||||
| 
 | ||||
|  | @ -242,87 +86,32 @@ class MsgpackTCPStream(MsgTransport): | |||
|         return self.stream.socket.fileno() != -1 | ||||
| 
 | ||||
| 
 | ||||
| def get_msg_transport( | ||||
| 
 | ||||
|     key: tuple[str, str], | ||||
| 
 | ||||
| ) -> Type[MsgTransport]: | ||||
| 
 | ||||
|     return { | ||||
|         ('msgpack', 'tcp'): MsgpackTCPStream, | ||||
|     }[key] | ||||
| 
 | ||||
| 
 | ||||
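``get_msg_transport()`` is just a keyed table lookup, which leaves the door open to alternate codec/protocol pairs; a sketch of how an extension might look (the json entry is purely hypothetical):

# sketch: registry keyed by (codec, protocol) tuples
_transports = {
    ('msgpack', 'tcp'): MsgpackTCPStream,
    # ('json', 'tcp'): JsonTCPStream,  # hypothetical future entry
}
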
| class Channel: | ||||
|     ''' | ||||
|     An inter-process channel for communication between (remote) actors. | ||||
|     """An inter-process channel for communication between (remote) actors. | ||||
| 
 | ||||
|     Wraps a ``MsgStream``: transport + encoding IPC connection. | ||||
| 
 | ||||
|     Currently we only support ``trio.SocketStream`` for transport | ||||
|     (aka TCP) and the ``msgpack`` interchange format via the ``msgspec`` | ||||
|     codec library. | ||||
| 
 | ||||
|     ''' | ||||
|     Currently the only supported transport is a ``trio.SocketStream``. | ||||
|     """ | ||||
|     def __init__( | ||||
| 
 | ||||
|         self, | ||||
|         destaddr: Optional[tuple[str, int]], | ||||
| 
 | ||||
|         msg_transport_type_key: tuple[str, str] = ('msgpack', 'tcp'), | ||||
| 
 | ||||
|         # TODO: optional reconnection support? | ||||
|         # auto_reconnect: bool = False, | ||||
|         # on_reconnect: typing.Callable[..., typing.Awaitable] = None, | ||||
| 
 | ||||
|         destaddr: Optional[Tuple[str, int]] = None, | ||||
|         on_reconnect: typing.Callable[..., typing.Awaitable] = None, | ||||
|         auto_reconnect: bool = False, | ||||
|         stream: trio.SocketStream = None,  # expected to be active | ||||
|     ) -> None: | ||||
| 
 | ||||
|         # self._recon_seq = on_reconnect | ||||
|         # self._autorecon = auto_reconnect | ||||
| 
 | ||||
|         self._destaddr = destaddr | ||||
|         self._transport_key = msg_transport_type_key | ||||
| 
 | ||||
|         # Either created in ``.connect()`` or passed in by | ||||
|         # user in ``.from_stream()``. | ||||
|         self._stream: Optional[trio.SocketStream] = None | ||||
|         self.msgstream: Optional[MsgTransport] = None | ||||
| 
 | ||||
|         self._recon_seq = on_reconnect | ||||
|         self._autorecon = auto_reconnect | ||||
|         self.msgstream: Optional[MsgpackStream] = MsgpackStream( | ||||
|             stream) if stream else None | ||||
|         if self.msgstream and destaddr: | ||||
|             raise ValueError( | ||||
|                 f"A stream was provided with local addr {self.laddr}" | ||||
|             ) | ||||
|         self._destaddr = self.msgstream.raddr if self.msgstream else destaddr | ||||
|         # set after handshake - always uid of far end | ||||
|         self.uid: Optional[tuple[str, str]] = None | ||||
| 
 | ||||
|         self.uid: Optional[Tuple[str, str]] = None | ||||
|         # set if far end actor errors internally | ||||
|         self._exc: Optional[Exception] = None | ||||
|         self._agen = self._aiter_recv() | ||||
|         self._exc: Optional[Exception] = None  # set if far end actor errors | ||||
|         self._closed: bool = False | ||||
|         # flag set on ``Portal.cancel_actor()`` indicating | ||||
|         # remote (peer) cancellation of the far end actor runtime. | ||||
|         self._cancel_called: bool = False  # set on ``Portal.cancel_actor()`` | ||||
| 
 | ||||
|     @classmethod | ||||
|     def from_stream( | ||||
|         cls, | ||||
|         stream: trio.SocketStream, | ||||
|         **kwargs, | ||||
| 
 | ||||
|     ) -> Channel: | ||||
| 
 | ||||
|         src, dst = get_stream_addrs(stream) | ||||
|         chan = Channel(destaddr=dst, **kwargs) | ||||
| 
 | ||||
|         # set immediately here from provided instance | ||||
|         chan._stream = stream | ||||
|         chan.set_msg_transport(stream) | ||||
|         return chan | ||||
| 
 | ||||
|     def set_msg_transport( | ||||
|         self, | ||||
|         stream: trio.SocketStream, | ||||
|         type_key: Optional[tuple[str, str]] = None, | ||||
| 
 | ||||
|     ) -> MsgTransport: | ||||
|         type_key = type_key or self._transport_key | ||||
|         self.msgstream = get_msg_transport(type_key)(stream) | ||||
|         return self.msgstream | ||||
| 
 | ||||
|     def __repr__(self) -> str: | ||||
|         if self.msgstream: | ||||
|  | @ -332,65 +121,43 @@ class Channel: | |||
|         return object.__repr__(self) | ||||
| 
 | ||||
|     @property | ||||
|     def laddr(self) -> Optional[tuple[str, int]]: | ||||
|     def laddr(self) -> Optional[Tuple[Any, ...]]: | ||||
|         return self.msgstream.laddr if self.msgstream else None | ||||
| 
 | ||||
|     @property | ||||
|     def raddr(self) -> Optional[tuple[str, int]]: | ||||
|     def raddr(self) -> Optional[Tuple[Any, ...]]: | ||||
|         return self.msgstream.raddr if self.msgstream else None | ||||
| 
 | ||||
|     async def connect( | ||||
|         self, | ||||
|         destaddr: tuple[Any, ...] | None = None, | ||||
|         self, destaddr: Tuple[Any, ...] = None, | ||||
|         **kwargs | ||||
| 
 | ||||
|     ) -> MsgTransport: | ||||
| 
 | ||||
|     ) -> trio.SocketStream: | ||||
|         if self.connected(): | ||||
|             raise RuntimeError("channel is already connected?") | ||||
| 
 | ||||
|         destaddr = destaddr or self._destaddr | ||||
|         assert isinstance(destaddr, tuple) | ||||
| 
 | ||||
|         stream = await trio.open_tcp_stream( | ||||
|             *destaddr, | ||||
|             **kwargs | ||||
|         ) | ||||
|         msgstream = self.set_msg_transport(stream) | ||||
| 
 | ||||
|         log.transport( | ||||
|             f'Opened channel[{type(msgstream)}]: {self.laddr} -> {self.raddr}' | ||||
|         ) | ||||
|         return msgstream | ||||
|         stream = await trio.open_tcp_stream(*destaddr, **kwargs) | ||||
|         self.msgstream = MsgpackStream(stream) | ||||
|         return stream | ||||
| 
 | ||||
|     async def send(self, item: Any) -> None: | ||||
| 
 | ||||
|         log.transport(f"send `{item}`")  # type: ignore | ||||
|         log.trace(f"send `{item}`")  # type: ignore | ||||
|         assert self.msgstream | ||||
| 
 | ||||
|         await self.msgstream.send(item) | ||||
| 
 | ||||
|     async def recv(self) -> Any: | ||||
|         assert self.msgstream | ||||
|         try: | ||||
|             return await self.msgstream.recv() | ||||
| 
 | ||||
|         # try: | ||||
|         #     return await self.msgstream.recv() | ||||
|         # except trio.BrokenResourceError: | ||||
|         #     if self._autorecon: | ||||
|         #         await self._reconnect() | ||||
|         #         return await self.recv() | ||||
|         #     raise | ||||
|         except trio.BrokenResourceError: | ||||
|             if self._autorecon: | ||||
|                 await self._reconnect() | ||||
|                 return await self.recv() | ||||
| 
 | ||||
|     async def aclose(self) -> None: | ||||
| 
 | ||||
|         log.transport( | ||||
|             f'Closing channel to {self.uid} ' | ||||
|             f'{self.laddr} -> {self.raddr}' | ||||
|         ) | ||||
|         log.debug(f"Closing {self}") | ||||
|         assert self.msgstream | ||||
|         await self.msgstream.stream.aclose() | ||||
|         self._closed = True | ||||
| 
 | ||||
|     async def __aenter__(self): | ||||
|         await self.connect() | ||||
|  | @ -402,44 +169,40 @@ class Channel: | |||
|     def __aiter__(self): | ||||
|         return self._agen | ||||
| 
 | ||||
|     # async def _reconnect(self) -> None: | ||||
|     #     """Handle connection failures by polling until a reconnect can be | ||||
|     #     established. | ||||
|     #     """ | ||||
|     #     down = False | ||||
|     #     while True: | ||||
|     #         try: | ||||
|     #             with trio.move_on_after(3) as cancel_scope: | ||||
|     #                 await self.connect() | ||||
|     #             cancelled = cancel_scope.cancelled_caught | ||||
|     #             if cancelled: | ||||
|     #                 log.transport( | ||||
|     #                     "Reconnect timed out after 3 seconds, retrying...") | ||||
|     #                 continue | ||||
|     #             else: | ||||
|     #                 log.transport("Stream connection re-established!") | ||||
| 
 | ||||
|     #                 # TODO: run any reconnection sequence | ||||
|     #                 # on_recon = self._recon_seq | ||||
|     #                 # if on_recon: | ||||
|     #                 #     await on_recon(self) | ||||
| 
 | ||||
|     #                 break | ||||
|     #         except (OSError, ConnectionRefusedError): | ||||
|     #             if not down: | ||||
|     #                 down = True | ||||
|     #                 log.transport( | ||||
|     #                     f"Connection to {self.raddr} went down, waiting" | ||||
|     #                     " for re-establishment") | ||||
|     #             await trio.sleep(1) | ||||
|     async def _reconnect(self) -> None: | ||||
|         """Handle connection failures by polling until a reconnect can be | ||||
|         established. | ||||
|         """ | ||||
|         down = False | ||||
|         while True: | ||||
|             try: | ||||
|                 with trio.move_on_after(3) as cancel_scope: | ||||
|                     await self.connect() | ||||
|                 cancelled = cancel_scope.cancelled_caught | ||||
|                 if cancelled: | ||||
|                     log.warning( | ||||
|                         "Reconnect timed out after 3 seconds, retrying...") | ||||
|                     continue | ||||
|                 else: | ||||
|                     log.warning("Stream connection re-established!") | ||||
|                     # run any reconnection sequence | ||||
|                     on_recon = self._recon_seq | ||||
|                     if on_recon: | ||||
|                         await on_recon(self) | ||||
|                     break | ||||
|             except (OSError, ConnectionRefusedError): | ||||
|                 if not down: | ||||
|                     down = True | ||||
|                     log.warning( | ||||
|                         f"Connection to {self.raddr} went down, waiting" | ||||
|                         " for re-establishment") | ||||
|                 await trio.sleep(1) | ||||
| 
 | ||||
|     async def _aiter_recv( | ||||
|         self | ||||
|     ) -> AsyncGenerator[Any, None]: | ||||
|         ''' | ||||
|         Async iterate items from underlying stream. | ||||
| 
 | ||||
|         ''' | ||||
|     ) -> typing.AsyncGenerator[Any, None]: | ||||
|         """Async iterate items from underlying stream. | ||||
|         """ | ||||
|         assert self.msgstream | ||||
|         while True: | ||||
|             try: | ||||
|  | @ -452,14 +215,16 @@ class Channel: | |||
|                     #     await self.msgstream.send(sent) | ||||
|             except trio.BrokenResourceError: | ||||
| 
 | ||||
|                 # if not self._autorecon: | ||||
|                 if not self._autorecon: | ||||
|                     raise | ||||
| 
 | ||||
|             await self.aclose() | ||||
| 
 | ||||
|             # if self._autorecon:  # attempt reconnect | ||||
|             #     await self._reconnect() | ||||
|             #     continue | ||||
|             if self._autorecon:  # attempt reconnect | ||||
|                 await self._reconnect() | ||||
|                 continue | ||||
|             else: | ||||
|                 return | ||||
| 
 | ||||
|     def connected(self) -> bool: | ||||
|         return self.msgstream.connected() if self.msgstream else False | ||||
|  | @ -469,11 +234,9 @@ class Channel: | |||
| async def _connect_chan( | ||||
|     host: str, port: int | ||||
| ) -> typing.AsyncGenerator[Channel, None]: | ||||
|     ''' | ||||
|     Create and connect a channel with disconnect on context manager | ||||
|     """Create and connect a channel with disconnect on context manager | ||||
|     teardown. | ||||
| 
 | ||||
|     ''' | ||||
|     """ | ||||
|     chan = Channel((host, port)) | ||||
|     await chan.connect() | ||||
|     yield chan | ||||
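Hand-rolling a connection with the helper above might look like this sketch (address and message shape are made up; any msgpack-encodable object can be sent):

async def ping(host: str = '127.0.0.1', port: int = 1616):
    # _connect_chan() connects and closes the channel via the
    # async context manager defined above
    async with _connect_chan(host, port) as chan:
        await chan.send({'cmd': 'ping'})
        return await chan.recv()
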
|  |  | |||
|  | @ -1,39 +1,23 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| Helpers pulled mostly verbatim from ``multiprocessing.spawn`` | ||||
| to aid with "fixing up" the ``__main__`` module in subprocesses. | ||||
| 
 | ||||
| These helpers are needed for any spawning backend that doesn't already | ||||
| handle this. For example when using ``trio_run_in_process`` it is needed | ||||
| but obviously not when we're already using ``multiprocessing``. | ||||
| 
 | ||||
| These helpers are needed for any spawning backend that doesn't already handle this. | ||||
| For example when using ``trio_run_in_process`` it is needed but obviously not when | ||||
| we're already using ``multiprocessing``. | ||||
| """ | ||||
| import os | ||||
| import sys | ||||
| import platform | ||||
| import types | ||||
| import runpy | ||||
| from typing import Dict | ||||
| 
 | ||||
| 
 | ||||
| ORIGINAL_DIR = os.path.abspath(os.getcwd()) | ||||
| 
 | ||||
| 
 | ||||
| def _mp_figure_out_main() -> dict[str, str]: | ||||
| def _mp_figure_out_main() -> Dict[str, str]: | ||||
|     """Taken from ``multiprocessing.spawn.get_preparation_data()``. | ||||
| 
 | ||||
|     Retrieve parent actor `__main__` module data. | ||||
|  |  | |||
|  | @ -1,145 +1,306 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| ''' | ||||
| Memory boundary "Portals": an API for structured | ||||
| concurrency linked tasks running in disparate memory domains. | ||||
| 
 | ||||
| ''' | ||||
| from __future__ import annotations | ||||
| """ | ||||
| Portal api | ||||
| """ | ||||
| import importlib | ||||
| import inspect | ||||
| from typing import ( | ||||
|     Any, Optional, | ||||
|     Callable, AsyncGenerator, | ||||
|     Type, | ||||
| ) | ||||
| import typing | ||||
| from typing import Tuple, Any, Dict, Optional, Set, Iterator | ||||
| from functools import partial | ||||
| from dataclasses import dataclass | ||||
| from pprint import pformat | ||||
| from contextlib import contextmanager | ||||
| import warnings | ||||
| 
 | ||||
| import trio | ||||
| from async_generator import asynccontextmanager | ||||
| 
 | ||||
| from .trionics import maybe_open_nursery | ||||
| from ._state import current_actor | ||||
| from ._ipc import Channel | ||||
| from .log import get_logger | ||||
| from .msg import NamespacePath | ||||
| from ._exceptions import ( | ||||
|     unpack_error, | ||||
|     NoResult, | ||||
|     ContextCancelled, | ||||
| ) | ||||
| from ._streaming import ( | ||||
|     Context, | ||||
|     MsgStream, | ||||
| ) | ||||
| from ._exceptions import unpack_error, NoResult, RemoteActorError | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| log = get_logger('tractor') | ||||
| 
 | ||||
| 
 | ||||
| def _unwrap_msg( | ||||
|     msg: dict[str, Any], | ||||
|     channel: Channel | ||||
| @asynccontextmanager | ||||
| async def maybe_open_nursery( | ||||
|     nursery: trio.Nursery = None, | ||||
|     shield: bool = False, | ||||
| ) -> typing.AsyncGenerator[trio.Nursery, Any]: | ||||
|     """Create a new nursery if None provided. | ||||
| 
 | ||||
| ) -> Any: | ||||
|     __tracebackhide__ = True | ||||
|     Blocks on exit as expected if no input nursery is provided. | ||||
|     """ | ||||
|     if nursery is not None: | ||||
|         yield nursery | ||||
|     else: | ||||
|         async with trio.open_nursery() as nursery: | ||||
|             nursery.cancel_scope.shield = shield | ||||
|             yield nursery | ||||
| 
 | ||||
| 
 | ||||
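Both call patterns the helper above supports, as a quick sketch:

import trio

async def demo():
    # 1) caller provides a nursery: it's passed straight through
    async with trio.open_nursery() as n:
        async with maybe_open_nursery(n) as nursery:
            assert nursery is n

    # 2) no nursery given: a fresh (optionally shielded) one is opened
    async with maybe_open_nursery(shield=True) as nursery:
        nursery.start_soon(trio.sleep, 0)
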
| class ReceiveStream(trio.abc.ReceiveChannel): | ||||
|     """A wrapper around a ``trio._channel.MemoryReceiveChannel`` with | ||||
|     special behaviour for signalling stream termination across an | ||||
|     inter-actor ``Channel``. This is the type returned to a local task | ||||
|     which invoked a remote streaming function using `Portal.run()`. | ||||
| 
 | ||||
|     Termination rules: | ||||
|     - if the local task signals stop iteration a cancel signal is | ||||
|       relayed to the remote task indicating to stop streaming | ||||
|     - if the remote task signals the end of a stream, raise a | ||||
|       ``StopAsyncIteration`` to terminate the local ``async for`` | ||||
| 
 | ||||
|     """ | ||||
|     def __init__( | ||||
|         self, | ||||
|         cid: str, | ||||
|         rx_chan: trio.abc.ReceiveChannel, | ||||
|         portal: 'Portal', | ||||
|     ) -> None: | ||||
|         self._cid = cid | ||||
|         self._rx_chan = rx_chan | ||||
|         self._portal = portal | ||||
|         self._shielded = False | ||||
| 
 | ||||
|     # delegate directly to underlying mem channel | ||||
|     def receive_nowait(self): | ||||
|         return self._rx_chan.receive_nowait() | ||||
| 
 | ||||
|     async def receive(self): | ||||
|         try: | ||||
|             msg = await self._rx_chan.receive() | ||||
|             return msg['yield'] | ||||
| 
 | ||||
|         except trio.ClosedResourceError: | ||||
|             # when the send is closed we assume the stream has | ||||
|             # terminated and signal this local iterator to stop | ||||
|             await self.aclose() | ||||
|             raise StopAsyncIteration | ||||
| 
 | ||||
|         except trio.Cancelled: | ||||
|             # relay cancels to the remote task | ||||
|             await self.aclose() | ||||
|             raise | ||||
| 
 | ||||
|         except KeyError: | ||||
|             # internal error should never get here | ||||
|             assert msg.get('cid'), ( | ||||
|                 "Received internal error at portal?") | ||||
|             raise unpack_error(msg, self._portal.channel) | ||||
| 
 | ||||
|     @contextmanager | ||||
|     def shield( | ||||
|         self | ||||
|     ) -> Iterator['ReceiveStream']:  # noqa | ||||
|         """Shield this stream's underlying channel such that a local consumer task | ||||
|         can be cancelled (and possibly restarted) using ``trio.Cancelled``. | ||||
| 
 | ||||
|         """ | ||||
|         self._shielded = True | ||||
|         yield self | ||||
|         self._shielded = False | ||||
| 
 | ||||
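A consumer task would wrap its iteration like so (sketch; ``stream`` is a ``ReceiveStream`` obtained from a portal call):

async def consume(stream):
    # keep the underlying portal channel alive even if this local
    # task gets cancelled and restarted (see shield() above)
    with stream.shield():
        async for msg in stream:
            print(msg)
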
|     async def aclose(self): | ||||
|         """Cancel associated remote actor task and local memory channel | ||||
|         on close. | ||||
|         """ | ||||
|         rx_chan = self._rx_chan | ||||
|         stats = rx_chan.statistics() | ||||
| 
 | ||||
|         if rx_chan._closed: | ||||
|             log.warning(f"{self} is already closed") | ||||
|             return | ||||
| 
 | ||||
|         if stats.open_receive_channels > 1: | ||||
|             # if we've been cloned don't kill the stream | ||||
|             log.debug("there are still consumers running keeping stream alive") | ||||
|             return | ||||
| 
 | ||||
|         if self._shielded: | ||||
|             log.warning(f"{self} is shielded, portal channel being kept alive") | ||||
|             return | ||||
| 
 | ||||
|         # close the local mem chan | ||||
|         rx_chan.close() | ||||
| 
 | ||||
|         cid = self._cid | ||||
|         with trio.move_on_after(0.5) as cs: | ||||
|             cs.shield = True | ||||
|             log.warning( | ||||
|                 f"Cancelling stream {cid} to " | ||||
|                 f"{self._portal.channel.uid}") | ||||
| 
 | ||||
|             # NOTE: we're telling the far end actor to cancel a task | ||||
|             # corresponding to *this actor*. The far end local channel | ||||
|             # instance is passed to `Actor._cancel_task()` implicitly. | ||||
|             await self._portal.run_from_ns('self', '_cancel_task', cid=cid) | ||||
| 
 | ||||
|         if cs.cancelled_caught: | ||||
|             # XXX: there's no way to know if the remote task was indeed | ||||
|             # cancelled in the case where the connection is broken or | ||||
|             # some other network error occurred. | ||||
|             if not self._portal.channel.connected(): | ||||
|                 log.warning( | ||||
|                     "May have failed to cancel remote task " | ||||
|                     f"{cid} for {self._portal.channel.uid}") | ||||
| 
 | ||||
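The shielded-timeout idiom used in ``aclose()`` above is general enough to isolate (sketch):

import trio

async def best_effort(afn, timeout: float = 0.5) -> bool:
    # give a cleanup step at most `timeout` seconds, protected from
    # outer cancellation while it runs (as with the remote task
    # cancel request above); returns False when it timed out
    with trio.move_on_after(timeout) as cs:
        cs.shield = True
        await afn()
    return not cs.cancelled_caught
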
|     def clone(self): | ||||
|         """Clone this receive channel allowing for multi-task | ||||
|         consumption from the same channel. | ||||
| 
 | ||||
|         """ | ||||
|         return ReceiveStream( | ||||
|             self._cid, | ||||
|             self._rx_chan.clone(), | ||||
|             self._portal, | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| class Portal: | ||||
|     """A 'portal' to a(n) (remote) ``Actor``. | ||||
| 
 | ||||
|     Allows for invoking remote routines and receiving results through an | ||||
|     underlying ``tractor.Channel`` as though the remote (async) | ||||
|     function / generator was invoked locally. | ||||
| 
 | ||||
|     Think of this like a native async IPC API. | ||||
|     """ | ||||
|     def __init__(self, channel: Channel) -> None: | ||||
|         self.channel = channel | ||||
|         # when this is set to a tuple returned from ``_submit()`` then | ||||
|         # it is expected that ``result()`` will be awaited at some point | ||||
|         # during the portal's lifetime | ||||
|         self._result: Optional[Any] = None | ||||
|         # set when _submit_for_result is called | ||||
|         self._expect_result: Optional[ | ||||
|             Tuple[str, Any, str, Dict[str, Any]] | ||||
|         ] = None | ||||
|         self._streams: Set[ReceiveStream] = set() | ||||
|         self.actor = current_actor() | ||||
| 
 | ||||
|     async def _submit( | ||||
|         self, | ||||
|         ns: str, | ||||
|         func: str, | ||||
|         kwargs, | ||||
|     ) -> Tuple[str, trio.abc.ReceiveChannel, str, Dict[str, Any]]: | ||||
|         """Submit a function to be scheduled and run by actor, return the | ||||
|         associated caller id, response queue, response type str, and | ||||
|         first message packet as a tuple. | ||||
| 
 | ||||
|         This is an async call. | ||||
|         """ | ||||
|         # ship a function call request to the remote actor | ||||
|         cid, recv_chan = await self.actor.send_cmd( | ||||
|             self.channel, ns, func, kwargs) | ||||
| 
 | ||||
|         # wait on first response msg and handle (this should be | ||||
|         # in an immediate response) | ||||
| 
 | ||||
|         first_msg = await recv_chan.receive() | ||||
|         functype = first_msg.get('functype') | ||||
| 
 | ||||
|         if functype == 'function' or functype == 'asyncfunction': | ||||
|             resp_type = 'return' | ||||
|         elif functype == 'asyncgen': | ||||
|             resp_type = 'yield' | ||||
|         elif 'error' in first_msg: | ||||
|             raise unpack_error(first_msg, self.channel) | ||||
|         else: | ||||
|             raise ValueError(f"{first_msg} is an invalid response packet?") | ||||
| 
 | ||||
|         return cid, recv_chan, resp_type, first_msg | ||||
| 
 | ||||
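The first-message dispatch above boils down to a small mapping; as a sketch:

# sketch: remote function kind -> expected response-msg key
_RESP_TYPE = {
    'function': 'return',
    'asyncfunction': 'return',
    'asyncgen': 'yield',
}
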
|     async def _submit_for_result(self, ns: str, func: str, **kwargs) -> None: | ||||
|         assert self._expect_result is None, \ | ||||
|                 "A pending main result has already been submitted" | ||||
|         self._expect_result = await self._submit(ns, func, kwargs) | ||||
| 
 | ||||
|     async def run( | ||||
|         self, | ||||
|         func_or_ns: str, | ||||
|         fn_name: Optional[str] = None, | ||||
|         **kwargs | ||||
|     ) -> Any: | ||||
|         """Submit a remote function to be scheduled and run by actor, in | ||||
|         a new task, wrap and return its (stream of) result(s). | ||||
| 
 | ||||
|         This is a blocking call and returns either a value from the | ||||
|         remote rpc task or a local async generator instance. | ||||
|         """ | ||||
|         if isinstance(func_or_ns, str): | ||||
|             warnings.warn( | ||||
|                 "`Portal.run(namespace: str, funcname: str)` is now" | ||||
|                 "deprecated, pass a function reference directly instead\n" | ||||
|                 "If you still want to run a remote function by name use" | ||||
|                 "`Portal.run_from_ns()`", | ||||
|                 DeprecationWarning, | ||||
|                 stacklevel=2, | ||||
|             ) | ||||
|             fn_mod_path = func_or_ns | ||||
|             assert isinstance(fn_name, str) | ||||
| 
 | ||||
|         else:  # function reference was passed directly | ||||
|             fn = func_or_ns | ||||
|             fn_mod_path = fn.__module__ | ||||
|             fn_name = fn.__name__ | ||||
| 
 | ||||
|         return await self._return_from_resptype( | ||||
|             *(await self._submit(fn_mod_path, fn_name, kwargs)) | ||||
|         ) | ||||
| 
 | ||||
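Side by side, the deprecated and preferred calling conventions (sketch; the module and function names are invented):

async def examples(portal):
    # deprecated: string namespace path + function name
    # (emits the DeprecationWarning above)
    result = await portal.run('mypkg.mymod', 'myfunc')

    # preferred: pass the function object itself
    from mypkg.mymod import myfunc  # hypothetical import
    result = await portal.run(myfunc)
    return result
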
|     async def run_from_ns( | ||||
|         self, | ||||
|         namespace_path: str, | ||||
|         function_name: str, | ||||
|         **kwargs, | ||||
|     ) -> Any: | ||||
|         """Run a function from a (remote) namespace in a new task on the far-end actor. | ||||
| 
 | ||||
|         This is a more explicit way to run tasks in a remote-process | ||||
|         actor using explicit object-path syntax. Hint: this is how | ||||
|         `.run()` works underneath. | ||||
| 
 | ||||
|         Note:: | ||||
| 
 | ||||
|             A special namespace `self` can be used to invoke `Actor` | ||||
|             instance methods in the remote runtime. Currently this should only | ||||
|             be used for `tractor` internals. | ||||
|         """ | ||||
|         return await self._return_from_resptype( | ||||
|             *(await self._submit(namespace_path, function_name, kwargs)) | ||||
|         ) | ||||
| 
 | ||||
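The special ``'self'`` namespace in action, mirroring the stream-cancel call seen earlier in this diff:

async def cancel_remote_task(portal, cid: str):
    # routes to Actor._cancel_task() in the far-end runtime
    # (internal use only, per the note above)
    await portal.run_from_ns('self', '_cancel_task', cid=cid)
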
|     async def _return_from_resptype( | ||||
|         self, | ||||
|         cid: str, | ||||
|         recv_chan: trio.abc.ReceiveChannel, | ||||
|         resptype: str, | ||||
|         first_msg: dict | ||||
|     ) -> Any: | ||||
|         # TODO: note this needs some serious work and thinking about how | ||||
|         # to make async-generators the fundamental IPC API over channels! | ||||
|         # (think `yield from`, `gen.send()`, and functional reactive stuff) | ||||
|         if resptype == 'yield':  # stream response | ||||
|             rchan = ReceiveStream(cid, recv_chan, self) | ||||
|             self._streams.add(rchan) | ||||
|             return rchan | ||||
| 
 | ||||
|         elif resptype == 'return':  # single response | ||||
|             msg = await recv_chan.receive() | ||||
|             try: | ||||
|                 return msg['return'] | ||||
|             except KeyError: | ||||
|                 # internal error should never get here | ||||
|                 assert msg.get('cid'), "Received internal error at portal?" | ||||
|         raise unpack_error(msg, channel) from None | ||||
| 
 | ||||
| 
 | ||||
| class MessagingError(Exception): | ||||
|     'Some kind of unexpected SC messaging dialog issue' | ||||
| 
 | ||||
| 
 | ||||
| class Portal: | ||||
|     ''' | ||||
|     A 'portal' to a(n) (remote) ``Actor``. | ||||
| 
 | ||||
|     A portal is "opened" (and eventually closed) by one side of an | ||||
|     inter-actor communication context. The side which opens the portal | ||||
|     is equivalent to a "caller" in function parlance and usually is | ||||
|     either the called actor's parent (in process tree hierarchy terms) | ||||
|     or a client interested in scheduling work to be done remotely in a | ||||
|     far process. | ||||
| 
 | ||||
|     The portal api allows the "caller" actor to invoke remote routines | ||||
|     and receive results through an underlying ``tractor.Channel`` as | ||||
|     though the remote (async) function / generator was called locally. | ||||
|     It may be thought of loosely as an RPC api where native Python | ||||
|     function calling semantics are supported transparently; hence it is | ||||
|     like having a "portal" between the seperate actor memory spaces. | ||||
| 
 | ||||
|     ''' | ||||
|     # the timeout for a remote cancel request sent to | ||||
|     # a(n) (peer) actor. | ||||
|     cancel_timeout = 0.5 | ||||
| 
 | ||||
|     def __init__(self, channel: Channel) -> None: | ||||
|         self.channel = channel | ||||
|         # during the portal's lifetime | ||||
|         self._result_msg: Optional[dict] = None | ||||
| 
 | ||||
|         # When set to a ``Context`` (when _submit_for_result is called) | ||||
|         # it is expected that ``result()`` will be awaited at some | ||||
|         # point. | ||||
|         self._expect_result: Optional[Context] = None | ||||
|         self._streams: set[MsgStream] = set() | ||||
|         self.actor = current_actor() | ||||
| 
 | ||||
|     async def _submit_for_result( | ||||
|         self, | ||||
|         ns: str, | ||||
|         func: str, | ||||
|         **kwargs | ||||
|     ) -> None: | ||||
| 
 | ||||
|         assert self._expect_result is None, \ | ||||
|                 "A pending main result has already been submitted" | ||||
| 
 | ||||
|         self._expect_result = await self.actor.start_remote_task( | ||||
|             self.channel, | ||||
|             ns, | ||||
|             func, | ||||
|             kwargs | ||||
|         ) | ||||
| 
 | ||||
|     async def _return_once( | ||||
|         self, | ||||
|         ctx: Context, | ||||
| 
 | ||||
|     ) -> dict[str, Any]: | ||||
| 
 | ||||
|         assert ctx._remote_func_type == 'asyncfunc'  # single response | ||||
|         msg = await ctx._recv_chan.receive() | ||||
|         return msg | ||||
|                 raise unpack_error(msg, self.channel) | ||||
|         else: | ||||
|             raise ValueError(f"Unknown msg response type: {first_msg}") | ||||
| 
 | ||||
|     async def result(self) -> Any: | ||||
|         ''' | ||||
|         Return the result(s) from the remote actor's "main" task. | ||||
| 
 | ||||
|         ''' | ||||
|         # __tracebackhide__ = True | ||||
|         """Return the result(s) from the remote actor's "main" task. | ||||
|         """ | ||||
|         # Check for non-rpc errors slapped on the | ||||
|         # channel for which we always raise | ||||
|         exc = self.channel._exc | ||||
|  | @ -156,19 +317,25 @@ class Portal: | |||
| 
 | ||||
|         # expecting a "main" result | ||||
|         assert self._expect_result | ||||
| 
 | ||||
|         if self._result_msg is None: | ||||
|             self._result_msg = await self._return_once( | ||||
|                 self._expect_result | ||||
|         if self._result is None: | ||||
|             try: | ||||
|                 self._result = await self._return_from_resptype( | ||||
|                     *self._expect_result | ||||
|                 ) | ||||
|             except RemoteActorError as err: | ||||
|                 self._result = err | ||||
| 
 | ||||
|         return _unwrap_msg(self._result_msg, self.channel) | ||||
|         # re-raise error on every call | ||||
|         if isinstance(self._result, RemoteActorError): | ||||
|             raise self._result | ||||
| 
 | ||||
|         return self._result | ||||
| 
 | ||||
|     async def _cancel_streams(self): | ||||
|         # terminate all locally running async generator | ||||
|         # IPC calls | ||||
|         if self._streams: | ||||
|             log.cancel( | ||||
|             log.warning( | ||||
|                 f"Cancelling all streams with {self.channel.uid}") | ||||
|             for stream in self._streams.copy(): | ||||
|                 try: | ||||
|  | @ -187,407 +354,80 @@ class Portal: | |||
|         # we'll need to .aclose all those channels here | ||||
|         await self._cancel_streams() | ||||
| 
 | ||||
|     async def cancel_actor( | ||||
|         self, | ||||
|         timeout: float | None = None, | ||||
| 
 | ||||
|     ) -> bool: | ||||
|         ''' | ||||
|         Cancel the actor on the other end of this portal. | ||||
| 
 | ||||
|         ''' | ||||
|     async def cancel_actor(self): | ||||
|         """Cancel the actor on the other end of this portal. | ||||
|         """ | ||||
|         if not self.channel.connected(): | ||||
|             log.cancel("This channel is already closed can't cancel") | ||||
|             log.warning("This portal is already closed can't cancel") | ||||
|             return False | ||||
| 
 | ||||
|         log.cancel( | ||||
|         await self._cancel_streams() | ||||
| 
 | ||||
|         log.warning( | ||||
|             f"Sending actor cancel request to {self.channel.uid} on " | ||||
|             f"{self.channel}") | ||||
| 
 | ||||
|         self.channel._cancel_called = True | ||||
| 
 | ||||
|         try: | ||||
|             # send cancel cmd - might not get response | ||||
|             # XXX: sure would be nice to make this work with a proper shield | ||||
|             with trio.move_on_after(timeout or self.cancel_timeout) as cs: | ||||
|                 cs.shield = True | ||||
|             # with trio.CancelScope() as cancel_scope: | ||||
|             # with trio.CancelScope(shield=True) as cancel_scope: | ||||
|             with trio.move_on_after(0.5) as cancel_scope: | ||||
|                 cancel_scope.shield = True | ||||
| 
 | ||||
|                 await self.run_from_ns('self', 'cancel') | ||||
|                 return True | ||||
| 
 | ||||
|             if cs.cancelled_caught: | ||||
|                 log.cancel(f"May have failed to cancel {self.channel.uid}") | ||||
|             if cancel_scope.cancelled_caught: | ||||
|                 log.warning(f"May have failed to cancel {self.channel.uid}") | ||||
| 
 | ||||
|             # if we get here some weird cancellation case happened | ||||
|             return False | ||||
| 
 | ||||
|         except ( | ||||
|             trio.ClosedResourceError, | ||||
|             trio.BrokenResourceError, | ||||
|         ): | ||||
|             log.cancel( | ||||
|                 f"{self.channel} for {self.channel.uid} was already " | ||||
|                 "closed or broken?") | ||||
|             return False | ||||
| 
 | ||||
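
Both versions above share the same core pattern: a shielded timeout around a fire-and-forget cancel request. Distilled into plain `trio` (a sketch; `send_request` stands in for the RPC call):

    import trio

    async def request_cancel(send_request, timeout: float = 0.5) -> bool:
        # shield so our own cancellation can't interrupt the request,
        # but bound the wait in case the far end never responds
        with trio.move_on_after(timeout) as cs:
            cs.shield = True
            await send_request()
            return True
        # timed out: the cancel may or may not have landed
        return False
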
|     async def run_from_ns( | ||||
|         self, | ||||
|         namespace_path: str, | ||||
|         function_name: str, | ||||
|         **kwargs, | ||||
|     ) -> Any: | ||||
|         ''' | ||||
|         Run a function from a (remote) namespace in a new task on the | ||||
|         far-end actor. | ||||
| 
 | ||||
|         This is a more explicit way to run tasks in a remote-process | ||||
|         actor using explicit object-path syntax. Hint: this is how | ||||
|         `.run()` works underneath. | ||||
| 
 | ||||
|         Note:: | ||||
| 
 | ||||
|             A special namespace `self` can be used to invoke `Actor` | ||||
|             instance methods in the remote runtime. Currently this | ||||
|             should be used solely for ``tractor`` runtime | ||||
|             internals. | ||||
| 
 | ||||
|         ''' | ||||
|         ctx = await self.actor.start_remote_task( | ||||
|             self.channel, | ||||
|             namespace_path, | ||||
|             function_name, | ||||
|             kwargs, | ||||
|         ) | ||||
|         ctx._portal = self | ||||
|         msg = await self._return_once(ctx) | ||||
|         return _unwrap_msg(msg, self.channel) | ||||
| 
 | ||||
|     async def run( | ||||
|         self, | ||||
|         func: str, | ||||
|         fn_name: Optional[str] = None, | ||||
|         **kwargs | ||||
|     ) -> Any: | ||||
|         ''' | ||||
|         Submit a remote function to be scheduled and run by actor, in | ||||
|         a new task, wrap and return its (stream of) result(s). | ||||
| 
 | ||||
|         This is a blocking call and returns either a value from the | ||||
|         remote rpc task or a local async generator instance. | ||||
| 
 | ||||
|         ''' | ||||
|         if isinstance(func, str): | ||||
|             warnings.warn( | ||||
|                 "`Portal.run(namespace: str, funcname: str)` is now" | ||||
|                 "deprecated, pass a function reference directly instead\n" | ||||
|                 "If you still want to run a remote function by name use" | ||||
|                 "`Portal.run_from_ns()`", | ||||
|                 DeprecationWarning, | ||||
|                 stacklevel=2, | ||||
|             ) | ||||
|             fn_mod_path = func | ||||
|             assert isinstance(fn_name, str) | ||||
| 
 | ||||
|         else:  # function reference was passed directly | ||||
|             if ( | ||||
|                 not inspect.iscoroutinefunction(func) or | ||||
|                 ( | ||||
|                     inspect.iscoroutinefunction(func) and | ||||
|                     getattr(func, '_tractor_stream_function', False) | ||||
|                 ) | ||||
|             ): | ||||
|                 raise TypeError( | ||||
|                     f'{func} must be a non-streaming async function!') | ||||
| 
 | ||||
|             fn_mod_path, fn_name = NamespacePath.from_ref(func).to_tuple() | ||||
| 
 | ||||
|         ctx = await self.actor.start_remote_task( | ||||
|             self.channel, | ||||
|             fn_mod_path, | ||||
|             fn_name, | ||||
|             kwargs, | ||||
|         ) | ||||
|         ctx._portal = self | ||||
|         return _unwrap_msg( | ||||
|             await self._return_once(ctx), | ||||
|             self.channel, | ||||
|         ) | ||||
| 
 | ||||
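
Usage-wise the deprecation above means passing the async function reference instead of a namespace-path string (a hedged sketch; the function and module-enabling details are illustrative assumptions):

    import trio
    import tractor

    async def fetch(url: str) -> str:
        await trio.sleep(0)  # placeholder for real I/O
        return f'contents of {url}'

    async def main():
        async with tractor.open_nursery() as n:
            portal = await n.start_actor('fetcher', enable_modules=[__name__])
            # preferred: pass the (non-streaming) async function directly;
            # the string form `portal.run('mod.path', 'fetch', ...)` still
            # works but emits a DeprecationWarning
            page = await portal.run(fetch, url='https://example.com')
            print(page)
            await portal.cancel_actor()

    if __name__ == '__main__':
        trio.run(main)
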
|     @asynccontextmanager | ||||
|     async def open_stream_from( | ||||
|         self, | ||||
|         async_gen_func: Callable,  # typing: ignore | ||||
|         **kwargs, | ||||
| 
 | ||||
|     ) -> AsyncGenerator[MsgStream, None]: | ||||
| 
 | ||||
|         if not inspect.isasyncgenfunction(async_gen_func): | ||||
|             if not ( | ||||
|                 inspect.iscoroutinefunction(async_gen_func) and | ||||
|                 getattr(async_gen_func, '_tractor_stream_function', False) | ||||
|             ): | ||||
|                 raise TypeError( | ||||
|                     f'{async_gen_func} must be an async generator function!') | ||||
| 
 | ||||
|         fn_mod_path, fn_name = NamespacePath.from_ref( | ||||
|             async_gen_func).to_tuple() | ||||
|         ctx = await self.actor.start_remote_task( | ||||
|             self.channel, | ||||
|             fn_mod_path, | ||||
|             fn_name, | ||||
|             kwargs | ||||
|         ) | ||||
|         ctx._portal = self | ||||
| 
 | ||||
|         # ensure receive-only stream entrypoint | ||||
|         assert ctx._remote_func_type == 'asyncgen' | ||||
| 
 | ||||
|         try: | ||||
|             # deliver receive only stream | ||||
|             async with MsgStream( | ||||
|                 ctx, ctx._recv_chan, | ||||
|             ) as rchan: | ||||
|                 self._streams.add(rchan) | ||||
|                 yield rchan | ||||
| 
 | ||||
|         finally: | ||||
| 
 | ||||
|             # cancel the far end task on consumer close | ||||
|             # NOTE: this is a special case since we assume that if using | ||||
|             # this ``.open_stream_from()`` api, the stream is a one- | ||||
|             # time use and we couple the far end task's lifetime to | ||||
|             # the consumer's scope; we don't ever send a `'stop'` | ||||
|             # message right now since there shouldn't be a reason to | ||||
|             # stop and restart the stream, right? | ||||
|             try: | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     await ctx.cancel() | ||||
| 
 | ||||
|             except trio.ClosedResourceError: | ||||
|                 # if the far end terminates before we send a cancel the | ||||
|                 # underlying transport-channel may already be closed. | ||||
|                 log.cancel(f'Context {ctx} was already closed?') | ||||
| 
 | ||||
|             # XXX: should this always be done? | ||||
|             # await recv_chan.aclose() | ||||
|             self._streams.remove(rchan) | ||||
| 
 | ||||
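
A hedged consumer-side sketch of the one-shot streaming behavior noted above (actor/function names are illustrative):

    import trio
    import tractor

    async def counter(limit: int):
        # an async generator: each yield becomes a streamed msg
        for i in range(limit):
            yield i
            await trio.sleep(0)

    async def main():
        async with tractor.open_nursery() as n:
            portal = await n.start_actor('streamer', enable_modules=[__name__])
            async with portal.open_stream_from(counter, limit=3) as stream:
                async for value in stream:
                    print(value)
            # exiting the block cancels the far-end task per the NOTE above
            await portal.cancel_actor()

    if __name__ == '__main__':
        trio.run(main)
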
|     @asynccontextmanager | ||||
|     async def open_context( | ||||
| 
 | ||||
|         self, | ||||
|         func: Callable, | ||||
|         **kwargs, | ||||
| 
 | ||||
|     ) -> AsyncGenerator[tuple[Context, Any], None]: | ||||
|         ''' | ||||
|         Open an inter-actor task context. | ||||
| 
 | ||||
|         This is a synchronous API which allows for deterministic | ||||
|         setup/teardown of a remote task. The yielded ``Context`` further | ||||
|         allows for opening bidirectional streams, explicit cancellation | ||||
|         and synchronized final result collection. See ``tractor.Context``. | ||||
| 
 | ||||
|         ''' | ||||
|         # conduct target func method structural checks | ||||
|         if not inspect.iscoroutinefunction(func) and ( | ||||
|             getattr(func, '_tractor_contex_function', False) | ||||
|         ): | ||||
|             raise TypeError( | ||||
|                 f'{func} must be an async generator function!') | ||||
| 
 | ||||
|         fn_mod_path, fn_name = NamespacePath.from_ref(func).to_tuple() | ||||
| 
 | ||||
|         ctx = await self.actor.start_remote_task( | ||||
|             self.channel, | ||||
|             fn_mod_path, | ||||
|             fn_name, | ||||
|             kwargs | ||||
|         ) | ||||
| 
 | ||||
|         assert ctx._remote_func_type == 'context' | ||||
|         msg = await ctx._recv_chan.receive() | ||||
| 
 | ||||
|         try: | ||||
|             # the "first" value here is delivered by the callee's | ||||
|             # ``Context.started()`` call. | ||||
|             first = msg['started'] | ||||
|             ctx._started_called = True | ||||
| 
 | ||||
|         except KeyError: | ||||
|             assert msg.get('cid'), ("Received internal error at context?") | ||||
| 
 | ||||
|             if msg.get('error'): | ||||
|                 # raise kerr from unpack_error(msg, self.channel) | ||||
|                 raise unpack_error(msg, self.channel) from None | ||||
|             else: | ||||
|                 raise MessagingError( | ||||
|                     f'Context for {ctx.cid} was expecting a `started` message' | ||||
|                     f' but received a non-error msg:\n{pformat(msg)}' | ||||
|                 ) | ||||
| 
 | ||||
|         _err: Optional[BaseException] = None | ||||
|         ctx._portal = self | ||||
| 
 | ||||
|         uid = self.channel.uid | ||||
|         cid = ctx.cid | ||||
|         etype: Optional[Type[BaseException]] = None | ||||
| 
 | ||||
|         # deliver context instance and .started() msg value in open tuple. | ||||
|         try: | ||||
|             async with trio.open_nursery() as scope_nursery: | ||||
|                 ctx._scope_nursery = scope_nursery | ||||
| 
 | ||||
|                 # do we need this? | ||||
|                 # await trio.lowlevel.checkpoint() | ||||
| 
 | ||||
|                 yield ctx, first | ||||
| 
 | ||||
|         except ContextCancelled as err: | ||||
|             _err = err | ||||
|             if not ctx._cancel_called: | ||||
|                 # context was cancelled at the far end but was | ||||
|                 # not part of this end requesting that cancel | ||||
|                 # so raise for the local task to respond and handle. | ||||
|                 raise | ||||
| 
 | ||||
|             # if the context was cancelled by client code | ||||
|             # then we don't need to raise since user code | ||||
|             # is expecting this and the block should exit. | ||||
|             else: | ||||
|                 log.debug(f'Context {ctx} cancelled gracefully') | ||||
| 
 | ||||
|         except ( | ||||
|             BaseException, | ||||
| 
 | ||||
|             # more specifically, we need to handle these but not | ||||
|             # sure it's worth being pedantic: | ||||
|             # Exception, | ||||
|             # trio.Cancelled, | ||||
|             # KeyboardInterrupt, | ||||
| 
 | ||||
|         ) as err: | ||||
|             etype = type(err) | ||||
|             # the context cancels itself on any cancel | ||||
|             # causing error. | ||||
| 
 | ||||
|             if ctx.chan.connected(): | ||||
|                 log.cancel( | ||||
|                     'Context cancelled for task, sending cancel request..\n' | ||||
|                     f'task:{cid}\n' | ||||
|                     f'actor:{uid}' | ||||
|                 ) | ||||
|                 await ctx.cancel() | ||||
|             else: | ||||
|                 log.warning( | ||||
|                     'IPC connection for context is broken?\n' | ||||
|                     f'task:{cid}\n' | ||||
|                     f'actor:{uid}' | ||||
|                 ) | ||||
| 
 | ||||
|             raise | ||||
| 
 | ||||
|         finally: | ||||
|             # in the case where a runtime nursery (due to internal bug) | ||||
|             # or a remote actor transmits an error we want to be | ||||
|             # sure we get the error from the underlying feeder mem chan. | ||||
|             # if it's not raised here it *should* be raised from the | ||||
|             # msg loop nursery right? | ||||
|             if ctx.chan.connected(): | ||||
|                 log.info( | ||||
|                     'Waiting on final context-task result for\n' | ||||
|                     f'task: {cid}\n' | ||||
|                     f'actor: {uid}' | ||||
|                 ) | ||||
|                 result = await ctx.result() | ||||
|                 log.runtime( | ||||
|                     f'Context {fn_name} returned ' | ||||
|                     f'value from callee `{result}`' | ||||
|                 ) | ||||
| 
 | ||||
|             # though it should be impossible for any tasks | ||||
|             # operating *in* this scope to have survived | ||||
|             # we tear down the runtime feeder chan last | ||||
|             # to avoid premature stream clobbers. | ||||
|             if ctx._recv_chan is not None: | ||||
|                 # should we encapsulate this in the context api? | ||||
|                 await ctx._recv_chan.aclose() | ||||
| 
 | ||||
|             if etype: | ||||
|                 if ctx._cancel_called: | ||||
|                     log.cancel( | ||||
|                         f'Context {fn_name} cancelled by caller with\n{etype}' | ||||
|                     ) | ||||
|                 elif _err is not None: | ||||
|                     log.cancel( | ||||
|                         f'Context for task cancelled by callee with {etype}\n' | ||||
|                         f'target: `{fn_name}`\n' | ||||
|                         f'task:{cid}\n' | ||||
|                         f'actor:{uid}' | ||||
|                     ) | ||||
|             # XXX: (MEGA IMPORTANT) if this is a root opened process we | ||||
|             # wait for any immediate child in debug before popping the | ||||
|             # context from the runtime msg loop otherwise inside | ||||
|             # ``Actor._push_result()`` the msg will be discarded and in | ||||
|             # the case where that msg is global debugger unlock (via | ||||
|             # a "stop" msg for a stream), this can result in a deadlock | ||||
|             # where the root is waiting on the lock to clear but the | ||||
|             # child has already cleared it and clobbered IPC. | ||||
|             from ._debug import maybe_wait_for_debugger | ||||
|             await maybe_wait_for_debugger() | ||||
| 
 | ||||
|             # remove the context from runtime tracking | ||||
|             self.actor._contexts.pop( | ||||
|                 (self.channel.uid, ctx.cid), | ||||
|                 None, | ||||
|             ) | ||||
|                 f"{self.channel} for {self.channel.uid} was already closed?") | ||||
|             return False | ||||
| 
 | ||||
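
A hedged caller/callee sketch of the context API described above (assumes the `@tractor.context` decorator and `Context.started()` shown in this diff; names are illustrative):

    import trio
    import tractor

    @tractor.context
    async def greeter(ctx: tractor.Context, name: str) -> str:
        # the value passed to `.started()` becomes `first` on the caller side
        await ctx.started(f'hello {name}')
        return 'done'

    async def main():
        async with tractor.open_nursery() as n:
            portal = await n.start_actor('greeter', enable_modules=[__name__])
            async with portal.open_context(greeter, name='bob') as (ctx, first):
                assert first == 'hello bob'
            # the final result is collected on block exit via `ctx.result()`
            await portal.cancel_actor()

    if __name__ == '__main__':
        trio.run(main)
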
| 
 | ||||
| @dataclass | ||||
| class LocalPortal: | ||||
|     ''' | ||||
|     A 'portal' to a local ``Actor``. | ||||
|     """A 'portal' to a local ``Actor``. | ||||
| 
 | ||||
|     A compatibility shim for normal portals but for invoking functions | ||||
|     using an in process actor instance. | ||||
| 
 | ||||
|     ''' | ||||
|     """ | ||||
|     actor: 'Actor'  # type: ignore # noqa | ||||
|     channel: Channel | ||||
| 
 | ||||
|     async def run_from_ns(self, ns: str, func_name: str, **kwargs) -> Any: | ||||
|         ''' | ||||
|         Run a requested local function from a namespace path and | ||||
|         """Run a requested local function from a namespace path and | ||||
|         return its result. | ||||
| 
 | ||||
|         ''' | ||||
|         """ | ||||
|         obj = self.actor if ns == 'self' else importlib.import_module(ns) | ||||
|         func = getattr(obj, func_name) | ||||
|         if inspect.iscoroutinefunction(func): | ||||
|             return await func(**kwargs) | ||||
|         else: | ||||
|             return func(**kwargs) | ||||
| 
 | ||||
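
The dispatch above is a generic "call it, awaiting only if needed" helper; the same idea in isolation (a sketch, not from this diff):

    import importlib
    import inspect

    async def call_by_path(ns: str, func_name: str, **kwargs):
        # resolve the module, look up the target, and invoke it;
        # await only when the target is a coroutine function
        func = getattr(importlib.import_module(ns), func_name)
        if inspect.iscoroutinefunction(func):
            return await func(**kwargs)
        return func(**kwargs)
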
| 
 | ||||
| @asynccontextmanager | ||||
| async def open_portal( | ||||
| 
 | ||||
|     channel: Channel, | ||||
|     nursery: Optional[trio.Nursery] = None, | ||||
|     start_msg_loop: bool = True, | ||||
|     shield: bool = False, | ||||
| ) -> typing.AsyncGenerator[Portal, None]: | ||||
|     """Open a ``Portal`` through the provided ``channel``. | ||||
| 
 | ||||
| ) -> AsyncGenerator[Portal, None]: | ||||
|     ''' | ||||
|     Open a ``Portal`` through the provided ``channel``. | ||||
| 
 | ||||
|     Spawns a background task to handle message processing (normally | ||||
|     done by the actor-runtime implicitly). | ||||
| 
 | ||||
|     ''' | ||||
|     Spawns a background task to handle message processing. | ||||
|     """ | ||||
|     actor = current_actor() | ||||
|     assert actor | ||||
|     was_connected = False | ||||
| 
 | ||||
|     async with maybe_open_nursery(nursery, shield=shield) as nursery: | ||||
| 
 | ||||
|         if not channel.connected(): | ||||
|             await channel.connect() | ||||
|             was_connected = True | ||||
|  | @ -597,11 +437,9 @@ async def open_portal( | |||
| 
 | ||||
|         msg_loop_cs: Optional[trio.CancelScope] = None | ||||
|         if start_msg_loop: | ||||
|             from ._runtime import process_messages | ||||
|             msg_loop_cs = await nursery.start( | ||||
|                 partial( | ||||
|                     process_messages, | ||||
|                     actor, | ||||
|                     actor._process_messages, | ||||
|                     channel, | ||||
|                     # if the local task is cancelled we want to keep | ||||
|                     # the msg loop running until our block ends | ||||
|  | @ -615,9 +453,8 @@ async def open_portal( | |||
|             await portal.aclose() | ||||
| 
 | ||||
|             if was_connected: | ||||
|                 # gracefully signal remote channel-msg loop | ||||
|                 # cancel remote channel-msg loop | ||||
|                 await channel.send(None) | ||||
|                 # await channel.aclose() | ||||
| 
 | ||||
|             # cancel background msg loop task | ||||
|             if msg_loop_cs: | ||||
231  tractor/_root.py
|  | @ -1,53 +1,27 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| ''' | ||||
| """ | ||||
| Root actor runtime ignition(s). | ||||
| 
 | ||||
| ''' | ||||
| """ | ||||
| from contextlib import asynccontextmanager | ||||
| from functools import partial | ||||
| import importlib | ||||
| import logging | ||||
| import signal | ||||
| import sys | ||||
| import os | ||||
| from typing import Tuple, Optional, List, Any | ||||
| import typing | ||||
| import warnings | ||||
| 
 | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import trio | ||||
| 
 | ||||
| from ._runtime import ( | ||||
|     Actor, | ||||
|     Arbiter, | ||||
|     async_main, | ||||
| ) | ||||
| from ._actor import Actor, Arbiter | ||||
| from . import _debug | ||||
| from . import _spawn | ||||
| from . import _state | ||||
| from . import log | ||||
| from ._ipc import _connect_chan | ||||
| from ._exceptions import is_multi_cancelled | ||||
| 
 | ||||
| 
 | ||||
| # set at startup and after forks | ||||
| _default_arbiter_host: str = '127.0.0.1' | ||||
| _default_arbiter_port: int = 1616 | ||||
| _default_arbiter_host = '127.0.0.1' | ||||
| _default_arbiter_port = 1616 | ||||
| 
 | ||||
| 
 | ||||
| logger = log.get_logger('tractor') | ||||
|  | @ -56,45 +30,37 @@ logger = log.get_logger('tractor') | |||
| @asynccontextmanager | ||||
| async def open_root_actor( | ||||
| 
 | ||||
|     *, | ||||
|     # defaults are above | ||||
|     arbiter_addr: tuple[str, int] | None = None, | ||||
|     arbiter_addr: Tuple[str, int] = ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port, | ||||
|     ), | ||||
| 
 | ||||
|     # defaults are above | ||||
|     registry_addr: tuple[str, int] | None = None, | ||||
| 
 | ||||
|     name: str | None = 'root', | ||||
|     name: Optional[str] = 'root', | ||||
| 
 | ||||
|     # either the `multiprocessing` start method: | ||||
|     # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods | ||||
|     # OR `trio` (the new default). | ||||
|     start_method: _spawn.SpawnMethodKey | None = None, | ||||
|     start_method: Optional[str] = None, | ||||
| 
 | ||||
|     # enables the multi-process debugger support | ||||
|     debug_mode: bool = False, | ||||
| 
 | ||||
|     # internal logging | ||||
|     loglevel: str | None = None, | ||||
|     loglevel: Optional[str] = None, | ||||
| 
 | ||||
|     enable_modules: list | None = None, | ||||
|     rpc_module_paths: list | None = None, | ||||
|     enable_modules: Optional[List] = None, | ||||
|     rpc_module_paths: Optional[List] = None, | ||||
| 
 | ||||
| ) -> typing.Any: | ||||
|     ''' | ||||
|     Runtime init entry point for ``tractor``. | ||||
|     """Async entry point for ``tractor``. | ||||
| 
 | ||||
|     ''' | ||||
|     """ | ||||
|     # Override the global debugger hook to make it play nice with | ||||
|     # ``trio``, see much discussion in: | ||||
|     # ``trio``, see: | ||||
|     # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018 | ||||
|     builtin_bp_handler = sys.breakpointhook | ||||
|     orig_bp_path: str | None = os.environ.get('PYTHONBREAKPOINT', None) | ||||
|     os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace' | ||||
| 
 | ||||
|     # attempt to retrieve ``trio``'s sigint handler and stash it | ||||
|     # on our debugger lock state. | ||||
|     _debug.Lock._trio_handler = signal.getsignal(signal.SIGINT) | ||||
| 
 | ||||
|     # mark top most level process as root actor | ||||
|     _state._runtime_vars['_is_root'] = True | ||||
| 
 | ||||
|  | @ -113,25 +79,6 @@ async def open_root_actor( | |||
|     if start_method is not None: | ||||
|         _spawn.try_set_start_method(start_method) | ||||
| 
 | ||||
|     if arbiter_addr is not None: | ||||
|         warnings.warn( | ||||
|             '`arbiter_addr` is now deprecated and has been renamed to' | ||||
|             '`registry_addr`.\nUse that instead..', | ||||
|             DeprecationWarning, | ||||
|             stacklevel=2, | ||||
|         ) | ||||
| 
 | ||||
|     registry_addr = (host, port) = ( | ||||
|         registry_addr | ||||
|         or arbiter_addr | ||||
|         or ( | ||||
|             _default_arbiter_host, | ||||
|             _default_arbiter_port, | ||||
|         ) | ||||
|     ) | ||||
| 
 | ||||
|     loglevel = (loglevel or log._default_loglevel).upper() | ||||
| 
 | ||||
|     if debug_mode and _spawn._spawn_method == 'trio': | ||||
|         _state._runtime_vars['_debug_mode'] = True | ||||
| 
 | ||||
|  | @ -139,41 +86,30 @@ async def open_root_actor( | |||
|         # for use of ``await tractor.breakpoint()`` | ||||
|         enable_modules.append('tractor._debug') | ||||
| 
 | ||||
|         # if debug mode gets enabled *at least* use that level of | ||||
|         # logging for some informative console prompts. | ||||
|         if ( | ||||
|             logging.getLevelName( | ||||
|                 # lul, need the upper case for the -> int map? | ||||
|                 # sweet "dynamic function behaviour" stdlib... | ||||
|                 loglevel, | ||||
|             ) > logging.getLevelName('PDB') | ||||
|         ): | ||||
|             loglevel = 'PDB' | ||||
| 
 | ||||
|     elif debug_mode: | ||||
|         raise RuntimeError( | ||||
|             "Debug mode is only supported for the `trio` backend!" | ||||
|         ) | ||||
| 
 | ||||
|     arbiter_addr = (host, port) = arbiter_addr or ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port | ||||
|     ) | ||||
| 
 | ||||
|     loglevel = loglevel or log.get_loglevel() | ||||
|     if loglevel is not None: | ||||
|         log._default_loglevel = loglevel | ||||
|         log.get_console_log(loglevel) | ||||
| 
 | ||||
|     try: | ||||
|         # make a temporary connection to see if an arbiter exists, | ||||
|         # if one can't be made quickly we assume none exists. | ||||
|     # make a temporary connection to see if an arbiter exists | ||||
|     arbiter_found = False | ||||
| 
 | ||||
|         # TODO: this connect-and-bail forces us to have to carefully | ||||
|         # rewrap TCP 104-connection-reset errors as EOF so as to avoid | ||||
|         # propagating cancel-causing errors to the channel-msg loop | ||||
|         # machinery.  Likely it would be better to eventually have | ||||
|         # a "discovery" protocol with basic handshake instead. | ||||
|         with trio.move_on_after(1): | ||||
|     try: | ||||
|         async with _connect_chan(host, port): | ||||
|             arbiter_found = True | ||||
| 
 | ||||
|     except OSError: | ||||
|         # TODO: make this a "discovery" log level? | ||||
|         logger.warning(f"No actor registry found @ {host}:{port}") | ||||
|         logger.warning(f"No actor could be found @ {host}:{port}") | ||||
| 
 | ||||
|     # create a local actor and start up its main routine/task | ||||
|     if arbiter_found: | ||||
|  | @ -183,7 +119,7 @@ async def open_root_actor( | |||
| 
 | ||||
|         actor = Actor( | ||||
|             name or 'anonymous', | ||||
|             arbiter_addr=registry_addr, | ||||
|             arbiter_addr=arbiter_addr, | ||||
|             loglevel=loglevel, | ||||
|             enable_modules=enable_modules, | ||||
|         ) | ||||
|  | @ -199,7 +135,7 @@ async def open_root_actor( | |||
| 
 | ||||
|         actor = Arbiter( | ||||
|             name or 'arbiter', | ||||
|             arbiter_addr=registry_addr, | ||||
|             arbiter_addr=arbiter_addr, | ||||
|             loglevel=loglevel, | ||||
|             enable_modules=enable_modules, | ||||
|         ) | ||||
|  | @ -215,14 +151,13 @@ async def open_root_actor( | |||
|         # start the actor runtime in a new task | ||||
|         async with trio.open_nursery() as nursery: | ||||
| 
 | ||||
|             # ``_runtime.async_main()`` creates an internal nursery and | ||||
|             # ``Actor._async_main()`` creates an internal nursery and | ||||
|             # thus blocks here until the entire underlying actor tree has | ||||
|             # terminated thereby conducting structured concurrency. | ||||
| 
 | ||||
|             await nursery.start( | ||||
|                 partial( | ||||
|                     async_main, | ||||
|                     actor, | ||||
|                     actor._async_main, | ||||
|                     accept_addr=(host, port), | ||||
|                     parent_addr=None | ||||
|                 ) | ||||
|  | @ -230,83 +165,77 @@ async def open_root_actor( | |||
|             try: | ||||
|                 yield actor | ||||
| 
 | ||||
|             except ( | ||||
|                 Exception, | ||||
|                 BaseExceptionGroup, | ||||
|             ) as err: | ||||
|             except (Exception, trio.MultiError) as err: | ||||
|                 logger.exception("Actor crashed:") | ||||
|                 await _debug._maybe_enter_pm(err) | ||||
| 
 | ||||
|                 entered = await _debug._maybe_enter_pm(err) | ||||
| 
 | ||||
|                 if not entered and not is_multi_cancelled(err): | ||||
|                     logger.exception("Root actor crashed:") | ||||
| 
 | ||||
|                 # always re-raise | ||||
|                 raise | ||||
| 
 | ||||
|             finally: | ||||
|                 # NOTE: not sure if we'll ever need this but it's | ||||
|                 # possibly better for even more determinism? | ||||
|                 # logger.cancel( | ||||
|                 #     f'Waiting on {len(nurseries)} nurseries in root..') | ||||
|                 # nurseries = actor._actoruid2nursery.values() | ||||
|                 # async with trio.open_nursery() as tempn: | ||||
|                 #     for an in nurseries: | ||||
|                 #         tempn.start_soon(an.exited.wait) | ||||
| 
 | ||||
|                 logger.cancel("Shutting down root actor") | ||||
|                 logger.info("Shutting down root actor") | ||||
|                 await actor.cancel() | ||||
|     finally: | ||||
|         _state._current_actor = None | ||||
| 
 | ||||
|         # restore breakpoint hook state | ||||
|         sys.breakpointhook = builtin_bp_handler | ||||
|         if orig_bp_path is not None: | ||||
|             os.environ['PYTHONBREAKPOINT'] = orig_bp_path | ||||
|         else: | ||||
|             # clear env back to having no entry | ||||
|             os.environ.pop('PYTHONBREAKPOINT') | ||||
| 
 | ||||
|         logger.runtime("Root actor terminated") | ||||
|         logger.info("Root actor terminated") | ||||
| 
 | ||||
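
For reference, a hedged sketch of manually igniting the runtime with this entrypoint (normally it is started implicitly by the first nursery use; all kwargs shown appear in the signature above):

    import trio
    import tractor

    async def main():
        async with tractor.open_root_actor(
            name='root',
            loglevel='info',
            debug_mode=False,
        ):
            # the runtime is up; open actor nurseries in here
            await trio.sleep(0.1)

    if __name__ == '__main__':
        trio.run(main)
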
| 
 | ||||
| def run_daemon( | ||||
|     enable_modules: list[str], | ||||
| def run( | ||||
| 
 | ||||
|     # target | ||||
|     async_fn: typing.Callable[..., typing.Awaitable], | ||||
|     *args, | ||||
| 
 | ||||
|     # runtime kwargs | ||||
|     name: str | None = 'root', | ||||
|     registry_addr: tuple[str, int] = ( | ||||
|     name: Optional[str] = 'root', | ||||
|     arbiter_addr: Tuple[str, int] = ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port, | ||||
|     ), | ||||
| 
 | ||||
|     start_method: str | None = None, | ||||
|     start_method: Optional[str] = None, | ||||
|     debug_mode: bool = False, | ||||
|     **kwargs | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Spawn daemon actor which will respond to RPC; the main task simply | ||||
|     starts the runtime and then sleeps forever. | ||||
| 
 | ||||
|     This is a very minimal convenience wrapper around starting | ||||
|     a "run-until-cancelled" root actor which can be started with a set | ||||
|     of enabled modules for RPC request handling. | ||||
| 
 | ||||
|     ''' | ||||
|     kwargs['enable_modules'] = list(enable_modules) | ||||
| 
 | ||||
|     for path in enable_modules: | ||||
|         importlib.import_module(path) | ||||
| ) -> Any: | ||||
|     """Run a trio-actor async function in process. | ||||
| 
 | ||||
|     This is tractor's main entry and the start point for any async actor. | ||||
|     """ | ||||
|     async def _main(): | ||||
| 
 | ||||
|         async with open_root_actor( | ||||
|             registry_addr=registry_addr, | ||||
|             arbiter_addr=arbiter_addr, | ||||
|             name=name, | ||||
|             start_method=start_method, | ||||
|             debug_mode=debug_mode, | ||||
|             **kwargs, | ||||
|         ): | ||||
|             return await trio.sleep_forever() | ||||
| 
 | ||||
|             return await async_fn(*args) | ||||
| 
 | ||||
|     warnings.warn( | ||||
|         "`tractor.run()` is now deprecated. `tractor` now" | ||||
|         " implicitly starts the root actor on first actor nursery" | ||||
|         " use. If you want to start the root actor manually, use" | ||||
|         " `tractor.open_root_actor()`.", | ||||
|         DeprecationWarning, | ||||
|         stacklevel=2, | ||||
|     ) | ||||
|     return trio.run(_main) | ||||
| 
 | ||||
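
In practice the deprecation above means replacing the old entrypoint with a plain `trio.run()` (sketch):

    import trio
    import tractor

    async def main():
        async with tractor.open_nursery() as n:
            ...  # spawn actors here; the root actor starts implicitly

    # old, now warns:  tractor.run(main)
    # new:
    if __name__ == '__main__':
        trio.run(main)
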
| 
 | ||||
| def run_daemon( | ||||
|     rpc_module_paths: List[str], | ||||
|     **kwargs | ||||
| ) -> None: | ||||
|     """Spawn daemon actor which will respond to RPC. | ||||
| 
 | ||||
|     This is a convenience wrapper around | ||||
|     ``tractor.run(trio.sleep(float('inf')))`` such that the first actor spawned | ||||
|     is meant to run forever responding to RPC requests. | ||||
|     """ | ||||
|     kwargs['rpc_module_paths'] = list(rpc_module_paths) | ||||
| 
 | ||||
|     for path in rpc_module_paths: | ||||
|         importlib.import_module(path) | ||||
| 
 | ||||
|     return run(partial(trio.sleep, float('inf')), **kwargs) | ||||
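
A hedged usage sketch (`my_service.api` is a hypothetical RPC module path):

    import tractor

    if __name__ == '__main__':
        # block forever serving RPC requests from the enabled module(s)
        tractor.run_daemon(['my_service.api'], name='rpc_daemon')
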
1760  tractor/_runtime.py  (file diff suppressed because it is too large)
							|  | @ -1,157 +1,116 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| Machinery for actor process spawning using multiple backends. | ||||
| 
 | ||||
| """ | ||||
| from __future__ import annotations | ||||
| import sys | ||||
| import inspect | ||||
| import multiprocessing as mp | ||||
| import platform | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Awaitable, | ||||
|     Literal, | ||||
|     Callable, | ||||
|     TypeVar, | ||||
|     TYPE_CHECKING, | ||||
| ) | ||||
| from typing import Any, Dict, Optional | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| from async_generator import aclosing, asynccontextmanager | ||||
| 
 | ||||
| from ._debug import ( | ||||
|     maybe_wait_for_debugger, | ||||
|     acquire_debug_lock, | ||||
| ) | ||||
| from ._state import ( | ||||
|     current_actor, | ||||
|     is_main_process, | ||||
|     is_root_process, | ||||
|     debug_mode, | ||||
| ) | ||||
| try: | ||||
|     from multiprocessing import semaphore_tracker  # type: ignore | ||||
|     resource_tracker = semaphore_tracker | ||||
|     resource_tracker._resource_tracker = resource_tracker._semaphore_tracker | ||||
| except ImportError: | ||||
|     # 3.8 introduces a more general version that also tracks shared mems | ||||
|     from multiprocessing import resource_tracker  # type: ignore | ||||
| 
 | ||||
| from multiprocessing import forkserver  # type: ignore | ||||
| from typing import Tuple | ||||
| 
 | ||||
| from . import _forkserver_override | ||||
| from ._state import current_actor, is_main_process | ||||
| from .log import get_logger | ||||
| from ._portal import Portal | ||||
| from ._runtime import Actor | ||||
| from ._actor import Actor, ActorFailure | ||||
| from ._entry import _mp_main | ||||
| from ._exceptions import ActorFailure | ||||
| 
 | ||||
| 
 | ||||
| if TYPE_CHECKING: | ||||
|     from ._supervise import ActorNursery | ||||
|     import multiprocessing as mp | ||||
|     ProcessType = TypeVar('ProcessType', mp.Process, trio.Process) | ||||
| 
 | ||||
| log = get_logger('tractor') | ||||
| 
 | ||||
| # placeholder for an mp start context if so using that backend | ||||
| _ctx: mp.context.BaseContext | None = None | ||||
| SpawnMethodKey = Literal[ | ||||
|     'trio',  # supported on all platforms | ||||
|     'mp_spawn', | ||||
|     'mp_forkserver',  # posix only | ||||
| ] | ||||
| _spawn_method: SpawnMethodKey = 'trio' | ||||
| _ctx: Optional[mp.context.BaseContext] = None | ||||
| _spawn_method: str = "spawn" | ||||
| 
 | ||||
| 
 | ||||
| if platform.system() == 'Windows': | ||||
| 
 | ||||
|     import multiprocessing as mp | ||||
|     _spawn_method = "spawn" | ||||
|     _ctx = mp.get_context("spawn") | ||||
| 
 | ||||
|     async def proc_waiter(proc: mp.Process) -> None: | ||||
|         await trio.lowlevel.WaitForSingleObject(proc.sentinel) | ||||
| else: | ||||
|     # *NIX systems use ``trio`` primitives as our default as well | ||||
|     # *NIX systems use ``trio`` primitives as our default | ||||
|     _spawn_method = "trio" | ||||
| 
 | ||||
|     async def proc_waiter(proc: mp.Process) -> None: | ||||
|         await trio.lowlevel.wait_readable(proc.sentinel) | ||||
| 
 | ||||
| 
 | ||||
| def try_set_start_method( | ||||
|     key: SpawnMethodKey | ||||
| 
 | ||||
| ) -> mp.context.BaseContext | None: | ||||
|     ''' | ||||
|     Attempt to set the method for process starting, aka the "actor | ||||
| def try_set_start_method(name: str) -> Optional[mp.context.BaseContext]: | ||||
|     """Attempt to set the method for process starting, aka the "actor | ||||
|     spawning backend". | ||||
| 
 | ||||
|     If the desired method is not supported this function will error. | ||||
|     On Windows only the ``multiprocessing`` "spawn" method is offered | ||||
|     besides the default ``trio`` which uses async wrapping around | ||||
|     ``subprocess.Popen``. | ||||
| 
 | ||||
|     ''' | ||||
|     import multiprocessing as mp | ||||
|     """ | ||||
|     global _ctx | ||||
|     global _spawn_method | ||||
| 
 | ||||
|     mp_methods = mp.get_all_start_methods() | ||||
|     if 'fork' in mp_methods: | ||||
|     methods = mp.get_all_start_methods() | ||||
|     if 'fork' in methods: | ||||
|         # forking is incompatible with ``trio``s global task tree | ||||
|         mp_methods.remove('fork') | ||||
|         methods.remove('fork') | ||||
| 
 | ||||
|     match key: | ||||
|         case 'mp_forkserver': | ||||
|             from . import _forkserver_override | ||||
|             _forkserver_override.override_stdlib() | ||||
|             _ctx = mp.get_context('forkserver') | ||||
|     # supported on all platforms | ||||
|     methods += ['trio'] | ||||
| 
 | ||||
|         case 'mp_spawn': | ||||
|             _ctx = mp.get_context('spawn') | ||||
| 
 | ||||
|         case 'trio': | ||||
|             _ctx = None | ||||
| 
 | ||||
|         case _: | ||||
|     if name not in methods: | ||||
|         raise ValueError( | ||||
|                 f'Spawn method `{key}` is invalid!\n' | ||||
|                 f'Please choose one of {SpawnMethodKey}' | ||||
|             f"Spawn method `{name}` is invalid please choose one of {methods}" | ||||
|         ) | ||||
|     elif name == 'forkserver': | ||||
|         _forkserver_override.override_stdlib() | ||||
|         _ctx = mp.get_context(name) | ||||
|     elif name == 'trio': | ||||
|         _ctx = None | ||||
|     else: | ||||
|         _ctx = mp.get_context(name) | ||||
| 
 | ||||
|     _spawn_method = key | ||||
|     _spawn_method = name | ||||
|     return _ctx | ||||
| 
 | ||||
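
Callers normally reach this through the runtime entrypoints rather than directly; a hedged selection sketch (valid keys per this diff are `'trio'`/`'mp_spawn'`/`'mp_forkserver'` on the new side, `'trio'`/`'spawn'`/`'forkserver'` on the old):

    import trio
    import tractor

    async def main():
        # `start_method` is forwarded to `try_set_start_method()`;
        # 'trio' is the portable default backend
        async with tractor.open_root_actor(start_method='trio'):
            await trio.sleep(0)

    if __name__ == '__main__':
        trio.run(main)
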
| 
 | ||||
| async def exhaust_portal( | ||||
| 
 | ||||
|     portal: Portal, | ||||
|     actor: Actor | ||||
| 
 | ||||
| ) -> Any: | ||||
|     ''' | ||||
|     Pull final result from portal (assuming it has one). | ||||
|     """Pull final result from portal (assuming it has one). | ||||
| 
 | ||||
|     If the main task is an async generator do our best to consume | ||||
|     what's left of it. | ||||
|     ''' | ||||
|     __tracebackhide__ = True | ||||
|     """ | ||||
|     try: | ||||
|         log.debug(f"Waiting on final result from {actor.uid}") | ||||
| 
 | ||||
|         # XXX: streams should never be reaped here since they should | ||||
|         # always be established and shutdown using a context manager api | ||||
|         final = await portal.result() | ||||
| 
 | ||||
|     except ( | ||||
|         Exception, | ||||
|         BaseExceptionGroup, | ||||
|     ) as err: | ||||
|         # we reraise in the parent task via a ``BaseExceptionGroup`` | ||||
|         final = res = await portal.result() | ||||
|         # if it's an async-gen then alert that we're cancelling it | ||||
|         if inspect.isasyncgen(res): | ||||
|             final = [] | ||||
|             log.warning( | ||||
|                 f"Blindly consuming asyncgen for {actor.uid}") | ||||
|             with trio.fail_after(1): | ||||
|                 async with aclosing(res) as agen: | ||||
|                     async for item in agen: | ||||
|                         log.debug(f"Consuming item {item}") | ||||
|                         final.append(item) | ||||
|     except (Exception, trio.MultiError) as err: | ||||
|         # we reraise in the parent task via a ``trio.MultiError`` | ||||
|         return err | ||||
|     except trio.Cancelled as err: | ||||
|         # lol, of course we need this too ;P | ||||
|  | @ -164,31 +123,29 @@ async def exhaust_portal( | |||
| 
 | ||||
| 
 | ||||
| async def cancel_on_completion( | ||||
| 
 | ||||
|     portal: Portal, | ||||
|     actor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
| 
 | ||||
|     errors: Dict[Tuple[str, str], Exception], | ||||
|     task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED, | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Cancel actor gracefully once it's "main" portal's | ||||
|     """Cancel actor gracefully once it's "main" portal's | ||||
|     result arrives. | ||||
| 
 | ||||
|     Should only be called for actors spawned with `run_in_actor()`. | ||||
| 
 | ||||
|     ''' | ||||
|     """ | ||||
|     with trio.CancelScope() as cs: | ||||
|         task_status.started(cs) | ||||
|         # if this call errors we store the exception for later | ||||
|         # in ``errors`` which will be reraised inside | ||||
|         # an exception group and we still send out a cancel request | ||||
|         # a MultiError and we still send out a cancel request | ||||
|         result = await exhaust_portal(portal, actor) | ||||
|         if isinstance(result, Exception): | ||||
|             errors[actor.uid] = result | ||||
|             log.warning( | ||||
|                 f"Cancelling {portal.channel.uid} after error {result}" | ||||
|             ) | ||||
| 
 | ||||
|         else: | ||||
|             log.runtime( | ||||
|             log.info( | ||||
|                 f"Cancelling {portal.channel.uid} gracefully " | ||||
|                 f"after result {result}") | ||||
| 
 | ||||
|  | @ -196,158 +153,11 @@ async def cancel_on_completion( | |||
|         await portal.cancel_actor() | ||||
| 
 | ||||
| 
 | ||||
| async def do_hard_kill( | ||||
|     proc: trio.Process, | ||||
|     terminate_after: int = 3, | ||||
| 
 | ||||
| ) -> None: | ||||
|     # NOTE: this timeout used to do nothing since we were shielding | ||||
|     # the ``.wait()`` inside ``new_proc()`` which will pretty much | ||||
|     # never release until the process exits, now it acts as | ||||
|     # a hard-kill time ultimatum. | ||||
|     log.debug(f"Terminating {proc}") | ||||
|     with trio.move_on_after(terminate_after) as cs: | ||||
| 
 | ||||
|         # NOTE: code below was copied verbatim from the now deprecated | ||||
|         # (in 0.20.0) ``trio._subrocess.Process.aclose()``, orig doc | ||||
|         # string: | ||||
|         # | ||||
|         # Close any pipes we have to the process (both input and output) | ||||
|         # and wait for it to exit. If cancelled, kills the process and | ||||
|         # waits for it to finish exiting before propagating the | ||||
|         # cancellation. | ||||
|         with trio.CancelScope(shield=True): | ||||
|             if proc.stdin is not None: | ||||
|                 await proc.stdin.aclose() | ||||
|             if proc.stdout is not None: | ||||
|                 await proc.stdout.aclose() | ||||
|             if proc.stderr is not None: | ||||
|                 await proc.stderr.aclose() | ||||
|         try: | ||||
|             await proc.wait() | ||||
|         finally: | ||||
|             if proc.returncode is None: | ||||
|                 proc.kill() | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     await proc.wait() | ||||
| 
 | ||||
|     if cs.cancelled_caught: | ||||
|         # XXX: should pretty much never get here unless we have | ||||
|         # to move the bits from ``proc.__aexit__()`` out and | ||||
|         # into here. | ||||
|         log.critical(f"#ZOMBIE_LORD_IS_HERE: {proc}") | ||||
|         proc.kill() | ||||
| 
 | ||||
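
The reap logic above, distilled to its shape in plain `trio` (a sketch; `grace` mirrors `terminate_after`):

    import trio

    async def reap(proc: trio.Process, grace: float = 3) -> None:
        # give the child `grace` seconds to exit cleanly...
        with trio.move_on_after(grace):
            await proc.wait()
        # ...then escalate to SIGKILL, shielding the final wait so the
        # zombie is always collected even under cancellation
        if proc.returncode is None:
            proc.kill()
            with trio.CancelScope(shield=True):
                await proc.wait()
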
| 
 | ||||
| async def soft_wait( | ||||
| 
 | ||||
|     proc: ProcessType, | ||||
|     wait_func: Callable[ | ||||
|         [ProcessType], | ||||
|         Awaitable, | ||||
|     ], | ||||
|     portal: Portal, | ||||
| 
 | ||||
| ) -> None: | ||||
|     # Wait for proc termination but **don't yet** call | ||||
|     # ``trio.Process.__aexit__()`` (it tears down stdio | ||||
|     # which will kill any waiting remote pdb trace). | ||||
|     # This is a "soft" (cancellable) join/reap. | ||||
|     uid = portal.channel.uid | ||||
|     try: | ||||
|         log.cancel(f'Soft waiting on actor:\n{uid}') | ||||
|         await wait_func(proc) | ||||
|     except trio.Cancelled: | ||||
|         # if cancelled during a soft wait, cancel the child | ||||
|         # actor before entering the hard reap sequence | ||||
|         # below. This means we try to do a graceful teardown | ||||
|         # via sending a cancel message before getting out | ||||
|         # zombie killing tools. | ||||
|         async with trio.open_nursery() as n: | ||||
|             n.cancel_scope.shield = True | ||||
| 
 | ||||
|             async def cancel_on_proc_deth(): | ||||
|                 ''' | ||||
|                 Cancel the actor cancel request if we detect that | ||||
|                 the process terminated. | ||||
| 
 | ||||
|                 ''' | ||||
|                 await wait_func(proc) | ||||
|                 n.cancel_scope.cancel() | ||||
| 
 | ||||
|             n.start_soon(cancel_on_proc_deth) | ||||
|             await portal.cancel_actor() | ||||
| 
 | ||||
|             if proc.poll() is None:  # type: ignore | ||||
|                 log.warning( | ||||
|                     'Actor still alive after cancel request:\n' | ||||
|                     f'{uid}' | ||||
|                 ) | ||||
| 
 | ||||
|                 n.cancel_scope.cancel() | ||||
|         raise | ||||
| 
 | ||||
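
The cancellation race in `soft_wait()` is a general trio pattern: run a "deadman" watcher alongside the cancel request so a dying child can't leave the requester hanging. In isolation (a sketch; `send_cancel` and `wait_proc` stand in for the portal call and process-wait):

    import trio

    async def cancel_or_deadman(send_cancel, wait_proc) -> None:
        # whichever side finishes first tears down the other
        async with trio.open_nursery() as n:

            async def deadman():
                await wait_proc()
                n.cancel_scope.cancel()

            n.start_soon(deadman)
            await send_cancel()
            n.cancel_scope.cancel()
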
| 
 | ||||
| async def new_proc( | ||||
|     name: str, | ||||
|     actor_nursery: ActorNursery, | ||||
|     subactor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
| 
 | ||||
|     # passed through to actor main | ||||
|     bind_addr: tuple[str, int], | ||||
|     parent_addr: tuple[str, int], | ||||
|     _runtime_vars: dict[str, Any],  # serialized and sent to _child | ||||
| 
 | ||||
|     *, | ||||
| 
 | ||||
|     infect_asyncio: bool = False, | ||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     # lookup backend spawning target | ||||
|     target = _methods[_spawn_method] | ||||
| 
 | ||||
|     # mark the new actor with the global spawn method | ||||
|     subactor._spawn_method = _spawn_method | ||||
| 
 | ||||
|     await target( | ||||
|         name, | ||||
|         actor_nursery, | ||||
|         subactor, | ||||
|         errors, | ||||
|         bind_addr, | ||||
|         parent_addr, | ||||
|         _runtime_vars,  # run time vars | ||||
|         infect_asyncio=infect_asyncio, | ||||
|         task_status=task_status, | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| async def trio_proc( | ||||
|     name: str, | ||||
|     actor_nursery: ActorNursery, | ||||
|     subactor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
| 
 | ||||
|     # passed through to actor main | ||||
|     bind_addr: tuple[str, int], | ||||
|     parent_addr: tuple[str, int], | ||||
|     _runtime_vars: dict[str, Any],  # serialized and sent to _child | ||||
|     *, | ||||
|     infect_asyncio: bool = False, | ||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Create a new ``Process`` using a "spawn method" as (configured using | ||||
|     ``try_set_start_method()``). | ||||
| 
 | ||||
|     This routine should be started in a actor runtime task and the logic | ||||
|     here is to be considered the core supervision strategy. | ||||
| 
 | ||||
|     ''' | ||||
| @asynccontextmanager | ||||
| async def spawn_subactor( | ||||
|     subactor: 'Actor', | ||||
|     parent_addr: Tuple[str, int], | ||||
| ): | ||||
|     spawn_cmd = [ | ||||
|         sys.executable, | ||||
|         "-m", | ||||
|  | @ -370,51 +180,69 @@ async def trio_proc( | |||
|             "--loglevel", | ||||
|             subactor.loglevel | ||||
|         ] | ||||
|     # Tell child to run in guest mode on top of ``asyncio`` loop | ||||
|     if infect_asyncio: | ||||
|         spawn_cmd.append("--asyncio") | ||||
| 
 | ||||
|     cancelled_during_spawn: bool = False | ||||
|     proc: trio.Process | None = None | ||||
|     proc = await trio.open_process(spawn_cmd) | ||||
|     try: | ||||
|         try: | ||||
|             # TODO: needs ``trio_typing`` patch? | ||||
|             proc = await trio.lowlevel.open_process(spawn_cmd) | ||||
|         yield proc | ||||
|     finally: | ||||
|         # XXX: do this **after** cancellation/teardown | ||||
|         # to avoid killing the process too early | ||||
|         # since trio does this internally on ``__aexit__()`` | ||||
| 
 | ||||
|             log.runtime(f"Started {proc}") | ||||
|         # NOTE: we always "shield" join sub procs in | ||||
|         # the outer scope since no actor zombies are | ||||
|         # ever allowed. This ``__aexit__()`` also shields | ||||
|         # internally. | ||||
|         log.debug(f"Attempting to kill {proc}") | ||||
| 
 | ||||
|         # NOTE: this timeout effectively does nothing right now since | ||||
|         # we are shielding the ``.wait()`` inside ``new_proc()`` which | ||||
|         # will pretty much never release until the process exits. | ||||
|         with trio.move_on_after(3) as cs: | ||||
|             async with proc: | ||||
|                 log.debug(f"Terminating {proc}") | ||||
|         if cs.cancelled_caught: | ||||
|             log.critical(f"HARD KILLING {proc}") | ||||
|             proc.kill() | ||||
| 
 | ||||
| 
 | ||||
| async def new_proc( | ||||
|     name: str, | ||||
|     actor_nursery: 'ActorNursery',  # type: ignore | ||||
|     subactor: Actor, | ||||
|     errors: Dict[Tuple[str, str], Exception], | ||||
|     # passed through to actor main | ||||
|     bind_addr: Tuple[str, int], | ||||
|     parent_addr: Tuple[str, int], | ||||
|     _runtime_vars: Dict[str, Any],  # serialized and sent to _child | ||||
|     *, | ||||
|     use_trio_run_in_process: bool = False, | ||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||
| ) -> None: | ||||
|     """Create a new ``multiprocessing.Process`` using the | ||||
|     spawn method as configured with ``try_set_start_method()``. | ||||
|     """ | ||||
|     cancel_scope = None | ||||
| 
 | ||||
|     # mark the new actor with the global spawn method | ||||
|     subactor._spawn_method = _spawn_method | ||||
| 
 | ||||
|     async with trio.open_nursery() as nursery: | ||||
|         if use_trio_run_in_process or _spawn_method == 'trio': | ||||
|             async with spawn_subactor( | ||||
|                 subactor, | ||||
|                 parent_addr, | ||||
|             ) as proc: | ||||
|                 log.info(f"Started {proc}") | ||||
| 
 | ||||
|                 # wait for actor to spawn and connect back to us | ||||
|                 # channel should have handshake completed by the | ||||
|                 # local actor by the time we get a ref to it | ||||
|                 event, chan = await actor_nursery._actor.wait_for_peer( | ||||
|                     subactor.uid) | ||||
| 
 | ||||
|         except trio.Cancelled: | ||||
|             cancelled_during_spawn = True | ||||
|             # we may cancel before the child connects back in which | ||||
|             # case avoid clobbering the pdb tty. | ||||
|             if debug_mode(): | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     # don't clobber an ongoing pdb | ||||
|                     if is_root_process(): | ||||
|                         await maybe_wait_for_debugger() | ||||
| 
 | ||||
|                     elif proc is not None: | ||||
|                         async with acquire_debug_lock(subactor.uid): | ||||
|                             # soft wait on the proc to terminate | ||||
|                             with trio.move_on_after(0.5): | ||||
|                                 await proc.wait() | ||||
|             raise | ||||
| 
 | ||||
|         # a sub-proc ref **must** exist now | ||||
|         assert proc | ||||
| 
 | ||||
|                 portal = Portal(chan) | ||||
|                 actor_nursery._children[subactor.uid] = ( | ||||
|             subactor, | ||||
|             proc, | ||||
|             portal, | ||||
|         ) | ||||
|                     subactor, proc, portal) | ||||
| 
 | ||||
|                 # send additional init params | ||||
|                 await chan.send({ | ||||
|  | @ -437,102 +265,27 @@ async def trio_proc( | |||
|                 with trio.CancelScope(shield=True): | ||||
|                     await actor_nursery._join_procs.wait() | ||||
| 
 | ||||
|         async with trio.open_nursery() as nursery: | ||||
|                 if portal in actor_nursery._cancel_after_result_on_exit: | ||||
|                 nursery.start_soon( | ||||
|                     cancel_on_completion, | ||||
|                     portal, | ||||
|                     subactor, | ||||
|                     errors | ||||
|                 ) | ||||
|                     cancel_scope = await nursery.start( | ||||
|                         cancel_on_completion, portal, subactor, errors) | ||||
| 
 | ||||
|             # This is a "soft" (cancellable) join/reap which | ||||
|             # will remote cancel the actor on a ``trio.Cancelled`` | ||||
|             # condition. | ||||
|             await soft_wait( | ||||
|                 proc, | ||||
|                 trio.Process.wait, | ||||
|                 portal | ||||
|             ) | ||||
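``soft_wait()`` itself isn't shown in this hunk; a plausible sketch of its contract (a cancellable join which relays cancellation to the remote actor) might be:

    async def soft_wait(proc, wait_func, portal) -> None:
        # block until the process exits, but if *this* task is
        # cancelled first, ask the actor to cancel itself via its
        # portal before re-raising
        try:
            await wait_func(proc)
        except trio.Cancelled:
            with trio.CancelScope(shield=True):
                await portal.cancel_actor()
            raise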
|                 # Wait for proc termination but **don't yet** call | ||||
|                 # ``trio.Process.__aexit__()`` (it tears down stdio | ||||
|                 # which will kill any waiting remote pdb trace). | ||||
| 
 | ||||
|             # cancel result waiter that may have been spawned in | ||||
|             # tandem if not done already | ||||
|             log.warning( | ||||
|                 "Cancelling existing result waiter task for " | ||||
|                 f"{subactor.uid}") | ||||
|             nursery.cancel_scope.cancel() | ||||
|                 # TODO: No idea how we can enforce zombie | ||||
|                 # reaping more stringently without the shield | ||||
|                 # we used to have below... | ||||
| 
 | ||||
|     finally: | ||||
|         # XXX NOTE XXX: The "hard" reap since no actor zombies are | ||||
|         # allowed! Do this **after** cancellation/teardown to avoid | ||||
|         # killing the process too early. | ||||
|         if proc: | ||||
|             log.cancel(f'Hard reap sequence starting for {subactor.uid}') | ||||
|             with trio.CancelScope(shield=True): | ||||
| 
 | ||||
|                 # don't clobber an ongoing pdb | ||||
|                 if cancelled_during_spawn: | ||||
|                     # Try again to avoid TTY clobbering. | ||||
|                     async with acquire_debug_lock(subactor.uid): | ||||
|                         with trio.move_on_after(0.5): | ||||
|                 # always "hard" join sub procs: | ||||
|                 # no actor zombies allowed | ||||
|                 # with trio.CancelScope(shield=True): | ||||
|                 await proc.wait() | ||||
| 
 | ||||
|                 if is_root_process(): | ||||
|                     # TODO: solve the following issue where we need | ||||
|                     # to do a similar wait like this but in an | ||||
|                     # "intermediary" parent actor that itself isn't | ||||
|                     # in debug but has a child that is, and we need | ||||
|                     # to hold off on relaying SIGINT until that child | ||||
|                     # is complete. | ||||
|                     # https://github.com/goodboy/tractor/issues/320 | ||||
|                     await maybe_wait_for_debugger( | ||||
|                         child_in_debug=_runtime_vars.get( | ||||
|                             '_debug_mode', False), | ||||
|                     ) | ||||
| 
 | ||||
|                 if proc.poll() is None: | ||||
|                     log.cancel(f"Attempting to hard kill {proc}") | ||||
|                     await do_hard_kill(proc) | ||||
| 
 | ||||
|                 log.debug(f"Joined {proc}") | ||||
|         else: | ||||
|             log.warning('Nursery cancelled before sub-proc started') | ||||
| 
 | ||||
|         if not cancelled_during_spawn: | ||||
|             # pop child entry to indicate we no longer managing this | ||||
|             # subactor | ||||
|             actor_nursery._children.pop(subactor.uid) | ||||
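The essential move in that ``finally`` block is cancellation-shielded teardown; boiled down:

    # reaping must complete even if the enclosing scope was cancelled,
    # so shield the final wait (no zombies allowed):
    with trio.CancelScope(shield=True):
        await proc.wait()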
| 
 | ||||
| 
 | ||||
| async def mp_proc( | ||||
|     name: str, | ||||
|     actor_nursery: ActorNursery,  # type: ignore  # noqa | ||||
|     subactor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
|     # passed through to actor main | ||||
|     bind_addr: tuple[str, int], | ||||
|     parent_addr: tuple[str, int], | ||||
|     _runtime_vars: dict[str, Any],  # serialized and sent to _child | ||||
|     *, | ||||
|     infect_asyncio: bool = False, | ||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     # uggh zone | ||||
|     try: | ||||
|         from multiprocessing import semaphore_tracker  # type: ignore | ||||
|         resource_tracker = semaphore_tracker | ||||
|         resource_tracker._resource_tracker = resource_tracker._semaphore_tracker  # noqa | ||||
|     except ImportError: | ||||
|         # 3.8 introduces a more general version that also tracks shared mems | ||||
|         from multiprocessing import resource_tracker  # type: ignore | ||||
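After this shim both code paths can (presumably) drive the tracker through the same handle:

    # works on 3.7 (semaphore_tracker alias) and 3.8+ alike:
    tracker = resource_tracker._resource_tracker
    tracker.ensure_running()  # spawn the tracker process if not up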
| 
 | ||||
|             # `multiprocessing` | ||||
|             assert _ctx | ||||
|             start_method = _ctx.get_start_method() | ||||
|             if start_method == 'forkserver': | ||||
| 
 | ||||
|         from multiprocessing import forkserver  # type: ignore | ||||
|                 # XXX do our hackery on the stdlib to avoid multiple | ||||
|                 # forkservers (one at each subproc layer). | ||||
|                 fs = forkserver._forkserver | ||||
|  | @ -544,40 +297,37 @@ async def mp_proc( | |||
|                     # forkserver.set_forkserver_preload(enable_modules) | ||||
|                     forkserver.ensure_running() | ||||
|                     fs_info = ( | ||||
|                 fs._forkserver_address,  # type: ignore  # noqa | ||||
|                 fs._forkserver_alive_fd,  # type: ignore  # noqa | ||||
|                         fs._forkserver_address, | ||||
|                         fs._forkserver_alive_fd, | ||||
|                         getattr(fs, '_forkserver_pid', None), | ||||
|                         getattr( | ||||
|                             resource_tracker._resource_tracker, '_pid', None), | ||||
|                         resource_tracker._resource_tracker._fd, | ||||
|                     ) | ||||
|         else:  # request to forkserver to fork a new child | ||||
|                 else: | ||||
|                     assert curr_actor._forkserver_info | ||||
|                     fs_info = ( | ||||
|                 fs._forkserver_address,  # type: ignore  # noqa | ||||
|                 fs._forkserver_alive_fd,  # type: ignore  # noqa | ||||
|                 fs._forkserver_pid,  # type: ignore  # noqa | ||||
|                         fs._forkserver_address, | ||||
|                         fs._forkserver_alive_fd, | ||||
|                         fs._forkserver_pid, | ||||
|                         resource_tracker._resource_tracker._pid, | ||||
|                         resource_tracker._resource_tracker._fd, | ||||
|                      ) = curr_actor._forkserver_info | ||||
|             else: | ||||
|         # spawn method | ||||
|                 fs_info = (None, None, None, None, None) | ||||
| 
 | ||||
|     proc: mp.Process = _ctx.Process(  # type: ignore | ||||
|             proc = _ctx.Process(  # type: ignore | ||||
|                 target=_mp_main, | ||||
|                 args=( | ||||
|                     subactor, | ||||
|                     bind_addr, | ||||
|                     fs_info, | ||||
|             _spawn_method, | ||||
|                     start_method, | ||||
|                     parent_addr, | ||||
|             infect_asyncio, | ||||
|                 ), | ||||
|                 # daemon=True, | ||||
|                 name=name, | ||||
|             ) | ||||
| 
 | ||||
|             # `multiprocessing` only (since no async interface): | ||||
|             # register the process before start in case we get a cancel | ||||
|             # request before the actor has fully spawned - then we can wait | ||||
|  | @ -588,24 +338,13 @@ async def mp_proc( | |||
|             if not proc.is_alive(): | ||||
|                 raise ActorFailure("Couldn't start sub-actor?") | ||||
| 
 | ||||
|     log.runtime(f"Started {proc}") | ||||
|             log.info(f"Started {proc}") | ||||
| 
 | ||||
|     try: | ||||
|             # wait for actor to spawn and connect back to us | ||||
|             # channel should have handshake completed by the | ||||
|             # local actor by the time we get a ref to it | ||||
|             event, chan = await actor_nursery._actor.wait_for_peer( | ||||
|                 subactor.uid) | ||||
| 
 | ||||
|         # XXX: monkey patch poll API to match the ``subprocess`` API.. | ||||
|         # not sure why they don't expose this but kk. | ||||
|         proc.poll = lambda: proc.exitcode  # type: ignore | ||||
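With that patch in place, backend-agnostic reap logic can treat ``mp.Process`` and ``trio.Process`` uniformly; a sketch:

    def is_done(proc) -> bool:
        # trio.Process.poll() and the patched mp.Process.poll both
        # return None while running and an exit code once finished
        return proc.poll() is not None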
| 
 | ||||
|     # except: | ||||
|         # TODO: in the case we were cancelled before the sub-proc | ||||
|         # registered itself back we must be sure to try and clean | ||||
|         # any process we may have started. | ||||
| 
 | ||||
|             portal = Portal(chan) | ||||
|             actor_nursery._children[subactor.uid] = (subactor, proc, portal) | ||||
| 
 | ||||
|  | @ -621,59 +360,25 @@ async def mp_proc( | |||
|             # while user code is still doing its thing. Only after the | ||||
|             # nursery block closes do we allow subactor results to be | ||||
|             # awaited and reported upwards to the supervisor. | ||||
|         with trio.CancelScope(shield=True): | ||||
|             await actor_nursery._join_procs.wait() | ||||
| 
 | ||||
|         async with trio.open_nursery() as nursery: | ||||
|             if portal in actor_nursery._cancel_after_result_on_exit: | ||||
|                 nursery.start_soon( | ||||
|                     cancel_on_completion, | ||||
|                     portal, | ||||
|                     subactor, | ||||
|                     errors | ||||
|                 ) | ||||
|                 cancel_scope = await nursery.start( | ||||
|                     cancel_on_completion, portal, subactor, errors) | ||||
| 
 | ||||
|             # This is a "soft" (cancellable) join/reap which | ||||
|             # will remote cancel the actor on a ``trio.Cancelled`` | ||||
|             # condition. | ||||
|             await soft_wait( | ||||
|                 proc, | ||||
|                 proc_waiter, | ||||
|                 portal | ||||
|             ) | ||||
|             # TODO: timeout block here? | ||||
|             if proc.is_alive(): | ||||
|                 await proc_waiter(proc) | ||||
|             proc.join() | ||||
| 
 | ||||
|         # This is again common logic for all backends: | ||||
| 
 | ||||
|         log.debug(f"Joined {proc}") | ||||
|         # pop child entry to indicate we are no longer managing this subactor | ||||
|         subactor, proc, portal = actor_nursery._children.pop(subactor.uid) | ||||
|         # cancel result waiter that may have been spawned in | ||||
|         # tandem if not done already | ||||
|         if cancel_scope: | ||||
|             log.warning( | ||||
|                 "Cancelling existing result waiter task for " | ||||
|                 f"{subactor.uid}") | ||||
|             nursery.cancel_scope.cancel() | ||||
| 
 | ||||
|     finally: | ||||
|         # hard reap sequence | ||||
|         if proc.is_alive(): | ||||
|             log.cancel(f"Attempting to hard kill {proc}") | ||||
|             with trio.move_on_after(0.1) as cs: | ||||
|                 cs.shield = True | ||||
|                 await proc_waiter(proc) | ||||
| 
 | ||||
|             if cs.cancelled_caught: | ||||
|                 proc.terminate() | ||||
| 
 | ||||
|         proc.join() | ||||
|         log.debug(f"Joined {proc}") | ||||
| 
 | ||||
|         # pop child entry to indicate we are no longer managing subactor | ||||
|         actor_nursery._children.pop(subactor.uid) | ||||
| 
 | ||||
|         # TODO: prolly report to ``mypy`` how this causes all sorts of | ||||
|         # false errors.. | ||||
|         # subactor, proc, portal = actor_nursery._children.pop(subactor.uid) | ||||
| 
 | ||||
| 
 | ||||
| # proc spawning backend target map | ||||
| _methods: dict[SpawnMethodKey, Callable] = { | ||||
|     'trio': trio_proc, | ||||
|     'mp_spawn': mp_proc, | ||||
|     'mp_forkserver': mp_proc, | ||||
| } | ||||
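Dispatch then reduces to a dict lookup; a hedged sketch of how a caller might resolve the configured backend (``dispatch`` is illustrative, not part of this diff):

    async def dispatch(method_key: str, *args, **kwargs) -> None:
        # method_key is e.g. 'trio', 'mp_spawn' or 'mp_forkserver'
        try:
            spawner = _methods[method_key]
        except KeyError:
            raise ValueError(f'unknown spawn backend: {method_key!r}')
        await spawner(*args, **kwargs)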
|                 f"Cancelling existing result waiter task for {subactor.uid}") | ||||
|             cancel_scope.cancel() | ||||
|  |  | |||
|  | @ -1,35 +1,14 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| Per process state | ||||
| 
 | ||||
| """ | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     Any, | ||||
| ) | ||||
| from typing import Optional, Dict, Any | ||||
| from collections.abc import Mapping | ||||
| import multiprocessing as mp | ||||
| 
 | ||||
| import trio | ||||
| 
 | ||||
| from ._exceptions import NoRuntime | ||||
| 
 | ||||
| 
 | ||||
| _current_actor: Optional['Actor'] = None  # type: ignore # noqa | ||||
| _runtime_vars: dict[str, Any] = { | ||||
| _runtime_vars: Dict[str, Any] = { | ||||
|     '_debug_mode': False, | ||||
|     '_is_root': False, | ||||
|     '_root_mailbox': (None, None) | ||||
|  | @ -40,15 +19,38 @@ def current_actor(err_on_no_runtime: bool = True) -> 'Actor':  # type: ignore # | |||
|     """Get the process-local actor instance. | ||||
|     """ | ||||
|     if _current_actor is None and err_on_no_runtime: | ||||
|         raise NoRuntime("No local actor has been initialized yet") | ||||
|         raise RuntimeError("No local actor has been initialized yet") | ||||
| 
 | ||||
|     return _current_actor | ||||
| 
 | ||||
| 
 | ||||
| _conc_name_getters = { | ||||
|     'task': trio.lowlevel.current_task, | ||||
|     'actor': current_actor | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| class ActorContextInfo(Mapping): | ||||
|     "Dyanmic lookup for local actor and task names" | ||||
|     _context_keys = ('task', 'actor') | ||||
| 
 | ||||
|     def __len__(self): | ||||
|         return len(self._context_keys) | ||||
| 
 | ||||
|     def __iter__(self): | ||||
|         return iter(self._context_keys) | ||||
| 
 | ||||
|     def __getitem__(self, key: str) -> str: | ||||
|         try: | ||||
|             return _conc_name_getters[key]().name  # type: ignore | ||||
|         except RuntimeError: | ||||
|             # no local actor/task context initialized yet | ||||
|             return f'no {key} context' | ||||
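Since it's a ``Mapping`` it can be handed to stdlib logging as the ``extra`` mapping so formatters can reference ``%(actor)s`` and ``%(task)s``; this mirrors (assumed) usage in ``tractor.log``:

    import logging

    log = logging.LoggerAdapter(
        logging.getLogger('tractor'),
        ActorContextInfo(),
    )
    # a formatter using '%(actor)s %(task)s %(message)s' now resolves
    # both names lazily as each record is created
    log.info('hello from some actor task')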
| 
 | ||||
| 
 | ||||
| def is_main_process() -> bool: | ||||
|     """Bool determining if this actor is running in the top-most process. | ||||
|     """ | ||||
|     import multiprocessing as mp | ||||
|     return mp.current_process().name == 'MainProcess' | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,755 +1,49 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| Message stream types and APIs. | ||||
| 
 | ||||
| """ | ||||
| from __future__ import annotations | ||||
| import inspect | ||||
| from contextlib import asynccontextmanager | ||||
| from contextvars import ContextVar | ||||
| from dataclasses import dataclass | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Optional, | ||||
|     Callable, | ||||
|     AsyncGenerator, | ||||
|     AsyncIterator | ||||
| ) | ||||
| 
 | ||||
| import warnings | ||||
| from typing import Any | ||||
| 
 | ||||
| import trio | ||||
| 
 | ||||
| from ._ipc import Channel | ||||
| from ._exceptions import unpack_error, ContextCancelled | ||||
| from ._state import current_actor | ||||
| from .log import get_logger | ||||
| from .trionics import broadcast_receiver, BroadcastReceiver | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| _context: ContextVar['Context'] = ContextVar('context') | ||||
| 
 | ||||
| 
 | ||||
| # TODO: the list | ||||
| # - generic typing like trio's receive channel but with msgspec | ||||
| #   messages? class ReceiveChannel(AsyncResource, Generic[ReceiveType]): | ||||
| # - use __slots__ on ``Context``? | ||||
| 
 | ||||
| 
 | ||||
| class MsgStream(trio.abc.Channel): | ||||
|     ''' | ||||
|     A bidirectional message stream for receiving logically sequenced | ||||
|     values over an inter-actor IPC ``Channel``. | ||||
| 
 | ||||
|     This is the type returned to a local task which entered either | ||||
|     ``Portal.open_stream_from()`` or ``Context.open_stream()``. | ||||
| 
 | ||||
|     Termination rules: | ||||
| 
 | ||||
|     - on cancellation the stream is **not** implicitly closed and the | ||||
|       surrounding ``Context`` is expected to handle how that cancel | ||||
|       is relayed to any task on the remote side. | ||||
|     - if the remote task signals the end of a stream, the | ||||
|       ``ReceiveChannel`` semantics dictate that a ``StopAsyncIteration`` | ||||
|       is raised to terminate the local ``async for``. | ||||
| 
 | ||||
|     ''' | ||||
|     def __init__( | ||||
|         self, | ||||
|         ctx: 'Context',  # typing: ignore # noqa | ||||
|         rx_chan: trio.MemoryReceiveChannel, | ||||
|         _broadcaster: Optional[BroadcastReceiver] = None, | ||||
| 
 | ||||
|     ) -> None: | ||||
|         self._ctx = ctx | ||||
|         self._rx_chan = rx_chan | ||||
|         self._broadcaster = _broadcaster | ||||
| 
 | ||||
|         # flag to denote end of stream | ||||
|         self._eoc: bool = False | ||||
|         self._closed: bool = False | ||||
| 
 | ||||
|     # delegate directly to underlying mem channel | ||||
|     def receive_nowait(self): | ||||
|         msg = self._rx_chan.receive_nowait() | ||||
|         return msg['yield'] | ||||
| 
 | ||||
|     async def receive(self): | ||||
|         '''Async receive a single msg from the IPC transport, the next | ||||
|         in sequence for this stream. | ||||
| 
 | ||||
|         ''' | ||||
|         # see ``.aclose()`` for notes on the old behaviour prior to | ||||
|         # introducing this | ||||
|         if self._eoc: | ||||
|             raise trio.EndOfChannel | ||||
| 
 | ||||
|         if self._closed: | ||||
|             raise trio.ClosedResourceError('This stream was closed') | ||||
| 
 | ||||
|         try: | ||||
|             msg = await self._rx_chan.receive() | ||||
|             return msg['yield'] | ||||
| 
 | ||||
|         except KeyError as err: | ||||
|             # internal error should never get here | ||||
|             assert msg.get('cid'), ("Received internal error at portal?") | ||||
| 
 | ||||
|             # TODO: handle 2 cases with 3.10 match syntax | ||||
|             # - 'stop' | ||||
|             # - 'error' | ||||
|             # possibly just handle msg['stop'] here! | ||||
| 
 | ||||
|             if self._closed: | ||||
|                 raise trio.ClosedResourceError('This stream was closed') | ||||
| 
 | ||||
|             if msg.get('stop') or self._eoc: | ||||
|                 log.debug(f"{self} was stopped at remote end") | ||||
| 
 | ||||
|                 # XXX: important to set so that a new ``.receive()`` | ||||
|                 # call (likely by another task using a broadcast receiver) | ||||
|                 # doesn't accidentally pull the ``return`` message | ||||
|                 # value out of the underlying feed mem chan! | ||||
|                 self._eoc = True | ||||
| 
 | ||||
|                 # # when the send is closed we assume the stream has | ||||
|                 # # terminated and signal this local iterator to stop | ||||
|                 # await self.aclose() | ||||
| 
 | ||||
|                 # XXX: this causes ``ReceiveChannel.__anext__()`` to | ||||
|                 # raise a ``StopAsyncIteration`` **and** in our catch | ||||
|                 # block below it will trigger ``.aclose()``. | ||||
|                 raise trio.EndOfChannel from err | ||||
| 
 | ||||
|             # TODO: test that shows stream raising an expected error!!! | ||||
|             elif msg.get('error'): | ||||
|                 # raise the error message | ||||
|                 raise unpack_error(msg, self._ctx.chan) | ||||
| 
 | ||||
|             else: | ||||
|                 raise | ||||
| 
 | ||||
|         except ( | ||||
|             trio.ClosedResourceError,  # by self._rx_chan | ||||
|             trio.EndOfChannel,  # by self._rx_chan or `stop` msg from far end | ||||
|         ): | ||||
|             # XXX: we close the stream on any of these error conditions: | ||||
| 
 | ||||
|             # a ``ClosedResourceError`` indicates that the internal | ||||
|             # feeder memory receive channel was closed likely by the | ||||
|             # runtime after the associated transport-channel | ||||
|             # disconnected or broke. | ||||
| 
 | ||||
|             # an ``EndOfChannel`` indicates either the internal recv | ||||
|             # memchan exhausted **or** we raised it just above after | ||||
|             # receiving a `stop` message from the far end of the stream. | ||||
| 
 | ||||
|             # Previously this was triggered by calling ``.aclose()`` on | ||||
|             # the send side of the channel inside | ||||
|             # ``Actor._push_result()`` (should still be commented code | ||||
|             # there - which should eventually get removed), but now the | ||||
|             # 'stop' message handling has been put just above. | ||||
| 
 | ||||
|             # TODO: Locally, we want to close this stream gracefully, by | ||||
|             # terminating any local consumers tasks deterministically. | ||||
|             # One we have broadcast support, we **don't** want to be | ||||
|             # closing this stream and not flushing a final value to | ||||
|             # remaining (clone) consumers who may not have been | ||||
|             # scheduled to receive it yet. | ||||
| 
 | ||||
|             # when the send is closed we assume the stream has | ||||
|             # terminated and signal this local iterator to stop | ||||
|             await self.aclose() | ||||
| 
 | ||||
|             raise  # propagate | ||||
| 
 | ||||
|     async def aclose(self): | ||||
|         ''' | ||||
|         Cancel associated remote actor task and local memory channel on | ||||
|         close. | ||||
| 
 | ||||
|         ''' | ||||
|         # XXX: keep proper adherence to trio's `.aclose()` semantics: | ||||
|         # https://trio.readthedocs.io/en/stable/reference-io.html#trio.abc.AsyncResource.aclose | ||||
|         rx_chan = self._rx_chan | ||||
| 
 | ||||
|         if rx_chan._closed: | ||||
|             log.cancel(f"{self} is already closed") | ||||
| 
 | ||||
|             # this stream has already been closed so silently succeed as | ||||
|             # per ``trio.AsyncResource`` semantics. | ||||
|             # https://trio.readthedocs.io/en/stable/reference-io.html#trio.abc.AsyncResource.aclose | ||||
|             return | ||||
| 
 | ||||
|         self._eoc = True | ||||
| 
 | ||||
|         # NOTE: this is super subtle IPC messaging stuff: | ||||
|         # Relay stop iteration to far end **iff** we're | ||||
|         # in bidirectional mode. If we're only streaming | ||||
|         # *from* one side then that side **won't** have an | ||||
|         # entry in `Actor._cids2qs` (maybe it should though?). | ||||
|         # So any `yield` or `stop` msgs sent from the caller side | ||||
|         # will cause key errors on the callee side since there is | ||||
|         # no entry for a local feeder mem chan since the callee task | ||||
|         # isn't expecting messages to be sent by the caller. | ||||
|         # Thus, we must check that this context DOES NOT | ||||
|         # have a portal reference to ensure this is indeed the callee | ||||
|         # side and can relay a 'stop'. | ||||
| 
 | ||||
|         # In the bidirectional case, `Context.open_stream()` will create | ||||
|         # the `Actor._cids2qs` entry from a call to | ||||
|         # `Actor.get_context()` and will call us here to send the stop | ||||
|         # msg in ``__aexit__()`` on teardown. | ||||
|         try: | ||||
|             # NOTE: if this call is cancelled we expect this end to | ||||
|             # handle as though the stop was never sent (though if it | ||||
|             # was it shouldn't matter since it's unlikely a user | ||||
|             # will try to re-use a stream after attempting to close | ||||
|             # it). | ||||
|             with trio.CancelScope(shield=True): | ||||
|                 await self._ctx.send_stop() | ||||
| 
 | ||||
|         except ( | ||||
|             trio.BrokenResourceError, | ||||
|             trio.ClosedResourceError | ||||
|         ): | ||||
|             # the underlying channel may already have been pulled | ||||
|             # in which case our stop message is meaningless since | ||||
|             # it can't traverse the transport. | ||||
|             ctx = self._ctx | ||||
|             log.warning( | ||||
|                 f'Stream was already destroyed?\n' | ||||
|                 f'actor: {ctx.chan.uid}\n' | ||||
|                 f'ctx id: {ctx.cid}' | ||||
|             ) | ||||
| 
 | ||||
|         self._closed = True | ||||
| 
 | ||||
|         # Do we close the local mem chan ``self._rx_chan`` ??!? | ||||
| 
 | ||||
|         # NO, DEFINITELY NOT if we're a bi-dir ``MsgStream``! | ||||
|         # BECAUSE this same core-msg-loop mem recv-chan is used to deliver | ||||
|         # the potential final result from the surrounding inter-actor | ||||
|         # `Context` so we don't want to close it until that context has | ||||
|         # run to completion. | ||||
| 
 | ||||
|         # XXX: Notes on old behaviour: | ||||
|         # await rx_chan.aclose() | ||||
| 
 | ||||
|         # In the receive-only case, ``Portal.open_stream_from()`` used | ||||
|         # to rely on this call explicitly on teardown such that a new | ||||
|         # call to ``.receive()`` after ``rx_chan`` had been closed, would | ||||
|         # result in us raising a ``trio.EndOfChannel`` (since we | ||||
|         # remapped the ``trio.ClosedResourceError``). However, now if for some | ||||
|         # reason the stream's consumer code tries to manually receive a new | ||||
|         # value before ``.aclose()`` is called **but** the far end has | ||||
|         # stopped, `.receive()` **must** raise ``trio.EndOfChannel`` in | ||||
|         # order to avoid an infinite hang on ``.__anext__()``; this is | ||||
|         # why we added ``self._eoc`` to denote stream closure independent | ||||
|         # of ``rx_chan``. | ||||
| 
 | ||||
|         # In theory we could still use this old method and close the | ||||
|         # underlying msg-loop mem chan as above and then **not** check | ||||
|         # for ``self._eoc`` in ``.receive()`` (if for some reason we | ||||
|         # think that check is a bottle neck - not likely) **but** then | ||||
|         # we would need to map the resulting | ||||
|         # ``trio.ClosedResourceError`` to a ``trio.EndOfChannel`` in | ||||
|         # ``.receive()`` (as it originally was before bi-dir streaming | ||||
|         # support) in order to trigger stream closure. The old behaviour | ||||
|         # is arguably more confusing since we lose detection of the | ||||
|         # runtime's closure of ``rx_chan`` in the case where we may | ||||
|         # still need to consume msgs that are "in transit" from the far | ||||
|         # end (eg. for ``Context.result()``). | ||||
| 
 | ||||
|     @asynccontextmanager | ||||
|     async def subscribe( | ||||
|         self, | ||||
| 
 | ||||
|     ) -> AsyncIterator[BroadcastReceiver]: | ||||
|         ''' | ||||
|         Allocate and return a ``BroadcastReceiver`` which delegates | ||||
|         to this message stream. | ||||
| 
 | ||||
|         This allows multiple local tasks to receive each their own copy | ||||
|         of this message stream. | ||||
| 
 | ||||
|         This operation is idempotent and mutates this stream's | ||||
|         receive machinery to copy and window-length-store each received | ||||
|         value from the far end via the internally created broadcast | ||||
|         receiver wrapper. | ||||
| 
 | ||||
|         ''' | ||||
|         # NOTE: This operation is idempotent and non-reversible, so be | ||||
|         # sure you can deal with any (theoretical) overhead of the | ||||
|         # allocated ``BroadcastReceiver`` before calling this method for | ||||
|         # the first time. | ||||
|         if self._broadcaster is None: | ||||
| 
 | ||||
|             bcast = self._broadcaster = broadcast_receiver( | ||||
|                 self, | ||||
|                 # use memory channel size by default | ||||
|                 self._rx_chan._state.max_buffer_size,  # type: ignore | ||||
|                 receive_afunc=self.receive, | ||||
|             ) | ||||
| 
 | ||||
|             # NOTE: we override the original stream instance's receive | ||||
|             # method to now delegate to the broadcaster's ``.receive()`` | ||||
|             # such that new subscribers will be copied received values | ||||
|             # and this stream doesn't have to expect its original | ||||
|             # consumer(s) to get a new broadcast rx handle. | ||||
|             self.receive = bcast.receive  # type: ignore | ||||
|             # seems there's no graceful way to type this with ``mypy``? | ||||
|             # https://github.com/python/mypy/issues/708 | ||||
| 
 | ||||
|         async with self._broadcaster.subscribe() as bstream: | ||||
|             assert bstream.key != self._broadcaster.key | ||||
|             assert bstream._recv == self._broadcaster._recv | ||||
| 
 | ||||
|             # NOTE: we patch on a `.send()` to the bcaster so that the | ||||
|             # caller can still conduct 2-way streaming using this | ||||
|             # ``bstream`` handle transparently as though it was the msg | ||||
|             # stream instance. | ||||
|             bstream.send = self.send  # type: ignore | ||||
| 
 | ||||
|             yield bstream | ||||
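Usage-wise this lets multiple local tasks fan-in on one IPC stream, each getting its own copy of every value; a sketch assuming ``stream`` came from ``Portal.open_stream_from()`` or ``Context.open_stream()``:

    async def consumer(name: str, stream: MsgStream) -> None:
        async with stream.subscribe() as bstream:
            async for msg in bstream:
                print(f'{name} got {msg}')

    async with trio.open_nursery() as n:
        n.start_soon(consumer, 'a', stream)
        n.start_soon(consumer, 'b', stream)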
| 
 | ||||
|     async def send( | ||||
|         self, | ||||
|         data: Any | ||||
|     ) -> None: | ||||
|         ''' | ||||
|         Send a message over this stream to the far end. | ||||
| 
 | ||||
|         ''' | ||||
|         if self._ctx._error: | ||||
|             raise self._ctx._error  # from None | ||||
| 
 | ||||
|         if self._closed: | ||||
|             raise trio.ClosedResourceError('This stream was already closed') | ||||
| 
 | ||||
|         await self._ctx.chan.send({'yield': data, 'cid': self._ctx.cid}) | ||||
| 
 | ||||
| 
 | ||||
| @dataclass | ||||
| @dataclass(frozen=True) | ||||
| class Context: | ||||
|     ''' | ||||
|     An inter-actor, ``trio`` task communication context. | ||||
|     """An IAC (inter-actor communication) context. | ||||
| 
 | ||||
|     NB: This class should never be instantiated directly; it is delivered | ||||
|     by either runtime machinery to a remotely started task or by entering | ||||
|     ``Portal.open_context()``. | ||||
| 
 | ||||
|     Allows maintaining task or protocol specific state between | ||||
|     2 communicating actor tasks. A unique context is created on the | ||||
|     callee side/end for every request to a remote actor from a portal. | ||||
| 
 | ||||
|     A context can be cancelled (and possibly eventually restarted) from | ||||
|     either side of the underlying IPC channel, can open task-oriented | ||||
|     message streams, and acts as an IPC-aware inter-actor-task cancel | ||||
|     scope. | ||||
| 
 | ||||
|     ''' | ||||
|     Allows maintaining task or protocol specific state between communicating | ||||
|     actors. A unique context is created on the receiving end for every request | ||||
|     to a remote actor. | ||||
|     """ | ||||
|     chan: Channel | ||||
|     cid: str | ||||
| 
 | ||||
|     # these are the "feeder" channels for delivering | ||||
|     # message values to the local task from the runtime | ||||
|     # msg processing loop. | ||||
|     _recv_chan: trio.MemoryReceiveChannel | ||||
|     _send_chan: trio.MemorySendChannel | ||||
| 
 | ||||
|     _remote_func_type: Optional[str] = None | ||||
| 
 | ||||
|     # only set on the caller side | ||||
|     _portal: Optional['Portal'] = None    # type: ignore # noqa | ||||
|     _result: Optional[Any] = False | ||||
|     _error: Optional[BaseException] = None | ||||
| 
 | ||||
|     # status flags | ||||
|     _cancel_called: bool = False | ||||
|     _cancel_msg: Optional[str] = None | ||||
|     _enter_debugger_on_cancel: bool = True | ||||
|     _started_called: bool = False | ||||
|     _started_received: bool = False | ||||
|     _stream_opened: bool = False | ||||
| 
 | ||||
|     # only set on the callee side | ||||
|     _scope_nursery: Optional[trio.Nursery] = None | ||||
| 
 | ||||
|     _backpressure: bool = False | ||||
|     cancel_scope: trio.CancelScope | ||||
| 
 | ||||
|     async def send_yield(self, data: Any) -> None: | ||||
| 
 | ||||
|         warnings.warn( | ||||
|             "`Context.send_yield()` is now deprecated. " | ||||
|             "Use ``MessageStream.send()``. ", | ||||
|             DeprecationWarning, | ||||
|             stacklevel=2, | ||||
|         ) | ||||
|         await self.chan.send({'yield': data, 'cid': self.cid}) | ||||
| 
 | ||||
|     async def send_stop(self) -> None: | ||||
|         await self.chan.send({'stop': True, 'cid': self.cid}) | ||||
| 
 | ||||
|     async def _maybe_raise_from_remote_msg( | ||||
|         self, | ||||
|         msg: dict[str, Any], | ||||
| 
 | ||||
|     ) -> None: | ||||
|         ''' | ||||
|         (Maybe) unpack and raise a msg error into the local scope | ||||
|         nursery for this context. | ||||
| 
 | ||||
|         Acts as a form of "relay" for a remote error raised | ||||
|         in the corresponding remote callee task. | ||||
| 
 | ||||
|         ''' | ||||
|         error = msg.get('error') | ||||
|         if error: | ||||
|             # If this is an error message from a context opened by | ||||
|             # ``Portal.open_context()`` we want to interrupt any ongoing | ||||
|             # (child) tasks within that context to be notified of the remote | ||||
|             # error relayed here. | ||||
|             # | ||||
|             # The reason we may want to raise the remote error immediately | ||||
|             # is that there is no guarantee the associated local task(s) | ||||
|             # will attempt to read from any locally opened stream any time | ||||
|             # soon. | ||||
|             # | ||||
|             # NOTE: this only applies when | ||||
|             # ``Portal.open_context()`` has been called since it is assumed | ||||
|             # (currently) that other portal APIs (``Portal.run()``, | ||||
|             # ``.run_in_actor()``) do their own error checking at the point | ||||
|             # of the call and result processing. | ||||
|             log.error( | ||||
|                 f'Remote context error for {self.chan.uid}:{self.cid}:\n' | ||||
|                 f'{msg["error"]["tb_str"]}' | ||||
|             ) | ||||
|             error = unpack_error(msg, self.chan) | ||||
|             if ( | ||||
|                 isinstance(error, ContextCancelled) and | ||||
|                 self._cancel_called | ||||
|             ): | ||||
|                 # this is an expected cancel request response message | ||||
|                 # and we don't need to raise it in scope since it will | ||||
|                 # potentially override a real error | ||||
|                 return | ||||
| 
 | ||||
|             self._error = error | ||||
| 
 | ||||
|             # TODO: tempted to **not** do this by-reraising in a | ||||
|             # nursery and instead cancel a surrounding scope, detect | ||||
|             # the cancellation, then lookup the error that was set? | ||||
|             if self._scope_nursery: | ||||
| 
 | ||||
|                 async def raiser(): | ||||
|                     raise self._error from None | ||||
| 
 | ||||
|                 # from trio.testing import wait_all_tasks_blocked | ||||
|                 # await wait_all_tasks_blocked() | ||||
|                 if not self._scope_nursery._closed:  # type: ignore | ||||
|                     self._scope_nursery.start_soon(raiser) | ||||
| 
 | ||||
|     async def cancel( | ||||
|         self, | ||||
|         msg: Optional[str] = None, | ||||
| 
 | ||||
|     ) -> None: | ||||
|         ''' | ||||
|         Cancel this inter-actor-task context. | ||||
| 
 | ||||
|         Request that the far side cancel its current linked context; | ||||
|         timeout quickly in an attempt to sidestep the 2-generals problem... | ||||
| 
 | ||||
|         ''' | ||||
|         side = 'caller' if self._portal else 'callee' | ||||
|         if msg: | ||||
|             assert side == 'callee', 'Only callee side can provide cancel msg' | ||||
| 
 | ||||
|         log.cancel(f'Cancelling {side} side of context to {self.chan.uid}') | ||||
| 
 | ||||
|         self._cancel_called = True | ||||
| 
 | ||||
|         if side == 'caller': | ||||
|             if not self._portal: | ||||
|                 raise RuntimeError( | ||||
|                     "No portal found, this is likely a callee side context" | ||||
|                 ) | ||||
| 
 | ||||
|             cid = self.cid | ||||
|             with trio.move_on_after(0.5) as cs: | ||||
|                 cs.shield = True | ||||
|                 log.cancel( | ||||
|                     f"Cancelling stream {cid} to " | ||||
|                     f"{self._portal.channel.uid}") | ||||
| 
 | ||||
|                 # NOTE: we're telling the far end actor to cancel a task | ||||
|                 # corresponding to *this actor*. The far end local channel | ||||
|                 # instance is passed to `Actor._cancel_task()` implicitly. | ||||
|                 await self._portal.run_from_ns('self', '_cancel_task', cid=cid) | ||||
| 
 | ||||
|             if cs.cancelled_caught: | ||||
|                 # XXX: there's no way to know if the remote task was indeed | ||||
|                 # cancelled in the case where the connection is broken or | ||||
|                 # some other network error occurred. | ||||
|                 # if not self._portal.channel.connected(): | ||||
|                 if not self.chan.connected(): | ||||
|                     log.cancel( | ||||
|                         "May have failed to cancel remote task " | ||||
|                         f"{cid} for {self._portal.channel.uid}") | ||||
|                 else: | ||||
|                     log.cancel( | ||||
|                         "Timed out on cancelling remote task " | ||||
|                         f"{cid} for {self._portal.channel.uid}") | ||||
| 
 | ||||
|         # callee side remote task | ||||
|         else: | ||||
|             self._cancel_msg = msg | ||||
| 
 | ||||
|             # TODO: should we have an explicit cancel message | ||||
|             # or is relaying the local `trio.Cancelled` as an | ||||
|             # {'error': trio.Cancelled, cid: "blah"} enough? | ||||
|             # This probably gets into the discussion in | ||||
|             # https://github.com/goodboy/tractor/issues/36 | ||||
|             assert self._scope_nursery | ||||
|             self._scope_nursery.cancel_scope.cancel() | ||||
| 
 | ||||
|         if self._recv_chan: | ||||
|             await self._recv_chan.aclose() | ||||
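From the caller side this is normally reached through the portal API; a minimal hedged example (``some_task`` and ``portal`` are illustrative):

    async with portal.open_context(some_task) as (ctx, first):
        ...
        await ctx.cancel()  # request the far-end linked task to cancel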
| 
 | ||||
|     @asynccontextmanager | ||||
|     async def open_stream( | ||||
| 
 | ||||
|         self, | ||||
|         backpressure: Optional[bool] = True, | ||||
|         msg_buffer_size: Optional[int] = None, | ||||
| 
 | ||||
|     ) -> AsyncGenerator[MsgStream, None]: | ||||
|         ''' | ||||
|         Open a ``MsgStream``, a bi-directional stream connected to the | ||||
|         cross-actor (far end) task for this ``Context``. | ||||
| 
 | ||||
|         This context manager must be entered on both the caller and | ||||
|         callee for the stream to logically be considered "connected". | ||||
| 
 | ||||
|         A ``MsgStream`` is currently "one-shot" use, meaning if you | ||||
|         close it you cannot "re-open" it for streaming and instead you | ||||
|         must re-establish a new surrounding ``Context`` using | ||||
|         ``Portal.open_context()``.  In the future this may change but | ||||
|         currently there seems to be no obvious reason to support | ||||
|         "re-opening": | ||||
|             - pausing a stream can be done with a message. | ||||
|             - task errors will normally require a restart of the entire | ||||
|               scope of the inter-actor task context due to the nature of | ||||
|               ``trio``'s cancellation system. | ||||
| 
 | ||||
|         ''' | ||||
|         actor = current_actor() | ||||
| 
 | ||||
|         # here we create a mem chan that corresponds to the | ||||
|         # far end caller / callee. | ||||
| 
 | ||||
|         # Likewise if the surrounding context has been cancelled we error here | ||||
|         # since it likely means the surrounding block was exited or | ||||
|         # killed | ||||
| 
 | ||||
|         if self._cancel_called: | ||||
|             task = trio.lowlevel.current_task().name | ||||
|             raise ContextCancelled( | ||||
|                 f'Context around {actor.uid[0]}:{task} was already cancelled!' | ||||
|             ) | ||||
| 
 | ||||
|         if not self._portal and not self._started_called: | ||||
|             raise RuntimeError( | ||||
|                 '`Context.started()` must be called before opening a stream' | ||||
|             ) | ||||
| 
 | ||||
|         # NOTE: in one way streaming this only happens on the | ||||
|         # caller side inside `Actor.start_remote_task()` so if you try | ||||
|         # to send a stop from the caller to the callee in the | ||||
|         # single-direction-stream case you'll get a lookup error | ||||
|         # currently. | ||||
|         ctx = actor.get_context( | ||||
|             self.chan, | ||||
|             self.cid, | ||||
|             msg_buffer_size=msg_buffer_size, | ||||
|         ) | ||||
|         ctx._backpressure = backpressure | ||||
|         assert ctx is self | ||||
| 
 | ||||
|         # XXX: If the underlying channel feeder receive mem chan has | ||||
|         # been closed then likely client code has already exited | ||||
|         # a ``.open_stream()`` block prior or there was some other | ||||
|         # unanticipated error or cancellation from ``trio``. | ||||
| 
 | ||||
|         if ctx._recv_chan._closed: | ||||
|             raise trio.ClosedResourceError( | ||||
|                 'The underlying channel for this stream was already closed!?') | ||||
| 
 | ||||
|         async with MsgStream( | ||||
|             ctx=self, | ||||
|             rx_chan=ctx._recv_chan, | ||||
|         ) as stream: | ||||
| 
 | ||||
|             if self._portal: | ||||
|                 self._portal._streams.add(stream) | ||||
| 
 | ||||
|             try: | ||||
|                 self._stream_opened = True | ||||
| 
 | ||||
|                 # XXX: do we need this? | ||||
|                 # ensure we aren't cancelled before yielding the stream | ||||
|                 # await trio.lowlevel.checkpoint() | ||||
|                 yield stream | ||||
| 
 | ||||
|                 # NOTE: Make the stream "one-shot use".  On exit, signal | ||||
|                 # ``trio.EndOfChannel``/``StopAsyncIteration`` to the | ||||
|                 # far end. | ||||
|                 await stream.aclose() | ||||
| 
 | ||||
|             finally: | ||||
|                 if self._portal: | ||||
|                     try: | ||||
|                         self._portal._streams.remove(stream) | ||||
|                     except KeyError: | ||||
|                         log.warning( | ||||
|                             f'Stream was already destroyed?\n' | ||||
|                             f'actor: {self.chan.uid}\n' | ||||
|                             f'ctx id: {self.cid}' | ||||
|                         ) | ||||
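Putting these rules together, a hedged end-to-end example of bidirectional streaming over a context (``echo_server`` and ``portal`` are illustrative names):

    @tractor.context
    async def echo_server(ctx: Context) -> None:
        await ctx.started()  # unblocks the caller side
        async with ctx.open_stream() as stream:
            async for msg in stream:
                await stream.send(msg)  # bounce it back

    # caller side:
    async with portal.open_context(echo_server) as (ctx, first):
        async with ctx.open_stream() as stream:
            await stream.send('ping')
            assert await stream.receive() == 'ping'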
| 
 | ||||
|     async def result(self) -> Any: | ||||
|         ''' | ||||
|         From a caller side, wait for and return the final result from | ||||
|         the callee side task. | ||||
| 
 | ||||
|         ''' | ||||
|         assert self._portal, "Context.result() can not be called from callee!" | ||||
|         assert self._recv_chan | ||||
| 
 | ||||
|         if self._result is False: | ||||
| 
 | ||||
|             if not self._recv_chan._closed:  # type: ignore | ||||
| 
 | ||||
|                 # wait for a final context result consuming | ||||
|                 # and discarding any bi dir stream msgs still | ||||
|                 # in transit from the far end. | ||||
|                 while True: | ||||
| 
 | ||||
|                     msg = await self._recv_chan.receive() | ||||
|                     try: | ||||
|                         self._result = msg['return'] | ||||
|                         break | ||||
|                     except KeyError as msgerr: | ||||
| 
 | ||||
|                         if 'yield' in msg: | ||||
|                             # far end task is still streaming to us so discard | ||||
|                             log.warning(f'Discarding stream delivered {msg}') | ||||
|                             continue | ||||
| 
 | ||||
|                         elif 'stop' in msg: | ||||
|                             log.debug('Remote stream terminated') | ||||
|                             continue | ||||
| 
 | ||||
|                         # internal error should never get here | ||||
|                         assert msg.get('cid'), ( | ||||
|                             "Received internal error at portal?") | ||||
| 
 | ||||
|                         raise unpack_error( | ||||
|                             msg, self._portal.channel | ||||
|                         ) from msgerr | ||||
| 
 | ||||
|         return self._result | ||||
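In other words any in-transit ``'yield'``/``'stop'`` msgs are drained before the ``'return'`` value is produced; caller-side usage might look like:

    async with portal.open_context(some_task) as (ctx, first):
        # ... possibly stream for a while ...
        final = await ctx.result()  # waits on the remote task's return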
| 
 | ||||
|     async def started( | ||||
|         self, | ||||
|         value: Optional[Any] = None | ||||
| 
 | ||||
|     ) -> None: | ||||
|         ''' | ||||
|         Indicate to the calling actor's task that this linked context | ||||
|         has started and send ``value`` to the other side. | ||||
| 
 | ||||
|         On the calling side ``value`` is the second item delivered | ||||
|         in the tuple returned by ``Portal.open_context()``. | ||||
| 
 | ||||
|         ''' | ||||
|         if self._portal: | ||||
|             raise RuntimeError( | ||||
|                 f"Caller side context {self} can not call started!") | ||||
| 
 | ||||
|         elif self._started_called: | ||||
|             raise RuntimeError( | ||||
|                 f"called 'started' twice on context with {self.chan.uid}") | ||||
| 
 | ||||
|         await self.chan.send({'started': value, 'cid': self.cid}) | ||||
|         self._started_called = True | ||||
| 
 | ||||
|     # TODO: do we need a restart api? | ||||
|     # async def restart(self) -> None: | ||||
|     #     pass | ||||
| 
 | ||||
| 
 | ||||
| def stream(func: Callable) -> Callable: | ||||
|     """Mark an async function as a streaming routine with ``@stream``. | ||||
| 
 | ||||
| def current_context(): | ||||
|     """Get the current task's context instance. | ||||
|     """ | ||||
|     # annotate | ||||
|     # TODO: apply whatever solution ``mypy`` ends up picking for this: | ||||
|     # https://github.com/python/mypy/issues/2087#issuecomment-769266912 | ||||
|     func._tractor_stream_function = True  # type: ignore | ||||
|     return _context.get() | ||||
| 
 | ||||
| 
 | ||||
| def stream(func): | ||||
|     """Mark an async function as a streaming routine with ``@stream``. | ||||
|     """ | ||||
|     func._tractor_stream_function = True | ||||
|     sig = inspect.signature(func) | ||||
|     params = sig.parameters | ||||
|     if 'stream' not in params and 'ctx' in params: | ||||
|         warnings.warn( | ||||
|             "`@tractor.stream decorated funcs should now declare a `stream` " | ||||
|             " arg, `ctx` is now designated for use with @tractor.context", | ||||
|             DeprecationWarning, | ||||
|             stacklevel=2, | ||||
|         ) | ||||
| 
 | ||||
|     if ( | ||||
|         'ctx' not in params and | ||||
|         'to_trio' not in params and | ||||
|         'stream' not in params | ||||
|     ): | ||||
|     if 'ctx' not in sig.parameters: | ||||
|         raise TypeError( | ||||
|             "The first argument to the stream function " | ||||
|             f"{func.__name__} must be `ctx: tractor.Context` " | ||||
|             "(Or ``to_trio`` if using ``asyncio`` in guest mode)." | ||||
|         ) | ||||
|     return func | ||||
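A decorated one-way streaming function then looks roughly like this sketch (an async generator whose yields are shipped to a caller entered via ``Portal.open_stream_from()``):

    @tractor.stream
    async def stream_squares(ctx, limit: int):
        # each yield is relayed across the IPC channel
        for i in range(limit):
            yield i ** 2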
| 
 | ||||
| 
 | ||||
| def context(func: Callable) -> Callable: | ||||
|     """Mark an async function as a streaming routine with ``@context``. | ||||
| 
 | ||||
|     """ | ||||
|     # annotate | ||||
|     # TODO: apply whatever solution ``mypy`` ends up picking for this: | ||||
|     # https://github.com/python/mypy/issues/2087#issuecomment-769266912 | ||||
|     func._tractor_context_function = True  # type: ignore | ||||
| 
 | ||||
|     sig = inspect.signature(func) | ||||
|     params = sig.parameters | ||||
|     if 'ctx' not in params: | ||||
|         raise TypeError( | ||||
|             "The first argument to the context function " | ||||
|             f"{func.__name__} must be `ctx: tractor.Context`" | ||||
|         ) | ||||
|     return func | ||||
|  |  | |||
|  | @ -1,40 +1,18 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| ``trio`` inspired apis and helpers | ||||
| 
 | ||||
| """ | ||||
| from contextlib import asynccontextmanager as acm | ||||
| from functools import partial | ||||
| import inspect | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     TYPE_CHECKING, | ||||
| ) | ||||
| import multiprocessing as mp | ||||
| from typing import Tuple, List, Dict, Optional | ||||
| import typing | ||||
| import warnings | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import trio | ||||
| from async_generator import asynccontextmanager | ||||
| 
 | ||||
| from ._debug import maybe_wait_for_debugger | ||||
| from ._state import current_actor, is_main_process | ||||
| from .log import get_logger, get_loglevel | ||||
| from ._runtime import Actor | ||||
| from ._actor import Actor | ||||
| from ._portal import Portal | ||||
| from ._exceptions import is_multi_cancelled | ||||
| from ._root import open_root_actor | ||||
|  | @ -42,98 +20,52 @@ from . import _state | |||
| from . import _spawn | ||||
| 
 | ||||
| 
 | ||||
| if TYPE_CHECKING: | ||||
|     import multiprocessing as mp | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| _default_bind_addr: tuple[str, int] = ('127.0.0.1', 0) | ||||
| _default_bind_addr: Tuple[str, int] = ('127.0.0.1', 0) | ||||
| 
 | ||||
| 
 | ||||
| class ActorNursery: | ||||
|     ''' | ||||
|     The fundamental actor supervision construct: spawn and manage | ||||
|     explicit lifetime and capability restricted, bootstrapped, | ||||
|     ``trio.run()`` scheduled sub-processes. | ||||
| 
 | ||||
|     Though the concept of a "process nursery" is different in complexity | ||||
|     and slightly different in semantics than a traditional single- | ||||
|     threaded task nursery, much of the interface is the same. New | ||||
|     processes each require a top level "parent" or "root" task which is | ||||
|     itself no different than any task started by a traditional | ||||
|     ``trio.Nursery``. The main difference is that each "actor" (a | ||||
|     process + ``trio.run()``) contains a full, parallel-executing | ||||
|     ``trio``-task-tree. The following super powers ensue: | ||||
| 
 | ||||
|     - tasks started in a child actor are completely independent of | ||||
|       tasks started in the current process. They execute in *parallel* | ||||
|       relative to tasks in the current process and are scheduled by their | ||||
|       own actor's ``trio`` run loop. | ||||
|     - tasks scheduled in a remote process still maintain an SC protocol | ||||
|       across memory boundaries using a so called "structured concurrency | ||||
|       dialogue protocol" which ensures task-hierarchy-lifetimes are linked. | ||||
|     - remote tasks (in another actor) can fail and relay failure back to | ||||
|       the caller task (in some other actor) via a serialized | ||||
|       ``RemoteActorError`` which means no zombie process or RPC | ||||
|       initiated task can ever go off on its own. | ||||
| 
 | ||||
|     ''' | ||||
|     """Spawn scoped subprocess actors. | ||||
|     """ | ||||
|     def __init__( | ||||
|         self, | ||||
|         actor: Actor, | ||||
|         ria_nursery: trio.Nursery, | ||||
|         da_nursery: trio.Nursery, | ||||
|         errors: dict[tuple[str, str], BaseException], | ||||
|         errors: Dict[Tuple[str, str], Exception], | ||||
|     ) -> None: | ||||
|         # self.supervisor = supervisor  # TODO | ||||
|         self._actor: Actor = actor | ||||
|         self._ria_nursery = ria_nursery | ||||
|         self._da_nursery = da_nursery | ||||
|         self._children: dict[ | ||||
|             tuple[str, str], | ||||
|             tuple[ | ||||
|                 Actor, | ||||
|                 trio.Process | mp.Process, | ||||
|                 Optional[Portal], | ||||
|             ] | ||||
|         self._children: Dict[ | ||||
|             Tuple[str, str], | ||||
|             Tuple[Actor, mp.Process, Optional[Portal]] | ||||
|         ] = {} | ||||
|         # portals spawned with ``run_in_actor()`` are | ||||
|         # cancelled when their "main" result arrives | ||||
|         self._cancel_after_result_on_exit: set = set() | ||||
|         self.cancelled: bool = False | ||||
|         self._join_procs = trio.Event() | ||||
|         self._at_least_one_child_in_debug: bool = False | ||||
|         self.errors = errors | ||||
|         self.exited = trio.Event() | ||||
| 
 | ||||
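As a note on the `_children` table initialized above, a hypothetical fragment showing its layout; the uid values and the `an` nursery instance are illustrative assumptions:

# keys are actor "uid" `(name, uuid)` string pairs; values pack the
# in-memory Actor stub, the OS process handle, and the portal (which
# stays None until the child has fully connected back).
uid = ('worker', 'd0f7c905-...')            # illustrative uid values
subactor, proc, portal = an._children[uid]  # `an`: an ActorNursery instance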
|     async def start_actor( | ||||
|         self, | ||||
|         name: str, | ||||
|         *, | ||||
|         bind_addr: tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: list[str] | None = None, | ||||
|         enable_modules: list[str] | None = None, | ||||
|         loglevel: str | None = None,  # set log level per subactor | ||||
|         nursery: trio.Nursery | None = None, | ||||
|         debug_mode: Optional[bool] | None = None, | ||||
|         infect_asyncio: bool = False, | ||||
|         bind_addr: Tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: List[str] = None, | ||||
|         enable_modules: List[str] = None, | ||||
|         loglevel: str = None,  # set log level per subactor | ||||
|         nursery: trio.Nursery = None, | ||||
|     ) -> Portal: | ||||
|         ''' | ||||
|         Start a (daemon) actor: a process that has no designated | ||||
|         "main task" besides the runtime. | ||||
| 
 | ||||
|         ''' | ||||
|         loglevel = loglevel or self._actor.loglevel or get_loglevel() | ||||
| 
 | ||||
|         # configure and pass runtime state | ||||
|         _rtv = _state._runtime_vars.copy() | ||||
|         _rtv['_is_root'] = False | ||||
| 
 | ||||
|         # allow setting debug policy per actor | ||||
|         if debug_mode is not None: | ||||
|             _rtv['_debug_mode'] = debug_mode | ||||
|             self._at_least_one_child_in_debug = True | ||||
| 
 | ||||
|         enable_modules = enable_modules or [] | ||||
| 
 | ||||
|         if rpc_module_paths: | ||||
|  | @ -170,25 +102,18 @@ class ActorNursery: | |||
|                 bind_addr, | ||||
|                 parent_addr, | ||||
|                 _rtv,  # run time vars | ||||
|                 infect_asyncio=infect_asyncio, | ||||
|             ) | ||||
|         ) | ||||
| 
 | ||||
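A hedged usage sketch of the daemon-spawn path defined above; the actor name and module path are assumptions for illustration, while `open_nursery()`, `start_actor()`, and `Portal.cancel_actor()` all appear in this module or the diff below:

import trio
import tractor

async def main():
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            'service',                          # hypothetical actor name
            enable_modules=['myapp.workers'],   # hypothetical RPC module
        )
        # a daemon actor has no "main task": it serves RPC requests made
        # through the portal until explicitly cancelled
        await portal.cancel_actor()

if __name__ == '__main__':
    trio.run(main)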
|     async def run_in_actor( | ||||
|         self, | ||||
| 
 | ||||
|         fn: typing.Callable, | ||||
|         *, | ||||
| 
 | ||||
|         name: Optional[str] = None, | ||||
|         bind_addr: tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: list[str] | None = None, | ||||
|         enable_modules: list[str] | None = None, | ||||
|         loglevel: str | None = None,  # set log level per subactor | ||||
|         infect_asyncio: bool = False, | ||||
| 
 | ||||
|         bind_addr: Tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: Optional[List[str]] = None, | ||||
|         loglevel: str = None,  # set log level per subactor | ||||
|         **kwargs,  # explicit args to ``fn`` | ||||
| 
 | ||||
|     ) -> Portal: | ||||
|         """Spawn a new actor, run a lone task, then terminate the actor and | ||||
|         return its result. | ||||
|  | @ -205,23 +130,12 @@ class ActorNursery: | |||
| 
 | ||||
|         portal = await self.start_actor( | ||||
|             name, | ||||
|             enable_modules=[mod_path] + ( | ||||
|                 enable_modules or rpc_module_paths or [] | ||||
|             ), | ||||
|             rpc_module_paths=[mod_path] + (rpc_module_paths or []), | ||||
|             bind_addr=bind_addr, | ||||
|             loglevel=loglevel, | ||||
|             # use the run_in_actor nursery | ||||
|             nursery=self._ria_nursery, | ||||
|             infect_asyncio=infect_asyncio, | ||||
|         ) | ||||
| 
 | ||||
|         # XXX: don't allow stream funcs | ||||
|         if not ( | ||||
|             inspect.iscoroutinefunction(fn) and | ||||
|             not getattr(fn, '_tractor_stream_function', False) | ||||
|         ): | ||||
|             raise TypeError(f'{fn} must be an async function!') | ||||
| 
 | ||||
|         # this marks the actor to be cancelled after its portal result | ||||
|         # is retrieved, see logic in `open_nursery()` below. | ||||
|         self._cancel_after_result_on_exit.add(portal) | ||||
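A minimal usage sketch of the one-shot `run_in_actor()` path wired above; `cube` is a hypothetical module-level async function, and the post-exit `portal.result()` retrieval follows the cancel-after-result behavior just registered:

import trio
import tractor

async def cube(x: int) -> int:   # hypothetical target fn
    return x ** 3

async def main():
    async with tractor.open_nursery() as an:
        portal = await an.run_in_actor(cube, x=3)
    # the lone task's result is returned once the child completes
    # and is reaped on nursery exit
    assert await portal.result() == 27

if __name__ == '__main__':
    trio.run(main)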
|  | @ -241,27 +155,18 @@ class ActorNursery: | |||
|         """ | ||||
|         self.cancelled = True | ||||
| 
 | ||||
|         log.cancel(f"Cancelling nursery in {self._actor.uid}") | ||||
|         log.warning(f"Cancelling nursery in {self._actor.uid}") | ||||
|         with trio.move_on_after(3) as cs: | ||||
| 
 | ||||
|             async with trio.open_nursery() as nursery: | ||||
| 
 | ||||
|                 for subactor, proc, portal in self._children.values(): | ||||
| 
 | ||||
|                     # TODO: are we ever even going to use this or | ||||
|                     # is the spawning backend responsible for such | ||||
|                     # things? I'm thinking latter. | ||||
|                     if hard_kill: | ||||
|                         proc.terminate() | ||||
| 
 | ||||
|                     else: | ||||
|                         if portal is None:  # actor hasn't fully spawned yet | ||||
|                             event = self._actor._peer_connected[subactor.uid] | ||||
|                             log.warning( | ||||
|                                 f"{subactor.uid} wasn't finished spawning?") | ||||
| 
 | ||||
|                             await event.wait() | ||||
| 
 | ||||
|                             # channel/portal should now be up | ||||
|                             _, _, portal = self._children[subactor.uid] | ||||
| 
 | ||||
|  | @ -279,7 +184,6 @@ class ActorNursery: | |||
| 
 | ||||
|                         # spawn cancel tasks for each sub-actor | ||||
|                         assert portal | ||||
|                         if portal.channel.connected(): | ||||
|                         nursery.start_soon(portal.cancel_actor) | ||||
| 
 | ||||
|         # if we cancelled the cancel (we hung cancelling remote actors) | ||||
|  | @ -295,17 +199,13 @@ class ActorNursery: | |||
|         self._join_procs.set() | ||||
| 
 | ||||
| 
 | ||||
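The teardown paths below repeatedly apply the same idiom when invoking the `cancel()` method above: shield the call so the cancellation work cannot itself be interrupted mid-teardown. Roughly (a fragment echoing the supervision code below, where `anursery` is the active ActorNursery):

# the shielded one-cancels-all idiom used throughout the supervision logic
with trio.CancelScope(shield=True):
    await anursery.cancel()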
| @acm | ||||
| @asynccontextmanager | ||||
| async def _open_and_supervise_one_cancels_all_nursery( | ||||
|     actor: Actor, | ||||
| 
 | ||||
| ) -> typing.AsyncGenerator[ActorNursery, None]: | ||||
| 
 | ||||
|     # TODO: yay or nay? | ||||
|     __tracebackhide__ = True | ||||
| 
 | ||||
|     # the collection of errors retrieved from spawned sub-actors | ||||
|     errors: dict[tuple[str, str], BaseException] = {} | ||||
|     errors: Dict[Tuple[str, str], Exception] = {} | ||||
| 
 | ||||
|     # This is the outermost level "daemon actor" nursery. It is awaited | ||||
|     # **after** the below inner "run in actor nursery". This allows for | ||||
|  | @ -326,7 +226,6 @@ async def _open_and_supervise_one_cancels_all_nursery( | |||
|             # As such if the strategy propagates any error(s) upwards | ||||
|             # the above "daemon actor" nursery will be notified. | ||||
|             async with trio.open_nursery() as ria_nursery: | ||||
| 
 | ||||
|                 anursery = ActorNursery( | ||||
|                     actor, | ||||
|                     ria_nursery, | ||||
|  | @ -337,85 +236,64 @@ async def _open_and_supervise_one_cancels_all_nursery( | |||
|                     # spawning of actors happens in the caller's scope | ||||
|                     # after we yield upwards | ||||
|                     yield anursery | ||||
| 
 | ||||
|                     # When we didn't error in the caller's scope, | ||||
|                     # signal all process-monitor-tasks to conduct | ||||
|                     # the "hard join phase". | ||||
|                     log.runtime( | ||||
|                     log.debug( | ||||
|                         f"Waiting on subactors {anursery._children} " | ||||
|                         "to complete" | ||||
|                     ) | ||||
|                     anursery._join_procs.set() | ||||
| 
 | ||||
|                 except BaseException as inner_err: | ||||
|                     errors[actor.uid] = inner_err | ||||
| 
 | ||||
|                     # If we error in the root but the debugger is | ||||
|                     # engaged we don't want to prematurely kill (and | ||||
|                     # thus clobber access to) the local tty since it | ||||
|                     # will make the pdb repl unusable. | ||||
|                     # Instead try to wait for pdb to be released before | ||||
|                     # tearing down. | ||||
|                     await maybe_wait_for_debugger( | ||||
|                         child_in_debug=anursery._at_least_one_child_in_debug | ||||
|                     ) | ||||
| 
 | ||||
|                 except BaseException as err: | ||||
|                     # if the caller's scope errored then we activate our | ||||
|                     # one-cancels-all supervisor strategy (don't | ||||
|                     # worry more are coming). | ||||
|                     anursery._join_procs.set() | ||||
| 
 | ||||
|                     try: | ||||
|                         # XXX: hypothetically an error could be | ||||
|                         # raised and then a cancel signal shows up | ||||
|                         # slightly after in which case the `else:` | ||||
|                         # block here might not complete?  For now, | ||||
|                         # shield both. | ||||
|                         with trio.CancelScope(shield=True): | ||||
|                         etype = type(inner_err) | ||||
|                             etype = type(err) | ||||
|                             if etype in ( | ||||
|                                 trio.Cancelled, | ||||
|                                 KeyboardInterrupt | ||||
|                             ) or ( | ||||
|                             is_multi_cancelled(inner_err) | ||||
|                                 is_multi_cancelled(err) | ||||
|                             ): | ||||
|                             log.cancel( | ||||
|                                 log.warning( | ||||
|                                     f"Nursery for {current_actor().uid} " | ||||
|                                     f"was cancelled with {etype}") | ||||
|                             else: | ||||
|                                 log.exception( | ||||
|                                     f"Nursery for {current_actor().uid} " | ||||
|                                 f"errored with") | ||||
|                                     f"errored with {err}, ") | ||||
| 
 | ||||
|                             # cancel all subactors | ||||
|                             await anursery.cancel() | ||||
| 
 | ||||
|                     except trio.MultiError as merr: | ||||
|                         # If we receive additional errors while waiting on | ||||
|                         # remaining subactors that were cancelled, | ||||
|                         # aggregate those errors with the original error | ||||
|                         # that triggered this teardown. | ||||
|                         if err not in merr.exceptions: | ||||
|                             raise trio.MultiError(merr.exceptions + [err]) | ||||
|                     else: | ||||
|                         raise | ||||
| 
 | ||||
|                 # Last bit before first nursery block ends in the case | ||||
|                 # where we didn't error in the caller's scope | ||||
|                 log.debug("Waiting on all subactors to complete") | ||||
|                 anursery._join_procs.set() | ||||
| 
 | ||||
|                 # ria_nursery scope end | ||||
| 
 | ||||
|         # TODO: this is the handler around the ``.run_in_actor()`` | ||||
|         # nursery. Ideally we can drop this entirely in the future as | ||||
|         # the whole ``.run_in_actor()`` API should be built "on top of" | ||||
|         # this lower level spawn-request-cancel "daemon actor" API where | ||||
|         # a local in-actor task nursery is used with one-to-one task | ||||
|         # + `await Portal.run()` calls and the results/errors are | ||||
|     # handled directly (inline) by the local nursery. | ||||
|         except ( | ||||
|             Exception, | ||||
|             BaseExceptionGroup, | ||||
|             trio.Cancelled | ||||
| 
 | ||||
|         ) as err: | ||||
| 
 | ||||
|             # XXX: yet another guard before allowing the cancel | ||||
|             # sequence in case a (single) child is in debug. | ||||
|             await maybe_wait_for_debugger( | ||||
|                 child_in_debug=anursery._at_least_one_child_in_debug | ||||
|             ) | ||||
| 
 | ||||
|         # XXX: do we need a `trio.Cancelled` catch here as well? | ||||
|         except (Exception, trio.MultiError, trio.Cancelled) as err: | ||||
|             # If actor-local error was raised while waiting on | ||||
|             # ".run_in_actor()" actors then we also want to cancel all | ||||
|             # remaining sub-actors (due to our lone strategy: | ||||
|             # one-cancels-all). | ||||
|             log.cancel(f"Nursery cancelling due to {err}") | ||||
|             log.warning(f"Nursery cancelling due to {err}") | ||||
|             if anursery._children: | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     await anursery.cancel() | ||||
|  | @ -432,26 +310,22 @@ async def _open_and_supervise_one_cancels_all_nursery( | |||
|                     with trio.CancelScope(shield=True): | ||||
|                         await anursery.cancel() | ||||
| 
 | ||||
|                 # use `BaseExceptionGroup` as needed | ||||
|                 # use `MultiError` as needed | ||||
|                 if len(errors) > 1: | ||||
|                     raise BaseExceptionGroup( | ||||
|                         'tractor.ActorNursery errored with', | ||||
|                         tuple(errors.values()), | ||||
|                     ) | ||||
|                     raise trio.MultiError(tuple(errors.values())) | ||||
|                 else: | ||||
|                     raise list(errors.values())[0] | ||||
| 
 | ||||
|         # da_nursery scope end - nursery checkpoint | ||||
|     # final exit | ||||
|         # ria_nursery scope end - nursery checkpoint | ||||
| 
 | ||||
|     # after nursery exit | ||||
| 
 | ||||
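In effect the supervision block above gives callers the following error surface; `boom`/`bang` are hypothetical failing child functions, and the aggregate type follows the `len(errors) > 1` branch above (on the new, `BaseExceptionGroup`-based side of this diff):

import trio
import tractor
from exceptiongroup import BaseExceptionGroup  # backport on Python < 3.11

async def boom() -> None:
    raise ValueError('boom')      # hypothetical failing child fns

async def bang() -> None:
    raise RuntimeError('bang')

async def main():
    try:
        async with tractor.open_nursery() as an:
            await an.run_in_actor(boom)
            await an.run_in_actor(bang)
    except BaseExceptionGroup as beg:
        # 2+ collected child errors arrive aggregated
        print([type(e) for e in beg.exceptions])
    except tractor.RemoteActorError as err:
        # a single child error is re-raised as-is
        print(err)

if __name__ == '__main__':
    trio.run(main)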
| 
 | ||||
| @acm | ||||
| @asynccontextmanager | ||||
| async def open_nursery( | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> typing.AsyncGenerator[ActorNursery, None]: | ||||
|     ''' | ||||
|     Create and yield a new ``ActorNursery`` to be used for spawning | ||||
|     """Create and yield a new ``ActorNursery`` to be used for spawning | ||||
|     structured concurrent subactors. | ||||
| 
 | ||||
|     When an actor is spawned, a new trio task is started which | ||||
|  | @ -463,8 +337,7 @@ async def open_nursery( | |||
|     close it. It turns out this approach is probably more correct | ||||
|     anyway since it is more clear from the following nested nurseries | ||||
|     which cancellation scopes correspond to each spawned subactor set. | ||||
| 
 | ||||
|     ''' | ||||
|     """ | ||||
|     implicit_runtime = False | ||||
| 
 | ||||
|     actor = current_actor(err_on_no_runtime=False) | ||||
|  | @ -472,8 +345,7 @@ async def open_nursery( | |||
|     try: | ||||
|         if actor is None and is_main_process(): | ||||
| 
 | ||||
|             # if we are the parent process start the | ||||
|             # actor runtime implicitly | ||||
|             # if we are the parent process start the actor runtime implicitly | ||||
|             log.info("Starting actor runtime!") | ||||
| 
 | ||||
|             # mark us for teardown on exit | ||||
|  | @ -482,23 +354,19 @@ async def open_nursery( | |||
|             async with open_root_actor(**kwargs) as actor: | ||||
|                 assert actor is current_actor() | ||||
| 
 | ||||
|                 try: | ||||
|                 async with _open_and_supervise_one_cancels_all_nursery( | ||||
|                     actor | ||||
|                 ) as anursery: | ||||
| 
 | ||||
|                     yield anursery | ||||
|                 finally: | ||||
|                     anursery.exited.set() | ||||
| 
 | ||||
|         else:  # sub-nursery case | ||||
| 
 | ||||
|             try: | ||||
|             async with _open_and_supervise_one_cancels_all_nursery( | ||||
|                 actor | ||||
|             ) as anursery: | ||||
| 
 | ||||
|                 yield anursery | ||||
|             finally: | ||||
|                 anursery.exited.set() | ||||
| 
 | ||||
|     finally: | ||||
|         log.debug("Nursery teardown complete") | ||||
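Putting it together: in the root (main) process `open_nursery()` implicitly starts the actor runtime via `open_root_actor()`, forwarding its kwargs, while inside a subactor it only opens a nested supervision scope. A hedged end-to-end sketch; the actor name and `loglevel` value are illustrative choices:

import trio
import tractor

async def main():
    # kwargs are forwarded to `open_root_actor()` when we are the root
    async with tractor.open_nursery(loglevel='warning') as an:
        await an.start_actor('echo')   # hypothetical daemon child
        await an.cancel()              # explicit one-cancels-all teardown

if __name__ == '__main__':
    trio.run(main)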