forked from goodboy/tractor
				
			Compare commits
	
		
			1 Commits 
		
	
	
		
			master
			...
			stream_clo
		
	
	| Author | SHA1 | Date | 
|---|---|---|
|  | 6f9ef99776 | 
|  | @ -1,131 +1,41 @@ | ||||||
| name: CI | name: CI | ||||||
| 
 | 
 | ||||||
| on: | on: push | ||||||
|   # any time someone pushes a new branch to origin |  | ||||||
|   push: |  | ||||||
| 
 |  | ||||||
|   # Allows you to run this workflow manually from the Actions tab |  | ||||||
|   workflow_dispatch: |  | ||||||
| 
 | 
 | ||||||
| jobs: | jobs: | ||||||
| 
 |  | ||||||
|   mypy: |   mypy: | ||||||
|     name: 'MyPy' |     name: 'MyPy' | ||||||
|     runs-on: ubuntu-latest |     runs-on: ubuntu-latest | ||||||
| 
 |  | ||||||
|     steps: |     steps: | ||||||
|       - name: Checkout |       - name: Checkout | ||||||
|         uses: actions/checkout@v2 |         uses: actions/checkout@v2 | ||||||
| 
 |  | ||||||
|       - name: Setup python |       - name: Setup python | ||||||
|         uses: actions/setup-python@v2 |         uses: actions/setup-python@v2 | ||||||
|         with: |         with: | ||||||
|           python-version: '3.10' |           python-version: '3.8' | ||||||
| 
 |  | ||||||
|       - name: Install dependencies |       - name: Install dependencies | ||||||
|         run: pip install -U . --upgrade-strategy eager -r requirements-test.txt |         run: pip install -U . --upgrade-strategy eager | ||||||
| 
 |  | ||||||
|       - name: Run MyPy check |       - name: Run MyPy check | ||||||
|         run: mypy tractor/ --ignore-missing-imports --show-traceback |         run: mypy tractor/ --ignore-missing-imports | ||||||
| 
 | 
 | ||||||
|   # test that we can generate a software distribution and install it |   testing: | ||||||
|   # thus avoid missing file issues after packaging. |  | ||||||
|   sdist-linux: |  | ||||||
|     name: 'sdist' |  | ||||||
|     runs-on: ubuntu-latest |  | ||||||
| 
 |  | ||||||
|     steps: |  | ||||||
|       - name: Checkout |  | ||||||
|         uses: actions/checkout@v2 |  | ||||||
| 
 |  | ||||||
|       - name: Setup python |  | ||||||
|         uses: actions/setup-python@v2 |  | ||||||
|         with: |  | ||||||
|           python-version: '3.10' |  | ||||||
| 
 |  | ||||||
|       - name: Build sdist |  | ||||||
|         run: python setup.py sdist --formats=zip |  | ||||||
| 
 |  | ||||||
|       - name: Install sdist from .zips |  | ||||||
|         run: python -m pip install dist/*.zip |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|   testing-linux: |  | ||||||
|     name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}' |     name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}' | ||||||
|     timeout-minutes: 10 |     timeout-minutes: 10 | ||||||
|     runs-on: ${{ matrix.os }} |     runs-on: ${{ matrix.os }} | ||||||
| 
 |  | ||||||
|     strategy: |     strategy: | ||||||
|       fail-fast: false |       fail-fast: false | ||||||
|       matrix: |       matrix: | ||||||
|         os: [ubuntu-latest] |         os: [ubuntu-latest, windows-latest] | ||||||
|         python: ['3.10'] |         python: ['3.7', '3.8', '3.9'] | ||||||
|         spawn_backend: [ |         spawn_backend: ['trio', 'mp'] | ||||||
|           'trio', |  | ||||||
|           'mp_spawn', |  | ||||||
|           'mp_forkserver', |  | ||||||
|         ] |  | ||||||
| 
 |  | ||||||
|     steps: |     steps: | ||||||
| 
 |  | ||||||
|       - name: Checkout |       - name: Checkout | ||||||
|         uses: actions/checkout@v2 |         uses: actions/checkout@v2 | ||||||
| 
 |  | ||||||
|       - name: Setup python |       - name: Setup python | ||||||
|         uses: actions/setup-python@v2 |         uses: actions/setup-python@v2 | ||||||
|         with: |         with: | ||||||
|           python-version: '${{ matrix.python }}' |           python-version: '${{ matrix.python }}' | ||||||
| 
 |  | ||||||
|       - name: Install dependencies |       - name: Install dependencies | ||||||
|         run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager |         run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager | ||||||
| 
 |  | ||||||
|       - name: List dependencies |  | ||||||
|         run: pip list |  | ||||||
| 
 |  | ||||||
|       - name: Run tests |       - name: Run tests | ||||||
|         run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx |         run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs | ||||||
| 
 |  | ||||||
|   # We skip 3.10 on windows for now due to not having any collabs to |  | ||||||
|   # debug the CI failures. Anyone wanting to hack and solve them is very |  | ||||||
|   # welcome, but our primary user base is not using that OS. |  | ||||||
| 
 |  | ||||||
|   # TODO: use job filtering to accomplish instead of repeated |  | ||||||
|   # boilerplate as is above XD: |  | ||||||
|   # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows |  | ||||||
|   # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix |  | ||||||
|   # - https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idif |  | ||||||
|   # testing-windows: |  | ||||||
|   #   name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}' |  | ||||||
|   #   timeout-minutes: 12 |  | ||||||
|   #   runs-on: ${{ matrix.os }} |  | ||||||
| 
 |  | ||||||
|   #   strategy: |  | ||||||
|   #     fail-fast: false |  | ||||||
|   #     matrix: |  | ||||||
|   #       os: [windows-latest] |  | ||||||
|   #       python: ['3.10'] |  | ||||||
|   #       spawn_backend: ['trio', 'mp'] |  | ||||||
| 
 |  | ||||||
|   #   steps: |  | ||||||
| 
 |  | ||||||
|   #     - name: Checkout |  | ||||||
|   #       uses: actions/checkout@v2 |  | ||||||
| 
 |  | ||||||
|   #     - name: Setup python |  | ||||||
|   #       uses: actions/setup-python@v2 |  | ||||||
|   #       with: |  | ||||||
|   #         python-version: '${{ matrix.python }}' |  | ||||||
| 
 |  | ||||||
|   #     - name: Install dependencies |  | ||||||
|   #       run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager |  | ||||||
| 
 |  | ||||||
|   #     # TODO: pretty sure this solves debugger deps-issues on windows, but it needs to |  | ||||||
|   #     # be verified by someone with a native setup. |  | ||||||
|   #     # - name: Force pyreadline3 |  | ||||||
|   #     #   run: pip uninstall pyreadline; pip install -U pyreadline3 |  | ||||||
| 
 |  | ||||||
|   #     - name: List dependencies |  | ||||||
|   #       run: pip list |  | ||||||
| 
 |  | ||||||
|   #     - name: Run tests |  | ||||||
|   #       run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx |  | ||||||
|  |  | ||||||
							
								
								
									
										147
									
								
								LICENSE
								
								
								
								
							
							
						
						
									
										147
									
								
								LICENSE
								
								
								
								
							|  | @ -1,21 +1,23 @@ | ||||||
|                     GNU AFFERO GENERAL PUBLIC LICENSE |                     GNU GENERAL PUBLIC LICENSE | ||||||
|                        Version 3, 19 November 2007 |                        Version 3, 29 June 2007 | ||||||
| 
 | 
 | ||||||
|  Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/> |  Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> | ||||||
|  Everyone is permitted to copy and distribute verbatim copies |  Everyone is permitted to copy and distribute verbatim copies | ||||||
|  of this license document, but changing it is not allowed. |  of this license document, but changing it is not allowed. | ||||||
| 
 | 
 | ||||||
|                             Preamble |                             Preamble | ||||||
| 
 | 
 | ||||||
|   The GNU Affero General Public License is a free, copyleft license for |   The GNU General Public License is a free, copyleft license for | ||||||
| software and other kinds of works, specifically designed to ensure | software and other kinds of works. | ||||||
| cooperation with the community in the case of network server software. |  | ||||||
| 
 | 
 | ||||||
|   The licenses for most software and other practical works are designed |   The licenses for most software and other practical works are designed | ||||||
| to take away your freedom to share and change the works.  By contrast, | to take away your freedom to share and change the works.  By contrast, | ||||||
| our General Public Licenses are intended to guarantee your freedom to | the GNU General Public License is intended to guarantee your freedom to | ||||||
| share and change all versions of a program--to make sure it remains free | share and change all versions of a program--to make sure it remains free | ||||||
| software for all its users. | software for all its users.  We, the Free Software Foundation, use the | ||||||
|  | GNU General Public License for most of our software; it applies also to | ||||||
|  | any other work released this way by its authors.  You can apply it to | ||||||
|  | your programs, too. | ||||||
| 
 | 
 | ||||||
|   When we speak of free software, we are referring to freedom, not |   When we speak of free software, we are referring to freedom, not | ||||||
| price.  Our General Public Licenses are designed to make sure that you | price.  Our General Public Licenses are designed to make sure that you | ||||||
|  | @ -24,34 +26,44 @@ them if you wish), that you receive source code or can get it if you | ||||||
| want it, that you can change the software or use pieces of it in new | want it, that you can change the software or use pieces of it in new | ||||||
| free programs, and that you know you can do these things. | free programs, and that you know you can do these things. | ||||||
| 
 | 
 | ||||||
|   Developers that use our General Public Licenses protect your rights |   To protect your rights, we need to prevent others from denying you | ||||||
| with two steps: (1) assert copyright on the software, and (2) offer | these rights or asking you to surrender the rights.  Therefore, you have | ||||||
| you this License which gives you legal permission to copy, distribute | certain responsibilities if you distribute copies of the software, or if | ||||||
| and/or modify the software. | you modify it: responsibilities to respect the freedom of others. | ||||||
| 
 | 
 | ||||||
|   A secondary benefit of defending all users' freedom is that |   For example, if you distribute copies of such a program, whether | ||||||
| improvements made in alternate versions of the program, if they | gratis or for a fee, you must pass on to the recipients the same | ||||||
| receive widespread use, become available for other developers to | freedoms that you received.  You must make sure that they, too, receive | ||||||
| incorporate.  Many developers of free software are heartened and | or can get the source code.  And you must show them these terms so they | ||||||
| encouraged by the resulting cooperation.  However, in the case of | know their rights. | ||||||
| software used on network servers, this result may fail to come about. |  | ||||||
| The GNU General Public License permits making a modified version and |  | ||||||
| letting the public access it on a server without ever releasing its |  | ||||||
| source code to the public. |  | ||||||
| 
 | 
 | ||||||
|   The GNU Affero General Public License is designed specifically to |   Developers that use the GNU GPL protect your rights with two steps: | ||||||
| ensure that, in such cases, the modified source code becomes available | (1) assert copyright on the software, and (2) offer you this License | ||||||
| to the community.  It requires the operator of a network server to | giving you legal permission to copy, distribute and/or modify it. | ||||||
| provide the source code of the modified version running there to the |  | ||||||
| users of that server.  Therefore, public use of a modified version, on |  | ||||||
| a publicly accessible server, gives the public access to the source |  | ||||||
| code of the modified version. |  | ||||||
| 
 | 
 | ||||||
|   An older license, called the Affero General Public License and |   For the developers' and authors' protection, the GPL clearly explains | ||||||
| published by Affero, was designed to accomplish similar goals.  This is | that there is no warranty for this free software.  For both users' and | ||||||
| a different license, not a version of the Affero GPL, but Affero has | authors' sake, the GPL requires that modified versions be marked as | ||||||
| released a new version of the Affero GPL which permits relicensing under | changed, so that their problems will not be attributed erroneously to | ||||||
| this license. | authors of previous versions. | ||||||
|  | 
 | ||||||
|  |   Some devices are designed to deny users access to install or run | ||||||
|  | modified versions of the software inside them, although the manufacturer | ||||||
|  | can do so.  This is fundamentally incompatible with the aim of | ||||||
|  | protecting users' freedom to change the software.  The systematic | ||||||
|  | pattern of such abuse occurs in the area of products for individuals to | ||||||
|  | use, which is precisely where it is most unacceptable.  Therefore, we | ||||||
|  | have designed this version of the GPL to prohibit the practice for those | ||||||
|  | products.  If such problems arise substantially in other domains, we | ||||||
|  | stand ready to extend this provision to those domains in future versions | ||||||
|  | of the GPL, as needed to protect the freedom of users. | ||||||
|  | 
 | ||||||
|  |   Finally, every program is threatened constantly by software patents. | ||||||
|  | States should not allow patents to restrict development and use of | ||||||
|  | software on general-purpose computers, but in those that do, we wish to | ||||||
|  | avoid the special danger that patents applied to a free program could | ||||||
|  | make it effectively proprietary.  To prevent this, the GPL assures that | ||||||
|  | patents cannot be used to render the program non-free. | ||||||
| 
 | 
 | ||||||
|   The precise terms and conditions for copying, distribution and |   The precise terms and conditions for copying, distribution and | ||||||
| modification follow. | modification follow. | ||||||
|  | @ -60,7 +72,7 @@ modification follow. | ||||||
| 
 | 
 | ||||||
|   0. Definitions. |   0. Definitions. | ||||||
| 
 | 
 | ||||||
|   "This License" refers to version 3 of the GNU Affero General Public License. |   "This License" refers to version 3 of the GNU General Public License. | ||||||
| 
 | 
 | ||||||
|   "Copyright" also means copyright-like laws that apply to other kinds of |   "Copyright" also means copyright-like laws that apply to other kinds of | ||||||
| works, such as semiconductor masks. | works, such as semiconductor masks. | ||||||
|  | @ -537,45 +549,35 @@ to collect a royalty for further conveying from those to whom you convey | ||||||
| the Program, the only way you could satisfy both those terms and this | the Program, the only way you could satisfy both those terms and this | ||||||
| License would be to refrain entirely from conveying the Program. | License would be to refrain entirely from conveying the Program. | ||||||
| 
 | 
 | ||||||
|   13. Remote Network Interaction; Use with the GNU General Public License. |   13. Use with the GNU Affero General Public License. | ||||||
| 
 |  | ||||||
|   Notwithstanding any other provision of this License, if you modify the |  | ||||||
| Program, your modified version must prominently offer all users |  | ||||||
| interacting with it remotely through a computer network (if your version |  | ||||||
| supports such interaction) an opportunity to receive the Corresponding |  | ||||||
| Source of your version by providing access to the Corresponding Source |  | ||||||
| from a network server at no charge, through some standard or customary |  | ||||||
| means of facilitating copying of software.  This Corresponding Source |  | ||||||
| shall include the Corresponding Source for any work covered by version 3 |  | ||||||
| of the GNU General Public License that is incorporated pursuant to the |  | ||||||
| following paragraph. |  | ||||||
| 
 | 
 | ||||||
|   Notwithstanding any other provision of this License, you have |   Notwithstanding any other provision of this License, you have | ||||||
| permission to link or combine any covered work with a work licensed | permission to link or combine any covered work with a work licensed | ||||||
| under version 3 of the GNU General Public License into a single | under version 3 of the GNU Affero General Public License into a single | ||||||
| combined work, and to convey the resulting work.  The terms of this | combined work, and to convey the resulting work.  The terms of this | ||||||
| License will continue to apply to the part which is the covered work, | License will continue to apply to the part which is the covered work, | ||||||
| but the work with which it is combined will remain governed by version | but the special requirements of the GNU Affero General Public License, | ||||||
| 3 of the GNU General Public License. | section 13, concerning interaction through a network will apply to the | ||||||
|  | combination as such. | ||||||
| 
 | 
 | ||||||
|   14. Revised Versions of this License. |   14. Revised Versions of this License. | ||||||
| 
 | 
 | ||||||
|   The Free Software Foundation may publish revised and/or new versions of |   The Free Software Foundation may publish revised and/or new versions of | ||||||
| the GNU Affero General Public License from time to time.  Such new versions | the GNU General Public License from time to time.  Such new versions will | ||||||
| will be similar in spirit to the present version, but may differ in detail to | be similar in spirit to the present version, but may differ in detail to | ||||||
| address new problems or concerns. | address new problems or concerns. | ||||||
| 
 | 
 | ||||||
|   Each version is given a distinguishing version number.  If the |   Each version is given a distinguishing version number.  If the | ||||||
| Program specifies that a certain numbered version of the GNU Affero General | Program specifies that a certain numbered version of the GNU General | ||||||
| Public License "or any later version" applies to it, you have the | Public License "or any later version" applies to it, you have the | ||||||
| option of following the terms and conditions either of that numbered | option of following the terms and conditions either of that numbered | ||||||
| version or of any later version published by the Free Software | version or of any later version published by the Free Software | ||||||
| Foundation.  If the Program does not specify a version number of the | Foundation.  If the Program does not specify a version number of the | ||||||
| GNU Affero General Public License, you may choose any version ever published | GNU General Public License, you may choose any version ever published | ||||||
| by the Free Software Foundation. | by the Free Software Foundation. | ||||||
| 
 | 
 | ||||||
|   If the Program specifies that a proxy can decide which future |   If the Program specifies that a proxy can decide which future | ||||||
| versions of the GNU Affero General Public License can be used, that proxy's | versions of the GNU General Public License can be used, that proxy's | ||||||
| public statement of acceptance of a version permanently authorizes you | public statement of acceptance of a version permanently authorizes you | ||||||
| to choose that version for the Program. | to choose that version for the Program. | ||||||
| 
 | 
 | ||||||
|  | @ -633,29 +635,40 @@ the "copyright" line and a pointer to where the full notice is found. | ||||||
|     Copyright (C) <year>  <name of author> |     Copyright (C) <year>  <name of author> | ||||||
| 
 | 
 | ||||||
|     This program is free software: you can redistribute it and/or modify |     This program is free software: you can redistribute it and/or modify | ||||||
|     it under the terms of the GNU Affero General Public License as published by |     it under the terms of the GNU General Public License as published by | ||||||
|     the Free Software Foundation, either version 3 of the License, or |     the Free Software Foundation, either version 3 of the License, or | ||||||
|     (at your option) any later version. |     (at your option) any later version. | ||||||
| 
 | 
 | ||||||
|     This program is distributed in the hope that it will be useful, |     This program is distributed in the hope that it will be useful, | ||||||
|     but WITHOUT ANY WARRANTY; without even the implied warranty of |     but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
|     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
|     GNU Affero General Public License for more details. |     GNU General Public License for more details. | ||||||
| 
 | 
 | ||||||
|     You should have received a copy of the GNU Affero General Public License |     You should have received a copy of the GNU General Public License | ||||||
|     along with this program.  If not, see <https://www.gnu.org/licenses/>. |     along with this program.  If not, see <http://www.gnu.org/licenses/>. | ||||||
| 
 | 
 | ||||||
| Also add information on how to contact you by electronic and paper mail. | Also add information on how to contact you by electronic and paper mail. | ||||||
| 
 | 
 | ||||||
|   If your software can interact with users remotely through a computer |   If the program does terminal interaction, make it output a short | ||||||
| network, you should also make sure that it provides a way for users to | notice like this when it starts in an interactive mode: | ||||||
| get its source.  For example, if your program is a web application, its | 
 | ||||||
| interface could display a "Source" link that leads users to an archive |     <program>  Copyright (C) <year>  <name of author> | ||||||
| of the code.  There are many ways you could offer source, and different |     This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. | ||||||
| solutions will be better for different programs; see section 13 for the |     This is free software, and you are welcome to redistribute it | ||||||
| specific requirements. |     under certain conditions; type `show c' for details. | ||||||
|  | 
 | ||||||
|  | The hypothetical commands `show w' and `show c' should show the appropriate | ||||||
|  | parts of the General Public License.  Of course, your program's commands | ||||||
|  | might be different; for a GUI interface, you would use an "about box". | ||||||
| 
 | 
 | ||||||
|   You should also get your employer (if you work as a programmer) or school, |   You should also get your employer (if you work as a programmer) or school, | ||||||
| if any, to sign a "copyright disclaimer" for the program, if necessary. | if any, to sign a "copyright disclaimer" for the program, if necessary. | ||||||
| For more information on this, and how to apply and follow the GNU AGPL, see | For more information on this, and how to apply and follow the GNU GPL, see | ||||||
| <https://www.gnu.org/licenses/>. | <http://www.gnu.org/licenses/>. | ||||||
|  | 
 | ||||||
|  |   The GNU General Public License does not permit incorporating your program | ||||||
|  | into proprietary programs.  If your program is a subroutine library, you | ||||||
|  | may consider it more useful to permit linking proprietary applications with | ||||||
|  | the library.  If this is what you want to do, use the GNU Lesser General | ||||||
|  | Public License instead of this License.  But first, please read | ||||||
|  | <http://www.gnu.org/philosophy/why-not-lgpl.html>. | ||||||
|  |  | ||||||
|  | @ -1,2 +0,0 @@ | ||||||
| # https://packaging.python.org/en/latest/guides/using-manifest-in/#using-manifest-in |  | ||||||
| include docs/README.rst |  | ||||||
							
								
								
									
										528
									
								
								NEWS.rst
								
								
								
								
							
							
						
						
									
										528
									
								
								NEWS.rst
								
								
								
								
							|  | @ -1,528 +0,0 @@ | ||||||
| ========= |  | ||||||
| Changelog |  | ||||||
| ========= |  | ||||||
| 
 |  | ||||||
| .. towncrier release notes start |  | ||||||
| 
 |  | ||||||
| tractor 0.1.0a5 (2022-08-03) |  | ||||||
| ============================ |  | ||||||
| 
 |  | ||||||
| This is our final release supporting Python 3.9 since we will be moving |  | ||||||
| internals to the new `match:` syntax from 3.10 going forward and |  | ||||||
| further, we have officially dropped usage of the `msgpack` library and |  | ||||||
| happily adopted `msgspec`. |  | ||||||
| 
 |  | ||||||
| Features |  | ||||||
| -------- |  | ||||||
| 
 |  | ||||||
| - `#165 <https://github.com/goodboy/tractor/issues/165>`_: Add SIGINT |  | ||||||
|   protection to our `pdbpp` based debugger subsystem such that for |  | ||||||
|   (single-depth) actor trees in debug mode we ignore interrupts in any |  | ||||||
|   actor currently holding the TTY lock thus avoiding clobbering IPC |  | ||||||
|   connections and/or task and process state when working in the REPL. |  | ||||||
| 
 |  | ||||||
|   As a big note currently so called "nested" actor trees (trees with |  | ||||||
|   actors having more then one parent/ancestor) are not fully supported |  | ||||||
|   since we don't yet have a mechanism to relay the debug mode knowledge |  | ||||||
|   "up" the actor tree (for eg. when handling a crash in a leaf actor). |  | ||||||
|   As such currently there is a set of tests and known scenarios which will |  | ||||||
|   result in process clobbering by the zombie reaping machinery and these |  | ||||||
|   have been documented in https://github.com/goodboy/tractor/issues/320. |  | ||||||
| 
 |  | ||||||
|   The implementation details include: |  | ||||||
| 
 |  | ||||||
|   - utilizing a custom SIGINT handler which we apply whenever an actor's |  | ||||||
|     runtime enters the debug machinery, which we also make sure the |  | ||||||
|     stdlib's `pdb` configuration doesn't override (which it does by |  | ||||||
|     default without special instance config). |  | ||||||
|   - litter the runtime with `maybe_wait_for_debugger()` mostly in spots |  | ||||||
|     where the root actor should block before doing embedded nursery |  | ||||||
|     teardown ops which both cancel potential-children-in-debug as well |  | ||||||
|     as eventually trigger zombie reaping machinery. |  | ||||||
|   - hardening of the TTY locking semantics/API both in terms of IPC |  | ||||||
|     terminations and cancellation and lock release determinism from |  | ||||||
|     sync debugger instance methods. |  | ||||||
|   - factoring of locking infrastructure into a new `._debug.Lock` global |  | ||||||
|     which encapsulates all details of the ``trio`` sync primitives and |  | ||||||
|     task/actor uid management and tracking. |  | ||||||
| 
 |  | ||||||
|   We also add `ctrl-c` cases throughout the test suite though these are |  | ||||||
|   disabled for py3.9 (`pdbpp` UX differences that don't seem worth |  | ||||||
|   compensating for, especially since this will be our last 3.9 supported |  | ||||||
|   release) and there are a slew of marked cases that aren't expected to |  | ||||||
|   work in CI more generally (as mentioned in the "nested" tree note |  | ||||||
|   above) despite seemingly working  when run manually on linux. |  | ||||||
| 
 |  | ||||||
| - `#304 <https://github.com/goodboy/tractor/issues/304>`_: Add a new |  | ||||||
|   ``to_asyncio.LinkedTaskChannel.subscribe()`` which gives task-oriented |  | ||||||
|   broadcast functionality semantically equivalent to |  | ||||||
|   ``tractor.MsgStream.subscribe()`` this makes it possible for multiple |  | ||||||
|   ``trio``-side tasks to consume ``asyncio``-side task msgs in tandem. |  | ||||||
| 
 |  | ||||||
|   Further Improvements to the test suite were added in this patch set |  | ||||||
|   including a new scenario test for a sub-actor managed "service nursery" |  | ||||||
|   (implementing the basics of a "service manager") including use of |  | ||||||
|   *infected asyncio* mode. Further we added a lower level |  | ||||||
|   ``test_trioisms.py`` to start to track issues we need to work around in |  | ||||||
|   ``trio`` itself which in this case included a bug we were trying to |  | ||||||
|   solve related to https://github.com/python-trio/trio/issues/2258. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Bug Fixes |  | ||||||
| --------- |  | ||||||
| 
 |  | ||||||
| - `#318 <https://github.com/goodboy/tractor/issues/318>`_: Fix |  | ||||||
|   a previously undetected ``trio``-``asyncio`` task lifetime linking |  | ||||||
|   issue with the ``to_asyncio.open_channel_from()`` api where both sides |  | ||||||
|   where not properly waiting/signalling termination and it was possible |  | ||||||
|   for ``asyncio``-side errors to not propagate due to a race condition. |  | ||||||
| 
 |  | ||||||
|   The implementation fix summary is: |  | ||||||
|   - add state to signal the end of the ``trio`` side task to be |  | ||||||
|     read by the ``asyncio`` side and always cancel any ongoing |  | ||||||
|     task in such cases. |  | ||||||
|   - always wait on the ``asyncio`` task termination from the ``trio`` |  | ||||||
|     side on error before maybe raising said error. |  | ||||||
|   - always close the ``trio`` mem chan on exit to ensure the other |  | ||||||
|     side can detect it and follow. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Trivial/Internal Changes |  | ||||||
| ------------------------ |  | ||||||
| 
 |  | ||||||
| - `#248 <https://github.com/goodboy/tractor/issues/248>`_: Adjust the |  | ||||||
|   `tractor._spawn.soft_wait()` strategy to avoid sending an actor cancel |  | ||||||
|   request (via `Portal.cancel_actor()`) if either the child process is |  | ||||||
|   detected as having terminated or the IPC channel is detected to be |  | ||||||
|   closed. |  | ||||||
| 
 |  | ||||||
|   This ensures (even) more deterministic inter-actor cancellation by |  | ||||||
|   avoiding the timeout condition where possible when a child never |  | ||||||
|   successfully spawned, crashed, or became un-contactable over IPC. |  | ||||||
| 
 |  | ||||||
| - `#295 <https://github.com/goodboy/tractor/issues/295>`_: Add an |  | ||||||
|   experimental ``tractor.msg.NamespacePath`` type for passing Python |  | ||||||
|   objects by "reference" through a ``str``-subtype message and using the |  | ||||||
|   new ``pkgutil.resolve_name()`` for reference loading. |  | ||||||
| 
 |  | ||||||
| - `#298 <https://github.com/goodboy/tractor/issues/298>`_: Add a new |  | ||||||
|   `tractor.experimental` subpackage for staging new high level APIs and |  | ||||||
|   subsystems that we might eventually make built-ins. |  | ||||||
| 
 |  | ||||||
| - `#300 <https://github.com/goodboy/tractor/issues/300>`_: Update to and |  | ||||||
|   pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0) both of which |  | ||||||
|   required adjustments for backwards incompatible API tweaks. |  | ||||||
| 
 |  | ||||||
| - `#303 <https://github.com/goodboy/tractor/issues/303>`_: Fence off |  | ||||||
|   ``multiprocessing`` imports until absolutely necessary in an effort to |  | ||||||
|   avoid "resource tracker" spawning side effects that seem to have |  | ||||||
|   varying degrees of unreliability per Python release. Port to new |  | ||||||
|   ``msgspec.DecodeError``. |  | ||||||
| 
 |  | ||||||
| - `#305 <https://github.com/goodboy/tractor/issues/305>`_: Add |  | ||||||
|   ``tractor.query_actor()`` an addr looker-upper which doesn't deliver |  | ||||||
|   a ``Portal`` instance and instead just a socket address ``tuple``. |  | ||||||
| 
 |  | ||||||
|   Sometimes it's handy to just have a simple way to figure out if |  | ||||||
|   a "service" actor is up, so add this discovery helper for that. We'll |  | ||||||
|   prolly just leave it undocumented for now until we figure out |  | ||||||
|   a longer-term/better discovery system. |  | ||||||
| 
 |  | ||||||
| - `#316 <https://github.com/goodboy/tractor/issues/316>`_: Run windows |  | ||||||
|   CI jobs on python 3.10 after some hacks for ``pdbpp`` dependency |  | ||||||
|   issues. |  | ||||||
| 
 |  | ||||||
|   Issue was to do with the now deprecated `pyreadline` project which |  | ||||||
|   should be changed over to `pyreadline3`. |  | ||||||
| 
 |  | ||||||
| - `#317 <https://github.com/goodboy/tractor/issues/317>`_: Drop use of |  | ||||||
|   the ``msgpack`` package and instead move fully to the ``msgspec`` |  | ||||||
|   codec library. |  | ||||||
| 
 |  | ||||||
|   We've now used ``msgspec`` extensively in production and there's no |  | ||||||
|   reason to not use it as default. Further this change preps us for the up |  | ||||||
|   and coming typed messaging semantics (#196), dialog-unprotocol system |  | ||||||
|   (#297), and caps-based messaging-protocols (#299) planned before our |  | ||||||
|   first beta. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| tractor 0.1.0a4 (2021-12-18) |  | ||||||
| ============================ |  | ||||||
| 
 |  | ||||||
| Features |  | ||||||
| -------- |  | ||||||
| - `#275 <https://github.com/goodboy/tractor/issues/275>`_: Re-license |  | ||||||
|   code base under AGPLv3. Also see `#274 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/274>`_ for majority |  | ||||||
|   contributor consensus on this decision. |  | ||||||
| 
 |  | ||||||
| - `#121 <https://github.com/goodboy/tractor/issues/121>`_: Add |  | ||||||
|   "infected ``asyncio``" mode; a sub-system to spawn and control |  | ||||||
|   ``asyncio`` actors using ``trio``'s guest-mode. |  | ||||||
| 
 |  | ||||||
|   This gets us the following very interesting functionality: |  | ||||||
| 
 |  | ||||||
|   - ability to spawn an actor that has a process entry point of |  | ||||||
|     ``asyncio.run()`` by passing ``infect_asyncio=True`` to |  | ||||||
|     ``Portal.start_actor()`` (and friends). |  | ||||||
|   - the ``asyncio`` actor embeds ``trio`` using guest-mode and starts |  | ||||||
|     a main ``trio`` task which runs the ``tractor.Actor._async_main()`` |  | ||||||
|     entry point which engages all the normal ``tractor`` runtime IPC/messaging |  | ||||||
|     machinery; for all purposes the actor is now running normally on |  | ||||||
|     a ``trio.run()``. |  | ||||||
|   - the actor can now make one-to-one task spawning requests to the |  | ||||||
|     underlying ``asyncio`` event loop using either of: |  | ||||||
| 
 |  | ||||||
|     * ``to_asyncio.run_task()`` to spawn and run an ``asyncio`` task to |  | ||||||
|       completion and block until a return value is delivered. |  | ||||||
|     * ``async with to_asyncio.open_channel_from():`` which spawns a task |  | ||||||
|       and hands it a pair of "memory channels" to allow for bi-directional |  | ||||||
|       streaming between the now SC-linked ``trio`` and ``asyncio`` tasks. |  | ||||||
| 
 |  | ||||||
|   The output from any call(s) to ``asyncio`` can be handled as normal in |  | ||||||
|   ``trio``/``tractor`` task operation with the caveat of the overhead due |  | ||||||
|   to guest-mode use. |  | ||||||
| 
 |  | ||||||
|   For more details see the `original PR |  | ||||||
|   <https://github.com/goodboy/tractor/pull/121>`_ and `issue |  | ||||||
|   <https://github.com/goodboy/tractor/issues/120>`_. |  | ||||||
| 
 |  | ||||||
| - `#257 <https://github.com/goodboy/tractor/issues/257>`_: Add |  | ||||||
|   ``trionics.maybe_open_context()`` an actor-scoped async multi-task |  | ||||||
|   context manager resource caching API. |  | ||||||
| 
 |  | ||||||
|   Adds an SC-safe cacheing async context manager api that only enters on |  | ||||||
|   the *first* task entry and only exits on the *last* task exit while in |  | ||||||
|   between delivering the same cached value per input key. Keys can be |  | ||||||
|   either an explicit ``key`` named arg provided by the user or a |  | ||||||
|   hashable ``kwargs`` dict (will be converted to a ``list[tuple]``) which |  | ||||||
|   is passed to the underlying manager function as input. |  | ||||||
| 
 |  | ||||||
| - `#261 <https://github.com/goodboy/tractor/issues/261>`_: Add |  | ||||||
|   cross-actor-task ``Context`` oriented error relay, a new stream |  | ||||||
|   overrun error-signal ``StreamOverrun``, and support disabling |  | ||||||
|   ``MsgStream`` backpressure as the default before a stream is opened or |  | ||||||
|   by choice of the user. |  | ||||||
| 
 |  | ||||||
|   We added stricter semantics around ``tractor.Context.open_stream():`` |  | ||||||
|   particularly to do with streams which are only opened at one end. |  | ||||||
|   Previously, if only one end opened a stream there was no way for that |  | ||||||
|   sender to know if msgs are being received until first, the feeder mem |  | ||||||
|   chan on the receiver side hit a backpressure state and then that |  | ||||||
|   condition delayed its msg loop processing task to eventually create |  | ||||||
|   backpressure on the associated IPC transport. This is non-ideal in the |  | ||||||
|   case where the receiver side never opened a stream by mistake since it |  | ||||||
|   results in silent block of the sender and no adherence to the underlying |  | ||||||
|   mem chan buffer size settings (which is still unsolved btw). |  | ||||||
| 
 |  | ||||||
|   To solve this we add non-backpressure style message pushing inside |  | ||||||
|   ``Actor._push_result()`` by default and only use the backpressure |  | ||||||
|   ``trio.MemorySendChannel.send()`` call **iff** the local end of the |  | ||||||
|   context has entered ``Context.open_stream():``. This way if the stream |  | ||||||
|   was never opened but the mem chan is overrun, we relay back to the |  | ||||||
|   sender a (new exception) ``StreamOverrun`` error which is raised in the |  | ||||||
|   sender's scope with a special error message about the stream never |  | ||||||
|   having been opened. Further, this behaviour (non-backpressure style |  | ||||||
|   where senders can expect an error on overruns) can now be enabled with |  | ||||||
|   ``.open_stream(backpressure=False)`` and the underlying mem chan size |  | ||||||
|   can be specified with a kwarg ``msg_buffer_size: int``. |  | ||||||
| 
 |  | ||||||
|   Further bug fixes and enhancements in this changeset include: |  | ||||||
| 
 |  | ||||||
|   - fix a race we were ignoring where if the callee task opened a context |  | ||||||
|     it could enter ``Context.open_stream()`` before calling |  | ||||||
|     ``.started()``. |  | ||||||
|   - Disallow calling ``Context.started()`` more than once. |  | ||||||
|   - Enable ``Context`` linked tasks error relaying via the new |  | ||||||
|     ``Context._maybe_raise_from_remote_msg()`` which (for now) uses |  | ||||||
|     a simple ``trio.Nursery.start_soon()`` to raise the error via closure |  | ||||||
|     in the local scope. |  | ||||||
| 
 |  | ||||||
| - `#267 <https://github.com/goodboy/tractor/issues/267>`_: This |  | ||||||
|   (finally) adds fully acknowledged remote cancellation messaging |  | ||||||
|   support for both explicit ``Portal.cancel_actor()`` calls as well as |  | ||||||
|   when there is a "runtime-wide" cancellations (eg. during KBI or |  | ||||||
|   general actor nursery exception handling which causes a full actor |  | ||||||
|   "crash"/termination). |  | ||||||
| 
 |  | ||||||
|   You can think of this as the most ideal case in 2-generals where the |  | ||||||
|   actor requesting the cancel of its child is able to always receive back |  | ||||||
|   the ACK to that request. This leads to a more deterministic shutdown of |  | ||||||
|   the child where the parent is able to wait for the child to fully |  | ||||||
|   respond to the request. On a localhost setup, where the parent can |  | ||||||
|   monitor the state of the child through process or other OS APIs instead |  | ||||||
|   of solely through IPC messaging, the parent can know whether or not the |  | ||||||
|   child decided to cancel with more certainty. In the case of separate |  | ||||||
|   hosts, we still rely on a simple timeout approach until such a time |  | ||||||
|   where we prefer to get "fancier". |  | ||||||
| 
 |  | ||||||
| - `#271 <https://github.com/goodboy/tractor/issues/271>`_: Add a per |  | ||||||
|   actor ``debug_mode: bool`` control to our nursery. |  | ||||||
| 
 |  | ||||||
|   This allows spawning actors via ``ActorNursery.start_actor()`` (and |  | ||||||
|   other dependent methods) with a ``debug_mode=True`` flag much like |  | ||||||
|   ``tractor.open_nursery():`` such that per process crash handling |  | ||||||
|   can be toggled for cases where a user does not need/want all child actors |  | ||||||
|   to drop into the debugger on error. This is often useful when you have |  | ||||||
|   actor-tasks which are expected to error often (and be re-run) but want |  | ||||||
|   to specifically interact with some (problematic) child. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Bugfixes |  | ||||||
| -------- |  | ||||||
| 
 |  | ||||||
| - `#239 <https://github.com/goodboy/tractor/issues/239>`_: Fix |  | ||||||
|   keyboard interrupt handling in ``Portal.open_context()`` blocks. |  | ||||||
| 
 |  | ||||||
|   Previously this was not triggering cancellation of the remote task |  | ||||||
|   context and could result in hangs if a stream was also opened. This |  | ||||||
|   fix is to accept `BaseException` since it is likely any other top |  | ||||||
|   level exception other than KBI (even though not expected) should also |  | ||||||
|   get this result. |  | ||||||
| 
 |  | ||||||
| - `#264 <https://github.com/goodboy/tractor/issues/264>`_: Fix |  | ||||||
|   ``Portal.run_in_actor()`` returns ``None`` result. |  | ||||||
| 
 |  | ||||||
|   ``None`` was being used as the cached result flag and obviously breaks |  | ||||||
|   on a ``None`` returned from the remote target task. This would cause an |  | ||||||
|   infinite hang if user code ever called ``Portal.result()`` *before* the |  | ||||||
|   nursery exit. The simple fix is to use the *return message* as the |  | ||||||
|   initial "no-result-received-yet" flag value and, once received, the |  | ||||||
|   return value is read from the message to avoid the cache logic error. |  | ||||||
| 
 |  | ||||||
| - `#266 <https://github.com/goodboy/tractor/issues/266>`_: Fix |  | ||||||
|   graceful cancellation of daemon actors |  | ||||||
| 
 |  | ||||||
|   Previously, this was a bug where if the soft wait on a sub-process (the |  | ||||||
|   ``await .proc.wait()``) in the reaper task teardown was cancelled we |  | ||||||
|   would fail over to the hard reaping sequence (meant for culling off any |  | ||||||
|   potential zombies via system kill signals). The hard reap has a timeout |  | ||||||
|   of 3s (currently though in theory we could make it shorter?) before |  | ||||||
|   system signalling kicks in. This means that any daemon actor still |  | ||||||
|   running during nursery exit would get hard reaped (3s later) instead of |  | ||||||
|   cancelled via IPC message. Now we catch the ``trio.Cancelled``, call |  | ||||||
|   ``Portal.cancel_actor()`` on the daemon and expect the child to |  | ||||||
|   self-terminate after the runtime cancels and shuts down the process. |  | ||||||
| 
 |  | ||||||
| - `#278 <https://github.com/goodboy/tractor/issues/278>`_: Repair |  | ||||||
|   inter-actor stream closure semantics to work correctly with |  | ||||||
|   ``tractor.trionics.BroadcastReceiver`` task fan out usage. |  | ||||||
| 
 |  | ||||||
|   A set of previously unknown bugs discovered in `#257 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/257>`_ let graceful stream |  | ||||||
|   closure result in hanging consumer tasks that use the broadcast APIs. |  | ||||||
|   This adds better internal closure state tracking to the broadcast |  | ||||||
|   receiver and message stream APIs and in particular ensures that when an |  | ||||||
|   underlying stream/receive-channel (a broadcast receiver is receiving |  | ||||||
|   from) is closed, all consumer tasks waiting on that underlying channel |  | ||||||
|   are woken so they can receive the ``trio.EndOfChannel`` signal and |  | ||||||
|   promptly terminate. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| tractor 0.1.0a3 (2021-11-02) |  | ||||||
| ============================ |  | ||||||
| 
 |  | ||||||
| Features |  | ||||||
| -------- |  | ||||||
| 
 |  | ||||||
| - Switch to using the ``trio`` process spawner by default on windows. (#166) |  | ||||||
| 
 |  | ||||||
|   This gets windows users debugger support (manually tested) and in |  | ||||||
|   general a more resilient (nested) actor tree implementation. |  | ||||||
| 
 |  | ||||||
| - Add optional `msgspec <https://jcristharif.com/msgspec/>`_ support |  | ||||||
|   as an alternative, faster MessagePack codec. (#214) |  | ||||||
| 
 |  | ||||||
|   Provides us with a path toward supporting typed IPC message contracts. Further, |  | ||||||
|   ``msgspec`` structs may be a valid tool to start for formalizing our |  | ||||||
|   "SC dialog un-protocol" messages as described in `#36 |  | ||||||
|   <https://github.com/goodboy/tractor/issues/36>`_. |  | ||||||
| 
 |  | ||||||
| - Introduce a new ``tractor.trionics`` `sub-package`_ that exposes |  | ||||||
|   a selection of our relevant high(er) level trio primitives and |  | ||||||
|   goodies. (#241) |  | ||||||
| 
 |  | ||||||
|   At outset we offer a ``gather_contexts()`` context manager for |  | ||||||
|   concurrently entering a sequence of async context managers (much like |  | ||||||
|   a version of ``asyncio.gather()`` but for context managers) and use it |  | ||||||
|   in a new ``tractor.open_actor_cluster()`` manager-helper that can be |  | ||||||
|   entered to concurrently spawn a flat actor pool. We also now publicly |  | ||||||
|   expose our "broadcast channel" APIs (``open_broadcast_receiver()``) |  | ||||||
|   from here. |  | ||||||
| 
 |  | ||||||
| .. _sub-package: ../tractor/trionics |  | ||||||
| 
 |  | ||||||
| - Change the core message loop to handle task and actor-runtime cancel |  | ||||||
|   requests immediately instead of scheduling them as is done for rpc-task |  | ||||||
|   requests. (#245) |  | ||||||
| 
 |  | ||||||
|   In order to obtain more reliable teardown mechanics for (complex) actor |  | ||||||
|   trees it's important that we specially treat cancel requests as having |  | ||||||
|   higher priority. Previously, it was possible that task cancel requests |  | ||||||
|   could actually also themselves be cancelled if a "actor-runtime" cancel |  | ||||||
|   request was received (can happen during messy multi actor crashes that |  | ||||||
|   propagate). Instead cancels now block the msg loop until serviced and |  | ||||||
|   a response is relayed back to the requester. This also allows for |  | ||||||
|   improved debugger support since we have determinism guarantees about |  | ||||||
|   which processes must wait before hard killing their children. |  | ||||||
| 
 |  | ||||||
| - (`#248 <https://github.com/goodboy/tractor/pull/248>`_) Drop Python |  | ||||||
|   3.8 support in favour of rolling with two latest releases for the time |  | ||||||
|   being. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Misc |  | ||||||
| ---- |  | ||||||
| 
 |  | ||||||
| - (`#243 <https://github.com/goodboy/tractor/pull/243>`_) add a distinct |  | ||||||
|   ``'CANCEL'`` log level to allow the runtime to emit details about |  | ||||||
|   cancellation machinery statuses. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| tractor 0.1.0a2 (2021-09-07) |  | ||||||
| ============================ |  | ||||||
| 
 |  | ||||||
| Features |  | ||||||
| -------- |  | ||||||
| 
 |  | ||||||
| - Add `tokio-style broadcast channels |  | ||||||
|   <https://docs.rs/tokio/1.11.0/tokio/sync/broadcast/index.html>`_ as |  | ||||||
|   a solution for `#204 <https://github.com/goodboy/tractor/pull/204>`_ and |  | ||||||
|   discussed thoroughly in `trio/#987 |  | ||||||
|   <https://github.com/python-trio/trio/issues/987>`_. |  | ||||||
| 
 |  | ||||||
|   This gives us local task broadcast functionality using a new |  | ||||||
|   ``BroadcastReceiver`` type which can wrap ``trio.ReceiveChannel``  and |  | ||||||
|   provide fan-out copies of a stream of data to every subscribed consumer. |  | ||||||
|   We use this new machinery to provide a ``ReceiveMsgStream.subscribe()`` |  | ||||||
|   async context manager which can be used by actor-local consumer tasks |  | ||||||
|   to easily pull from a shared and dynamic IPC stream. (`#229 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/229>`_) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Bugfixes |  | ||||||
| -------- |  | ||||||
| 
 |  | ||||||
| - Handle broken channel/stream faults where the root's tty lock is left |  | ||||||
|   acquired by some child actor who went MIA and the root ends up hanging |  | ||||||
|   indefinitely. (`#234 <https://github.com/goodboy/tractor/pull/234>`_) |  | ||||||
| 
 |  | ||||||
|   There's two parts here: we no longer shield wait on the lock and, |  | ||||||
|   now always do our best to release the lock on the expected worst |  | ||||||
|   case connection faults. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Deprecations and Removals |  | ||||||
| ------------------------- |  | ||||||
| 
 |  | ||||||
| - Drop stream "shielding" support which was originally added to sidestep |  | ||||||
|   a cancelled call to ``.receive()`` |  | ||||||
| 
 |  | ||||||
|   In the original api design a stream instance was returned directly from |  | ||||||
|   a call to ``Portal.run()`` and thus there was no "exit phase" to handle |  | ||||||
|   cancellations and errors which would trigger implicit closure. Now that |  | ||||||
|   we have said enter/exit semantics with ``Portal.open_stream_from()`` and |  | ||||||
|   ``Context.open_stream()`` we can drop this implicit (and arguably |  | ||||||
|   confusing) behavior. (`#230 <https://github.com/goodboy/tractor/pull/230>`_) |  | ||||||
| 
 |  | ||||||
| - Drop Python 3.7 support in preparation for supporting 3.9+ syntax. |  | ||||||
|   (`#232 <https://github.com/goodboy/tractor/pull/232>`_) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| tractor 0.1.0a1 (2021-08-01) |  | ||||||
| ============================ |  | ||||||
| 
 |  | ||||||
| Features |  | ||||||
| -------- |  | ||||||
| - Updated our uni-directional streaming API (`#206 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/206>`_) to require a context |  | ||||||
|   manager style ``async with Portal.open_stream_from(target) as stream:`` |  | ||||||
|   which explicitly determines when to stop a stream in the calling (aka |  | ||||||
|   portal opening) actor much like ``async_generator.aclosing()`` |  | ||||||
|   enforcement. |  | ||||||
| 
 |  | ||||||
| - Improved the ``multiprocessing`` backend sub-actor reaping (`#208 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/208>`_) during actor nursery |  | ||||||
|   exit, particularly during cancellation scenarios that previously might |  | ||||||
|   result in hard to debug hangs. |  | ||||||
| 
 |  | ||||||
| - Added initial bi-directional streaming support in `#219 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/219>`_ with follow up debugger |  | ||||||
|   improvements via `#220 <https://github.com/goodboy/tractor/pull/220>`_ |  | ||||||
|   using the new ``tractor.Context`` cross-actor task syncing system. |  | ||||||
|   The debugger upgrades add an edge triggered last-in-tty-lock semaphore |  | ||||||
|   which allows the root process for a tree to avoid clobbering children |  | ||||||
|   who have queued to acquire the ``pdb`` repl by waiting to cancel |  | ||||||
|   sub-actors until the lock is known to be released **and** has no |  | ||||||
|   pending waiters. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Experiments and WIPs |  | ||||||
| -------------------- |  | ||||||
| - Initial optional ``msgspec`` serialization support in `#214 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/214>`_ which should hopefully |  | ||||||
|   land by next release. |  | ||||||
| 
 |  | ||||||
| - Improved "infect ``asyncio``" cross-loop task cancellation and error |  | ||||||
|   propagation by vastly simplifying the cross-loop-task streaming approach.  |  | ||||||
|   We may end up just going with a use of ``anyio`` in the medium term to |  | ||||||
|   avoid re-doing work done by their cross-event-loop portals.  See the |  | ||||||
|   ``infect_asyncio`` for details. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Improved Documentation |  | ||||||
| ---------------------- |  | ||||||
| - `Updated our readme <https://github.com/goodboy/tractor/pull/211>`_ to |  | ||||||
|   include more (and better) `examples |  | ||||||
|   <https://github.com/goodboy/tractor#run-a-func-in-a-process>`_ (with |  | ||||||
|   matching multi-terminal process monitoring shell commands) as well as |  | ||||||
|   added many more examples to the `repo set |  | ||||||
|   <https://github.com/goodboy/tractor/tree/master/examples>`_. |  | ||||||
| 
 |  | ||||||
| - Added a readme `"actors under the hood" section |  | ||||||
|   <https://github.com/goodboy/tractor#under-the-hood>`_ in an effort to |  | ||||||
|   guard against suggestions for changing the API away from ``trio``'s |  | ||||||
|   *tasks-as-functions* style. |  | ||||||
| 
 |  | ||||||
| - Moved to using the `sphinx book theme |  | ||||||
|   <https://sphinx-book-theme.readthedocs.io/en/latest/index.html>`_ |  | ||||||
|   though it needs some heavy tweaking and doesn't seem to show our logo |  | ||||||
|   on rtd :( |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Trivial/Internal Changes |  | ||||||
| ------------------------ |  | ||||||
| - Added a new ``TransportClosed`` internal exception/signal (`#215 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/215>`_ for catching TCP |  | ||||||
|   channel gentle closes instead of silently falling through the message |  | ||||||
|   handler loop via an async generator ``return``. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Deprecations and Removals |  | ||||||
| ------------------------- |  | ||||||
| - Dropped support for invoking sync functions (`#205 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/205>`_) in other |  | ||||||
|   actors/processes since you can always wrap a sync function from an |  | ||||||
|   async one.  Users can instead consider using ``trio-parallel`` which |  | ||||||
|   is a project specifically geared for purely synchronous calls in |  | ||||||
|   sub-processes. |  | ||||||
| 
 |  | ||||||
| - Deprecated our ``tractor.run()`` entrypoint `#197 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/197>`_; the runtime is now |  | ||||||
|   either started implicitly in first actor nursery use or via an |  | ||||||
|   explicit call to ``tractor.open_root_actor()``. Full removal of |  | ||||||
|   ``tractor.run()`` will come by beta release. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| tractor 0.1.0a0 (2021-02-28) |  | ||||||
| ============================ |  | ||||||
| 
 |  | ||||||
| .. |  | ||||||
|     TODO: fill out more of the details of the initial feature set in some TLDR form |  | ||||||
| 
 |  | ||||||
| Summary |  | ||||||
| ------- |  | ||||||
| - ``trio`` based process spawner (using ``subprocess``) |  | ||||||
| - initial multi-process debugging with ``pdb++`` |  | ||||||
| - windows support using both ``trio`` and ``multiprocessing`` spawners |  | ||||||
| - "portal" api for cross-process, structured concurrent, (streaming) IPC |  | ||||||
							
								
								
									
										394
									
								
								docs/README.rst
								
								
								
								
							
							
						
						
									
										394
									
								
								docs/README.rst
								
								
								
								
							|  | @ -3,22 +3,18 @@ | ||||||
| |gh_actions| | |gh_actions| | ||||||
| |docs| | |docs| | ||||||
| 
 | 
 | ||||||
| ``tractor`` is a `structured concurrent`_, multi-processing_ runtime | .. _actor model: https://en.wikipedia.org/wiki/Actor_model | ||||||
| built on trio_. | .. _trio: https://github.com/python-trio/trio | ||||||
|  | .. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing | ||||||
|  | .. _trionic: https://trio.readthedocs.io/en/latest/design.html#high-level-design-principles | ||||||
|  | .. _async sandwich: https://trio.readthedocs.io/en/latest/tutorial.html#async-sandwich | ||||||
|  | .. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228 | ||||||
| 
 | 
 | ||||||
| Fundamentally, ``tractor`` gives you parallelism via |  | ||||||
| ``trio``-"*actors*": independent Python processes (aka |  | ||||||
| non-shared-memory threads) which maintain structured |  | ||||||
| concurrency (SC) *end-to-end* inside a *supervision tree*. |  | ||||||
| 
 | 
 | ||||||
| Cross-process (and thus cross-host) SC is accomplished through the | ``tractor`` is a `structured concurrent`_ "`actor model`_" built on trio_ and multi-processing_. | ||||||
| combined use of our "actor nurseries_" and an "SC-transitive IPC |  | ||||||
| protocol" constructed on top of multiple Pythons each running a ``trio`` |  | ||||||
| scheduled runtime - a call to ``trio.run()``. |  | ||||||
| 
 | 
 | ||||||
| We believe the system adheres to the `3 axioms`_ of an "`actor model`_" | We pair structured concurrency and true multi-core parallelism with | ||||||
| but likely *does not* look like what *you* probably think an "actor | the aim of being the multi-processing framework *you always wanted*. | ||||||
| model" looks like, and that's *intentional*. |  | ||||||
| 
 | 
 | ||||||
| The first step to grok ``tractor`` is to get the basics of ``trio`` down. | The first step to grok ``tractor`` is to get the basics of ``trio`` down. | ||||||
| A great place to start is the `trio docs`_ and this `blog post`_. | A great place to start is the `trio docs`_ and this `blog post`_. | ||||||
|  | @ -27,17 +23,12 @@ A great place to start is the `trio docs`_ and this `blog post`_. | ||||||
| Features | Features | ||||||
| -------- | -------- | ||||||
| - **It's just** a ``trio`` API | - **It's just** a ``trio`` API | ||||||
| - *Infinitely nesteable* process trees | - Infinitely nesteable process trees | ||||||
| - Builtin IPC streaming APIs with task fan-out broadcasting | - Built-in APIs for inter-process streaming | ||||||
| - A "native" multi-core debugger REPL using `pdbp`_ (a fork & fix of | - A (first ever?) "native" multi-core debugger for Python using `pdb++`_ | ||||||
|   `pdb++`_ thanks to @mdmintz!) | - Support for multiple process spawning backends | ||||||
| - Support for a swappable, OS specific, process spawning layer | - A modular transport layer, allowing for custom serialization, | ||||||
| - A modular transport stack, allowing for custom serialization (eg. with |   communications protocols, and environment specific IPC primitives | ||||||
|   `msgspec`_), communications protocols, and environment specific IPC |  | ||||||
|   primitives |  | ||||||
| - Support for spawning process-level-SC, inter-loop one-to-one-task oriented |  | ||||||
|   ``asyncio`` actors via "infected ``asyncio``" mode |  | ||||||
| - `structured chadcurrency`_ from the ground up |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| Run a func in a process | Run a func in a process | ||||||
|  | @ -138,8 +129,7 @@ Zombie safe: self-destruct a process tree | ||||||
|             print('This process tree will self-destruct in 1 sec...') |             print('This process tree will self-destruct in 1 sec...') | ||||||
|             await trio.sleep(1) |             await trio.sleep(1) | ||||||
| 
 | 
 | ||||||
|             # raise an error in root actor/process and trigger |             # you could have done this yourself | ||||||
|             # reaping of all minions |  | ||||||
|             raise Exception('Self Destructed') |             raise Exception('Self Destructed') | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -156,7 +146,7 @@ it **is a bug**. | ||||||
| 
 | 
 | ||||||
| "Native" multi-process debugging | "Native" multi-process debugging | ||||||
| -------------------------------- | -------------------------------- | ||||||
| Using the magic of `pdbp`_ and our internal IPC, we've | Using the magic of `pdb++`_ and our internal IPC, we've | ||||||
| been able to create a native feeling debugging experience for | been able to create a native feeling debugging experience for | ||||||
| any (sub-)process in your ``tractor`` tree. | any (sub-)process in your ``tractor`` tree. | ||||||
| 
 | 
 | ||||||
|  | @ -209,98 +199,6 @@ And, yes, there's a built-in crash handling mode B) | ||||||
| We're hoping to add a respawn-from-repl system soon! | We're hoping to add a respawn-from-repl system soon! | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| SC compatible bi-directional streaming |  | ||||||
| -------------------------------------- |  | ||||||
| Yes, you saw it here first; we provide 2-way streams |  | ||||||
| with reliable, transitive setup/teardown semantics. |  | ||||||
| 
 |  | ||||||
| Our nascent api is remniscent of ``trio.Nursery.start()`` |  | ||||||
| style invocation: |  | ||||||
| 
 |  | ||||||
| .. code:: python |  | ||||||
| 
 |  | ||||||
|     import trio |  | ||||||
|     import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     @tractor.context |  | ||||||
|     async def simple_rpc( |  | ||||||
| 
 |  | ||||||
|         ctx: tractor.Context, |  | ||||||
|         data: int, |  | ||||||
| 
 |  | ||||||
|     ) -> None: |  | ||||||
|         '''Test a small ping-pong 2-way streaming server. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         # signal to parent that we're up much like |  | ||||||
|         # ``trio_typing.TaskStatus.started()`` |  | ||||||
|         await ctx.started(data + 1) |  | ||||||
| 
 |  | ||||||
|         async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|             count = 0 |  | ||||||
|             async for msg in stream: |  | ||||||
| 
 |  | ||||||
|                 assert msg == 'ping' |  | ||||||
|                 await stream.send('pong') |  | ||||||
|                 count += 1 |  | ||||||
| 
 |  | ||||||
|             else: |  | ||||||
|                 assert count == 10 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     async def main() -> None: |  | ||||||
| 
 |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
| 
 |  | ||||||
|             portal = await n.start_actor( |  | ||||||
|                 'rpc_server', |  | ||||||
|                 enable_modules=[__name__], |  | ||||||
|             ) |  | ||||||
| 
 |  | ||||||
|             # XXX: this syntax requires py3.9 |  | ||||||
|             async with ( |  | ||||||
| 
 |  | ||||||
|                 portal.open_context( |  | ||||||
|                     simple_rpc, |  | ||||||
|                     data=10, |  | ||||||
|                 ) as (ctx, sent), |  | ||||||
| 
 |  | ||||||
|                 ctx.open_stream() as stream, |  | ||||||
|             ): |  | ||||||
| 
 |  | ||||||
|                 assert sent == 11 |  | ||||||
| 
 |  | ||||||
|                 count = 0 |  | ||||||
|                 # receive msgs using async for style |  | ||||||
|                 await stream.send('ping') |  | ||||||
| 
 |  | ||||||
|                 async for msg in stream: |  | ||||||
|                     assert msg == 'pong' |  | ||||||
|                     await stream.send('ping') |  | ||||||
|                     count += 1 |  | ||||||
| 
 |  | ||||||
|                     if count >= 9: |  | ||||||
|                         break |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|             # explicitly teardown the daemon-actor |  | ||||||
|             await portal.cancel_actor() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     if __name__ == '__main__': |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| See original proposal and discussion in `#53`_ as well |  | ||||||
| as follow up improvements in `#223`_ that we'd love to |  | ||||||
| hear your thoughts on! |  | ||||||
| 
 |  | ||||||
| .. _#53: https://github.com/goodboy/tractor/issues/53 |  | ||||||
| .. _#223: https://github.com/goodboy/tractor/issues/223 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Worker poolz are easy peasy | Worker poolz are easy peasy | ||||||
| --------------------------- | --------------------------- | ||||||
| The initial ask from most new users is *"how do I make a worker | The initial ask from most new users is *"how do I make a worker | ||||||
|  | @ -323,173 +221,9 @@ real time:: | ||||||
| This uses no extra threads, fancy semaphores or futures; all we need | This uses no extra threads, fancy semaphores or futures; all we need | ||||||
| is ``tractor``'s IPC! | is ``tractor``'s IPC! | ||||||
| 
 | 
 | ||||||
| "Infected ``asyncio``" mode |  | ||||||
| --------------------------- |  | ||||||
| Have a bunch of ``asyncio`` code you want to force to be SC at the process level? |  | ||||||
| 
 |  | ||||||
| Check out our experimental system for `guest-mode`_ controlled |  | ||||||
| ``asyncio`` actors: |  | ||||||
| 
 |  | ||||||
| .. code:: python |  | ||||||
| 
 |  | ||||||
|     import asyncio |  | ||||||
|     from statistics import mean |  | ||||||
|     import time |  | ||||||
| 
 |  | ||||||
|     import trio |  | ||||||
|     import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     async def aio_echo_server( |  | ||||||
|         to_trio: trio.MemorySendChannel, |  | ||||||
|         from_trio: asyncio.Queue, |  | ||||||
|     ) -> None: |  | ||||||
| 
 |  | ||||||
|         # a first message must be sent **from** this ``asyncio`` |  | ||||||
|         # task or the ``trio`` side will never unblock from |  | ||||||
|         # ``tractor.to_asyncio.open_channel_from():`` |  | ||||||
|         to_trio.send_nowait('start') |  | ||||||
| 
 |  | ||||||
|         # XXX: this uses an ``from_trio: asyncio.Queue`` currently but we |  | ||||||
|         # should probably offer something better. |  | ||||||
|         while True: |  | ||||||
|             # echo the msg back |  | ||||||
|             to_trio.send_nowait(await from_trio.get()) |  | ||||||
|             await asyncio.sleep(0) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     @tractor.context |  | ||||||
|     async def trio_to_aio_echo_server( |  | ||||||
|         ctx: tractor.Context, |  | ||||||
|     ): |  | ||||||
|         # this will block until the ``asyncio`` task sends a "first" |  | ||||||
|         # message. |  | ||||||
|         async with tractor.to_asyncio.open_channel_from( |  | ||||||
|             aio_echo_server, |  | ||||||
|         ) as (first, chan): |  | ||||||
| 
 |  | ||||||
|             assert first == 'start' |  | ||||||
|             await ctx.started(first) |  | ||||||
| 
 |  | ||||||
|             async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|                 async for msg in stream: |  | ||||||
|                     await chan.send(msg) |  | ||||||
| 
 |  | ||||||
|                     out = await chan.receive() |  | ||||||
|                     # echo back to parent actor-task |  | ||||||
|                     await stream.send(out) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             p = await n.start_actor( |  | ||||||
|                 'aio_server', |  | ||||||
|                 enable_modules=[__name__], |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|             ) |  | ||||||
|             async with p.open_context( |  | ||||||
|                 trio_to_aio_echo_server, |  | ||||||
|             ) as (ctx, first): |  | ||||||
| 
 |  | ||||||
|                 assert first == 'start' |  | ||||||
| 
 |  | ||||||
|                 count = 0 |  | ||||||
|                 async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|                     delays = [] |  | ||||||
|                     send = time.time() |  | ||||||
| 
 |  | ||||||
|                     await stream.send(count) |  | ||||||
|                     async for msg in stream: |  | ||||||
|                         recv = time.time() |  | ||||||
|                         delays.append(recv - send) |  | ||||||
|                         assert msg == count |  | ||||||
|                         count += 1 |  | ||||||
|                         send = time.time() |  | ||||||
|                         await stream.send(count) |  | ||||||
| 
 |  | ||||||
|                         if count >= 1e3: |  | ||||||
|                             break |  | ||||||
| 
 |  | ||||||
|             print(f'mean round trip rate (Hz): {1/mean(delays)}') |  | ||||||
|             await p.cancel_actor() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     if __name__ == '__main__': |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Yes, we spawn a python process, run ``asyncio``, start ``trio`` on the |  | ||||||
| ``asyncio`` loop, then send commands to the ``trio`` scheduled tasks to |  | ||||||
| tell ``asyncio`` tasks what to do XD |  | ||||||
| 
 |  | ||||||
| We need help refining the `asyncio`-side channel API to be more |  | ||||||
| `trio`-like. Feel free to sling your opinion in `#273`_! |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| .. _#273: https://github.com/goodboy/tractor/issues/273 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Higher level "cluster" APIs |  | ||||||
| --------------------------- |  | ||||||
| To be extra terse the ``tractor`` devs have started hacking some "higher |  | ||||||
| level" APIs for managing actor trees/clusters. These interfaces should |  | ||||||
| generally be condsidered provisional for now but we encourage you to try |  | ||||||
| them and provide feedback. Here's a new API that let's you quickly |  | ||||||
| spawn a flat cluster: |  | ||||||
| 
 |  | ||||||
| .. code:: python |  | ||||||
| 
 |  | ||||||
|     import trio |  | ||||||
|     import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     async def sleepy_jane(): |  | ||||||
|         uid = tractor.current_actor().uid |  | ||||||
|         print(f'Yo i am actor {uid}') |  | ||||||
|         await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
|         ''' |  | ||||||
|         Spawn a flat actor cluster, with one process per |  | ||||||
|         detected core. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         portal_map: dict[str, tractor.Portal] |  | ||||||
|         results: dict[str, str] |  | ||||||
| 
 |  | ||||||
|         # look at this hip new syntax! |  | ||||||
|         async with ( |  | ||||||
| 
 |  | ||||||
|             tractor.open_actor_cluster( |  | ||||||
|                 modules=[__name__] |  | ||||||
|             ) as portal_map, |  | ||||||
| 
 |  | ||||||
|             trio.open_nursery() as n, |  | ||||||
|         ): |  | ||||||
| 
 |  | ||||||
|             for (name, portal) in portal_map.items(): |  | ||||||
|                 n.start_soon(portal.run, sleepy_jane) |  | ||||||
| 
 |  | ||||||
|             await trio.sleep(0.5) |  | ||||||
| 
 |  | ||||||
|             # kill the cluster with a cancel |  | ||||||
|             raise KeyboardInterrupt |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     if __name__ == '__main__': |  | ||||||
|         try: |  | ||||||
|             trio.run(main) |  | ||||||
|         except KeyboardInterrupt: |  | ||||||
|             pass |  | ||||||
| 
 |  | ||||||
| 
 | 
 | ||||||
| .. _full worker pool re-implementation: https://github.com/goodboy/tractor/blob/master/examples/parallelism/concurrent_actors_primes.py | .. _full worker pool re-implementation: https://github.com/goodboy/tractor/blob/master/examples/parallelism/concurrent_actors_primes.py | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
| Install | Install | ||||||
| ------- | ------- | ||||||
| From PyPi:: | From PyPi:: | ||||||
|  | @ -510,77 +244,29 @@ distributed Python. You can think of it as a ``trio`` | ||||||
| stdlib's ``multiprocessing`` but built on async programming primitives | stdlib's ``multiprocessing`` but built on async programming primitives | ||||||
| from the ground up. | from the ground up. | ||||||
| 
 | 
 | ||||||
|  | ``tractor``'s nurseries let you spawn ``trio`` *"actors"*: new Python | ||||||
|  | processes which each run a ``trio`` scheduled runtime - a call to ``trio.run()``. | ||||||
|  | 
 | ||||||
| Don't be scared off by this description. ``tractor`` **is just** ``trio`` | Don't be scared off by this description. ``tractor`` **is just** ``trio`` | ||||||
| but with nurseries for process management and cancel-able streaming IPC. | but with nurseries for process management and cancel-able streaming IPC. | ||||||
| If you understand how to work with ``trio``, ``tractor`` will give you | If you understand how to work with ``trio``, ``tractor`` will give you | ||||||
| the parallelism you may have been needing. | the parallelism you've been missing. | ||||||
| 
 | 
 | ||||||
| 
 | "Actors" communicate by exchanging asynchronous messages_ and avoid | ||||||
| Wait, huh?! I thought "actors" have messages, and mailboxes and stuff?! | sharing state. The intention of this model is to allow for highly | ||||||
| *********************************************************************** | distributed software that, through the adherence to *structured | ||||||
| Let's stop and ask how many canon actor model papers have you actually read ;) | concurrency*, results in systems which fail in predictable and | ||||||
| 
 | recoverable ways. | ||||||
| From our experience many "actor systems" aren't really "actor models" |  | ||||||
| since they **don't adhere** to the `3 axioms`_ and pay even less |  | ||||||
| attention to the problem of *unbounded non-determinism* (which was the |  | ||||||
| whole point for creation of the model in the first place). |  | ||||||
| 
 |  | ||||||
| From the author's mouth, **the only thing required** is `adherance to`_ |  | ||||||
| the `3 axioms`_, *and that's it*. |  | ||||||
| 
 |  | ||||||
| ``tractor`` adheres to said base requirements of an "actor model":: |  | ||||||
| 
 |  | ||||||
|     In response to a message, an actor may: |  | ||||||
| 
 |  | ||||||
|     - send a finite number of new messages |  | ||||||
|     - create a finite number of new actors |  | ||||||
|     - designate a new behavior to process subsequent messages |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| **and** requires *no further api changes* to accomplish this. |  | ||||||
| 
 |  | ||||||
| If you want do debate this further please feel free to chime in on our |  | ||||||
| chat or discuss on one of the following issues *after you've read |  | ||||||
| everything in them*: |  | ||||||
| 
 |  | ||||||
| - https://github.com/goodboy/tractor/issues/210 |  | ||||||
| - https://github.com/goodboy/tractor/issues/18 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Let's clarify our parlance |  | ||||||
| ************************** |  | ||||||
| Whether or not ``tractor`` has "actors" underneath should be mostly |  | ||||||
| irrelevant to users other then for referring to the interactions of our |  | ||||||
| primary runtime primitives: each Python process + ``trio.run()`` |  | ||||||
| + surrounding IPC machinery. These are our high level, base |  | ||||||
| *runtime-units-of-abstraction* which both *are* (as much as they can |  | ||||||
| be in Python) and will be referred to as our *"actors"*. |  | ||||||
| 
 |  | ||||||
| The main goal of ``tractor`` is is to allow for highly distributed |  | ||||||
| software that, through the adherence to *structured concurrency*, |  | ||||||
| results in systems which fail in predictable, recoverable and maybe even |  | ||||||
| understandable ways; being an "actor model" is just one way to describe |  | ||||||
| properties of the system. |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| What's on the TODO: | What's on the TODO: | ||||||
| ------------------- | ------------------- | ||||||
| Help us push toward the future of distributed `Python`. | Help us push toward the future. | ||||||
| 
 | 
 | ||||||
| - Erlang-style supervisors via composed context managers (see `#22 | - (Soon to land) ``asyncio`` support allowing for "infected" actors where | ||||||
|   <https://github.com/goodboy/tractor/issues/22>`_) |   `trio` drives the `asyncio` scheduler via the astounding "`guest mode`_" | ||||||
| - Typed messaging protocols (ex. via ``msgspec.Struct``, see `#36 | - Typed messaging protocols (ex. via ``msgspec``) | ||||||
|   <https://github.com/goodboy/tractor/issues/36>`_) | - Erlang-style supervisors via composed context managers | ||||||
| - Typed capability-based (dialog) protocols ( see `#196 |  | ||||||
|   <https://github.com/goodboy/tractor/issues/196>`_ with draft work |  | ||||||
|   started in `#311 <https://github.com/goodboy/tractor/pull/311>`_) |  | ||||||
| - We **recently disabled CI-testing on windows** and need help getting |  | ||||||
|   it running again! (see `#327 |  | ||||||
|   <https://github.com/goodboy/tractor/pull/327>`_). **We do have windows |  | ||||||
|   support** (and have for quite a while) but since no active hacker |  | ||||||
|   exists in the user-base to help test on that OS, for now we're not |  | ||||||
|   actively maintaining testing due to the added hassle and general |  | ||||||
|   latency.. |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| Feel like saying hi? | Feel like saying hi? | ||||||
|  | @ -592,32 +278,18 @@ say hi, please feel free to reach us in our `matrix channel`_.  If | ||||||
| matrix seems too hip, we're also mostly all in the the `trio gitter | matrix seems too hip, we're also mostly all in the the `trio gitter | ||||||
| channel`_! | channel`_! | ||||||
| 
 | 
 | ||||||
| .. _structured concurrent: https://trio.discourse.group/t/concise-definition-of-structured-concurrency/228 |  | ||||||
| .. _multi-processing: https://en.wikipedia.org/wiki/Multiprocessing |  | ||||||
| .. _trio: https://github.com/python-trio/trio |  | ||||||
| .. _nurseries: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/#nurseries-a-structured-replacement-for-go-statements |  | ||||||
| .. _actor model: https://en.wikipedia.org/wiki/Actor_model |  | ||||||
| .. _trionic: https://trio.readthedocs.io/en/latest/design.html#high-level-design-principles |  | ||||||
| .. _async sandwich: https://trio.readthedocs.io/en/latest/tutorial.html#async-sandwich |  | ||||||
| .. _3 axioms: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=162s |  | ||||||
| .. .. _3 axioms: https://en.wikipedia.org/wiki/Actor_model#Fundamental_concepts |  | ||||||
| .. _adherance to: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=1821s |  | ||||||
| .. _trio gitter channel: https://gitter.im/python-trio/general | .. _trio gitter channel: https://gitter.im/python-trio/general | ||||||
| .. _matrix channel: https://matrix.to/#/!tractor:matrix.org | .. _matrix channel: https://matrix.to/#/!tractor:matrix.org | ||||||
| .. _pdbp: https://github.com/mdmintz/pdbp |  | ||||||
| .. _pdb++: https://github.com/pdbpp/pdbpp | .. _pdb++: https://github.com/pdbpp/pdbpp | ||||||
| .. _guest mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops | .. _guest mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops | ||||||
| .. _messages: https://en.wikipedia.org/wiki/Message_passing | .. _messages: https://en.wikipedia.org/wiki/Message_passing | ||||||
| .. _trio docs: https://trio.readthedocs.io/en/latest/ | .. _trio docs: https://trio.readthedocs.io/en/latest/ | ||||||
| .. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/ | .. _blog post: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/ | ||||||
| .. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency | .. _structured concurrency: https://vorpus.org/blog/notes-on-structured-concurrency-or-go-statement-considered-harmful/ | ||||||
| .. _structured chadcurrency: https://en.wikipedia.org/wiki/Structured_concurrency | .. _3 axioms: https://en.wikipedia.org/wiki/Actor_model#Fundamental_concepts | ||||||
| .. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency |  | ||||||
| .. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony | .. _unrequirements: https://en.wikipedia.org/wiki/Actor_model#Direct_communication_and_asynchrony | ||||||
| .. _async generators: https://www.python.org/dev/peps/pep-0525/ | .. _async generators: https://www.python.org/dev/peps/pep-0525/ | ||||||
| .. _trio-parallel: https://github.com/richardsheridan/trio-parallel | .. _trio-parallel: https://github.com/richardsheridan/trio-parallel | ||||||
| .. _msgspec: https://jcristharif.com/msgspec/ |  | ||||||
| .. _guest-mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| .. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square | .. |gh_actions| image:: https://img.shields.io/endpoint.svg?url=https%3A%2F%2Factions-badge.atrox.dev%2Fgoodboy%2Ftractor%2Fbadge&style=popout-square | ||||||
|  |  | ||||||
							
								
								
									
										38
									
								
								docs/conf.py
								
								
								
								
							
							
						
						
									
										38
									
								
								docs/conf.py
								
								
								
								
							|  | @ -54,44 +54,28 @@ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] | ||||||
| # The theme to use for HTML and HTML Help pages.  See the documentation for | # The theme to use for HTML and HTML Help pages.  See the documentation for | ||||||
| # a list of builtin themes. | # a list of builtin themes. | ||||||
| # | # | ||||||
| html_theme = 'sphinx_book_theme' | html_theme = 'sphinx_typlog_theme' | ||||||
| 
 | 
 | ||||||
| pygments_style = 'algol_nu' | pygments_style = 'sphinx' | ||||||
| 
 | 
 | ||||||
| # Theme options are theme-specific and customize the look and feel of a theme | # Theme options are theme-specific and customize the look and feel of a theme | ||||||
| # further.  For a list of options available for each theme, see the | # further.  For a list of options available for each theme, see the | ||||||
| # documentation. | # documentation. | ||||||
| html_theme_options = { | html_theme_options = { | ||||||
|     # 'logo': 'tractor_logo_side.svg', |     'logo': 'tractor_logo_side.svg', | ||||||
|     # 'description': 'Structured concurrent "actors"', |     'description': 'Structured concurrent "actors"', | ||||||
|     "repository_url": "https://github.com/goodboy/tractor", |     'github_user': 'goodboy', | ||||||
|     "use_repository_button": True, |     'github_repo': 'tractor', | ||||||
|     "home_page_in_toc": False, |  | ||||||
|     "show_toc_level": 1, |  | ||||||
|     "path_to_docs": "docs", |  | ||||||
| 
 |  | ||||||
| } | } | ||||||
| html_sidebars = { | html_sidebars = { | ||||||
|     "**": [ |     "**": [ | ||||||
|         "sbt-sidebar-nav.html", |         'logo.html', | ||||||
|         # "sidebar-search-bs.html", |         'github.html', | ||||||
|         # 'localtoc.html', |         'relations.html', | ||||||
|     ], |         'searchbox.html' | ||||||
|     #     'logo.html', |     ] | ||||||
|     #     'github.html', |  | ||||||
|     #     'relations.html', |  | ||||||
|     #     'searchbox.html' |  | ||||||
|     # ] |  | ||||||
| } | } | ||||||
| 
 | 
 | ||||||
| # doesn't seem to work? |  | ||||||
| # extra_navbar = "<p>nextttt-gennnnn</p>" |  | ||||||
| 
 |  | ||||||
| html_title = '' |  | ||||||
| html_logo = '_static/tractor_logo_side.svg' |  | ||||||
| html_favicon = '_static/tractor_logo_side.svg' |  | ||||||
| # show_navbar_depth = 1 |  | ||||||
| 
 |  | ||||||
| # Add any paths that contain custom static files (such as style sheets) here, | # Add any paths that contain custom static files (such as style sheets) here, | ||||||
| # relative to this directory. They are copied after the builtin static files, | # relative to this directory. They are copied after the builtin static files, | ||||||
| # so a file named "default.css" will overwrite the builtin "default.css". | # so a file named "default.css" will overwrite the builtin "default.css". | ||||||
|  |  | ||||||
|  | @ -1,51 +0,0 @@ | ||||||
| Hot tips for ``tractor`` hackers |  | ||||||
| ================================ |  | ||||||
| 
 |  | ||||||
| This is a WIP guide for newcomers to the project mostly to do with |  | ||||||
| dev, testing, CI and release gotchas, reminders and best practises. |  | ||||||
| 
 |  | ||||||
| ``tractor`` is a fairly novel project compared to most since it is |  | ||||||
| effectively a new way of doing distributed computing in Python and is |  | ||||||
| much closer to working with an "application level runtime" (like erlang |  | ||||||
| OTP or scala's akka project) then it is a traditional Python library. |  | ||||||
| As such, having an arsenal of tools and recipes for figuring out the |  | ||||||
| right way to debug problems when they do arise is somewhat of |  | ||||||
| a necessity. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Making a Release |  | ||||||
| ---------------- |  | ||||||
| We currently do nothing special here except the traditional |  | ||||||
| PyPa release recipe as in `documented by twine`_. I personally |  | ||||||
| create sub-dirs within the generated `dist/` with an explicit |  | ||||||
| release name such as `alpha3/` when there's been a sequence of |  | ||||||
| releases I've made, but it really is up to you how you like to |  | ||||||
| organize generated sdists locally. |  | ||||||
| 
 |  | ||||||
| The resulting build cmds are approximately: |  | ||||||
| 
 |  | ||||||
| .. code:: bash |  | ||||||
| 
 |  | ||||||
|     python setup.py sdist -d ./dist/XXX.X/ |  | ||||||
| 
 |  | ||||||
|     twine upload -r testpypi dist/XXX.X/* |  | ||||||
| 
 |  | ||||||
|     twine upload dist/XXX.X/* |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| .. _documented by twine: https://twine.readthedocs.io/en/latest/#using-twine |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Debugging and monitoring actor trees |  | ||||||
| ------------------------------------ |  | ||||||
| TODO: but there are tips in the readme for some terminal commands |  | ||||||
| which can be used to see the process trees easily on Linux. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| Using the log system to trace `trio` task flow |  | ||||||
| ---------------------------------------------- |  | ||||||
| TODO: the logging system is meant to be oriented around |  | ||||||
| stack "layers" of the runtime such that you can track |  | ||||||
| "logical abstraction layers" in the code such as errors, cancellation, |  | ||||||
| IPC and streaming, and the low level transport and wire protocols. |  | ||||||
|  | @ -3,13 +3,12 @@ | ||||||
|    You can adapt this file completely to your liking, but it should at least |    You can adapt this file completely to your liking, but it should at least | ||||||
|    contain the root `toctree` directive. |    contain the root `toctree` directive. | ||||||
| 
 | 
 | ||||||
| ``tractor`` | tractor | ||||||
| =========== | ======= | ||||||
| 
 |  | ||||||
| A `structured concurrent`_, async-native "`actor model`_" built on trio_ and multiprocessing_. | A `structured concurrent`_, async-native "`actor model`_" built on trio_ and multiprocessing_. | ||||||
| 
 | 
 | ||||||
| .. toctree:: | .. toctree:: | ||||||
|    :maxdepth: 1 |    :maxdepth: 2 | ||||||
|    :caption: Contents: |    :caption: Contents: | ||||||
| 
 | 
 | ||||||
| .. _actor model: https://en.wikipedia.org/wiki/Actor_model | .. _actor model: https://en.wikipedia.org/wiki/Actor_model | ||||||
|  | @ -59,6 +58,8 @@ say hi, please feel free to ping me on the `trio gitter channel`_! | ||||||
| .. _trio gitter channel: https://gitter.im/python-trio/general | .. _trio gitter channel: https://gitter.im/python-trio/general | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | .. contents:: | ||||||
|  | 
 | ||||||
| 
 | 
 | ||||||
| Philosophy | Philosophy | ||||||
| ---------- | ---------- | ||||||
|  | @ -144,7 +145,7 @@ and use the ``run_in_actor()`` method: | ||||||
| 
 | 
 | ||||||
| What's going on? | What's going on? | ||||||
| 
 | 
 | ||||||
| - an initial *actor* is started with ``trio.run()`` and told to execute | - an initial *actor* is started with ``tractor.run()`` and told to execute | ||||||
|   its main task_: ``main()`` |   its main task_: ``main()`` | ||||||
| 
 | 
 | ||||||
| - inside ``main()`` an actor is *spawned* using an ``ActorNusery`` and is told | - inside ``main()`` an actor is *spawned* using an ``ActorNusery`` and is told | ||||||
|  | @ -181,7 +182,7 @@ Here is a similar example using the latter method: | ||||||
| 
 | 
 | ||||||
| .. literalinclude:: ../examples/actor_spawning_and_causality_with_daemon.py | .. literalinclude:: ../examples/actor_spawning_and_causality_with_daemon.py | ||||||
| 
 | 
 | ||||||
| The ``enable_modules`` `kwarg` above is a list of module path | The ``rpc_module_paths`` `kwarg` above is a list of module path | ||||||
| strings that will be loaded and made accessible for execution in the | strings that will be loaded and made accessible for execution in the | ||||||
| remote actor through a call to ``Portal.run()``. For now this is | remote actor through a call to ``Portal.run()``. For now this is | ||||||
| a simple mechanism to restrict the functionality of the remote | a simple mechanism to restrict the functionality of the remote | ||||||
|  | @ -396,7 +397,7 @@ tasks spawned via multiple RPC calls to an actor can modify | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|         # a per process cache |         # a per process cache | ||||||
|         _actor_cache: dict[str, bool] = {} |         _actor_cache: Dict[str, bool] = {} | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|         def ping_endpoints(endpoints: List[str]): |         def ping_endpoints(endpoints: List[str]): | ||||||
|  | @ -457,7 +458,7 @@ find an actor's socket address by name use the ``find_actor()`` function: | ||||||
| .. literalinclude:: ../examples/service_discovery.py | .. literalinclude:: ../examples/service_discovery.py | ||||||
| 
 | 
 | ||||||
| The ``name`` value you should pass to ``find_actor()`` is the one you passed as the | The ``name`` value you should pass to ``find_actor()`` is the one you passed as the | ||||||
| *first* argument to either ``trio.run()`` or ``ActorNursery.start_actor()``. | *first* argument to either ``tractor.run()`` or ``ActorNursery.start_actor()``. | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| Running actors standalone | Running actors standalone | ||||||
|  | @ -471,17 +472,7 @@ need to hop into a debugger. You just need to pass the existing | ||||||
| 
 | 
 | ||||||
| .. code:: python | .. code:: python | ||||||
| 
 | 
 | ||||||
|     import trio |     tractor.run(main, arbiter_addr=('192.168.0.10', 1616)) | ||||||
|     import tractor |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         async with tractor.open_root_actor( |  | ||||||
|             arbiter_addr=('192.168.0.10', 1616) |  | ||||||
|         ): |  | ||||||
|             await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| Choosing a process spawning backend | Choosing a process spawning backend | ||||||
|  | @ -489,7 +480,7 @@ Choosing a process spawning backend | ||||||
| ``tractor`` is architected to support multiple actor (sub-process) | ``tractor`` is architected to support multiple actor (sub-process) | ||||||
| spawning backends. Specific defaults are chosen based on your system | spawning backends. Specific defaults are chosen based on your system | ||||||
| but you can also explicitly select a backend of choice at startup | but you can also explicitly select a backend of choice at startup | ||||||
| via a ``start_method`` kwarg to ``tractor.open_nursery()``. | via a ``start_method`` kwarg to ``tractor.run()``. | ||||||
| 
 | 
 | ||||||
| Currently the options available are: | Currently the options available are: | ||||||
| 
 | 
 | ||||||
|  | @ -545,14 +536,13 @@ main python module of the program: | ||||||
| .. code:: python | .. code:: python | ||||||
| 
 | 
 | ||||||
|     # application/__main__.py |     # application/__main__.py | ||||||
|     import trio |  | ||||||
|     import tractor |     import tractor | ||||||
|     import multiprocessing |     import multiprocessing | ||||||
|     from . import tractor_app |     from . import tractor_app | ||||||
| 
 | 
 | ||||||
|     if __name__ == '__main__': |     if __name__ == '__main__': | ||||||
|         multiprocessing.freeze_support() |         multiprocessing.freeze_support() | ||||||
|         trio.run(tractor_app.main) |         tractor.run(tractor_app.main) | ||||||
| 
 | 
 | ||||||
| And execute as:: | And execute as:: | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -16,4 +16,4 @@ if __name__ == '__main__': | ||||||
|     # temporary dir and name it test_example.py. We import that script |     # temporary dir and name it test_example.py. We import that script | ||||||
|     # module here and invoke it's ``main()``. |     # module here and invoke it's ``main()``. | ||||||
|     from . import test_example |     from . import test_example | ||||||
|     test_example.trio.run(test_example.main) |     test_example.tractor.run(test_example.main, start_method='spawn') | ||||||
|  |  | ||||||
|  | @ -1,4 +1,3 @@ | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| _this_module = __name__ | _this_module = __name__ | ||||||
|  | @ -41,4 +40,4 @@ async def main(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main) | ||||||
|  |  | ||||||
|  | @ -1,8 +1,7 @@ | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def cellar_door(): | def cellar_door(): | ||||||
|     assert not tractor.is_root_process() |     assert not tractor.is_root_process() | ||||||
|     return "Dang that's beautiful" |     return "Dang that's beautiful" | ||||||
| 
 | 
 | ||||||
|  | @ -24,4 +23,4 @@ async def main(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main) | ||||||
|  |  | ||||||
|  | @ -1,8 +1,7 @@ | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def movie_theatre_question(): | def movie_theatre_question(): | ||||||
|     """A question asked in a dark theatre, in a tangent |     """A question asked in a dark theatre, in a tangent | ||||||
|     (errr, I mean different) process. |     (errr, I mean different) process. | ||||||
|     """ |     """ | ||||||
|  | @ -17,7 +16,7 @@ async def main(): | ||||||
|         portal = await n.start_actor( |         portal = await n.start_actor( | ||||||
|             'frank', |             'frank', | ||||||
|             # enable the actor to run funcs from this current module |             # enable the actor to run funcs from this current module | ||||||
|             enable_modules=[__name__], |             rpc_module_paths=[__name__], | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
|         print(await portal.run(movie_theatre_question)) |         print(await portal.run(movie_theatre_question)) | ||||||
|  | @ -31,4 +30,4 @@ async def main(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main) | ||||||
|  |  | ||||||
|  | @ -1,151 +0,0 @@ | ||||||
| ''' |  | ||||||
| Complex edge case where during real-time streaming the IPC tranport |  | ||||||
| channels are wiped out (purposely in this example though it could have |  | ||||||
| been an outage) and we want to ensure that despite being in debug mode |  | ||||||
| (or not) the user can sent SIGINT once they notice the hang and the |  | ||||||
| actor tree will eventually be cancelled without leaving any zombies. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| import trio |  | ||||||
| from tractor import ( |  | ||||||
|     open_nursery, |  | ||||||
|     context, |  | ||||||
|     Context, |  | ||||||
|     MsgStream, |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def break_channel_silently_then_error( |  | ||||||
|     stream: MsgStream, |  | ||||||
| ): |  | ||||||
|     async for msg in stream: |  | ||||||
|         await stream.send(msg) |  | ||||||
| 
 |  | ||||||
|         # XXX: close the channel right after an error is raised |  | ||||||
|         # purposely breaking the IPC transport to make sure the parent |  | ||||||
|         # doesn't get stuck in debug or hang on the connection join. |  | ||||||
|         # this more or less simulates an infinite msg-receive hang on |  | ||||||
|         # the other end. |  | ||||||
|         await stream._ctx.chan.send(None) |  | ||||||
|         assert 0 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def close_stream_and_error( |  | ||||||
|     stream: MsgStream, |  | ||||||
| ): |  | ||||||
|     async for msg in stream: |  | ||||||
|         await stream.send(msg) |  | ||||||
| 
 |  | ||||||
|         # wipe out channel right before raising |  | ||||||
|         await stream._ctx.chan.send(None) |  | ||||||
|         await stream.aclose() |  | ||||||
|         assert 0 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @context |  | ||||||
| async def recv_and_spawn_net_killers( |  | ||||||
| 
 |  | ||||||
|     ctx: Context, |  | ||||||
|     break_ipc_after: bool | int = False, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     ''' |  | ||||||
|     Receive stream msgs and spawn some IPC killers mid-stream. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     await ctx.started() |  | ||||||
|     async with ( |  | ||||||
|         ctx.open_stream() as stream, |  | ||||||
|         trio.open_nursery() as n, |  | ||||||
|     ): |  | ||||||
|         async for i in stream: |  | ||||||
|             print(f'child echoing {i}') |  | ||||||
|             await stream.send(i) |  | ||||||
|             if ( |  | ||||||
|                 break_ipc_after |  | ||||||
|                 and i > break_ipc_after |  | ||||||
|             ): |  | ||||||
|                 '#################################\n' |  | ||||||
|                 'Simulating child-side IPC BREAK!\n' |  | ||||||
|                 '#################################' |  | ||||||
|                 n.start_soon(break_channel_silently_then_error, stream) |  | ||||||
|                 n.start_soon(close_stream_and_error, stream) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main( |  | ||||||
|     debug_mode: bool = False, |  | ||||||
|     start_method: str = 'trio', |  | ||||||
| 
 |  | ||||||
|     # by default we break the parent IPC first (if configured to break |  | ||||||
|     # at all), but this can be changed so the child does first (even if |  | ||||||
|     # both are set to break). |  | ||||||
|     break_parent_ipc_after: int | bool = False, |  | ||||||
|     break_child_ipc_after: int | bool = False, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     async with ( |  | ||||||
|         open_nursery( |  | ||||||
|             start_method=start_method, |  | ||||||
| 
 |  | ||||||
|             # NOTE: even debugger is used we shouldn't get |  | ||||||
|             # a hang since it never engages due to broken IPC |  | ||||||
|             debug_mode=debug_mode, |  | ||||||
|             loglevel='warning', |  | ||||||
| 
 |  | ||||||
|         ) as an, |  | ||||||
|     ): |  | ||||||
|         portal = await an.start_actor( |  | ||||||
|             'chitty_hijo', |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         async with portal.open_context( |  | ||||||
|             recv_and_spawn_net_killers, |  | ||||||
|             break_ipc_after=break_child_ipc_after, |  | ||||||
| 
 |  | ||||||
|         ) as (ctx, sent): |  | ||||||
|             async with ctx.open_stream() as stream: |  | ||||||
|                 for i in range(1000): |  | ||||||
| 
 |  | ||||||
|                     if ( |  | ||||||
|                         break_parent_ipc_after |  | ||||||
|                         and i > break_parent_ipc_after |  | ||||||
|                     ): |  | ||||||
|                         print( |  | ||||||
|                             '#################################\n' |  | ||||||
|                             'Simulating parent-side IPC BREAK!\n' |  | ||||||
|                             '#################################' |  | ||||||
|                         ) |  | ||||||
|                         await stream._ctx.chan.send(None) |  | ||||||
| 
 |  | ||||||
|                     # it actually breaks right here in the |  | ||||||
|                     # mp_spawn/forkserver backends and thus the zombie |  | ||||||
|                     # reaper never even kicks in? |  | ||||||
|                     print(f'parent sending {i}') |  | ||||||
|                     await stream.send(i) |  | ||||||
| 
 |  | ||||||
|                     with trio.move_on_after(2) as cs: |  | ||||||
| 
 |  | ||||||
|                         # NOTE: in the parent side IPC failure case this |  | ||||||
|                         # will raise an ``EndOfChannel`` after the child |  | ||||||
|                         # is killed and sends a stop msg back to it's |  | ||||||
|                         # caller/this-parent. |  | ||||||
|                         rx = await stream.receive() |  | ||||||
| 
 |  | ||||||
|                         print(f"I'm a happy user and echoed to me is {rx}") |  | ||||||
| 
 |  | ||||||
|                     if cs.cancelled_caught: |  | ||||||
|                         # pretend to be a user seeing no streaming action |  | ||||||
|                         # thinking it's a hang, and then hitting ctl-c.. |  | ||||||
|                         print("YOO i'm a user anddd thingz hangin..") |  | ||||||
| 
 |  | ||||||
|                 print( |  | ||||||
|                     "YOO i'm mad send side dun but thingz hangin..\n" |  | ||||||
|                     'MASHING CTlR-C Ctl-c..' |  | ||||||
|                 ) |  | ||||||
|                 raise KeyboardInterrupt |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -1,42 +1,36 @@ | ||||||
| from typing import AsyncIterator |  | ||||||
| from itertools import repeat | from itertools import repeat | ||||||
| 
 |  | ||||||
| import trio | import trio | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
|  | tractor.log.get_console_log("INFO") | ||||||
| 
 | 
 | ||||||
| async def stream_forever() -> AsyncIterator[int]: |  | ||||||
| 
 | 
 | ||||||
|  | async def stream_forever(): | ||||||
|     for i in repeat("I can see these little future bubble things"): |     for i in repeat("I can see these little future bubble things"): | ||||||
|         # each yielded value is sent over the ``Channel`` to the parent actor |         # each yielded value is sent over the ``Channel`` to the | ||||||
|  |         # parent actor | ||||||
|         yield i |         yield i | ||||||
|         await trio.sleep(0.01) |         await trio.sleep(0.01) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def main(): | async def main(): | ||||||
| 
 |     # stream for at most 1 seconds | ||||||
|  |     with trio.move_on_after(1) as cancel_scope: | ||||||
|         async with tractor.open_nursery() as n: |         async with tractor.open_nursery() as n: | ||||||
| 
 |  | ||||||
|             portal = await n.start_actor( |             portal = await n.start_actor( | ||||||
|             'donny', |                 f'donny', | ||||||
|             enable_modules=[__name__], |                 rpc_module_paths=[__name__], | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|             # this async for loop streams values from the above |             # this async for loop streams values from the above | ||||||
|             # async generator running in a separate process |             # async generator running in a separate process | ||||||
|         async with portal.open_stream_from(stream_forever) as stream: |             async for letter in await portal.run(stream_forever): | ||||||
|             count = 0 |  | ||||||
|             async for letter in stream: |  | ||||||
|                 print(letter) |                 print(letter) | ||||||
|                 count += 1 |  | ||||||
| 
 | 
 | ||||||
|                 if count > 50: |     # we support trio's cancellation system | ||||||
|                     break |     assert cancel_scope.cancelled_caught | ||||||
| 
 |     assert n.cancelled | ||||||
|         print('stream terminated') |  | ||||||
| 
 |  | ||||||
|         await portal.cancel_actor() |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main) | ||||||
|  |  | ||||||
|  | @ -1,54 +0,0 @@ | ||||||
| ''' |  | ||||||
| Fast fail test with a context. |  | ||||||
| 
 |  | ||||||
| Ensure the partially initialized sub-actor process |  | ||||||
| doesn't cause a hang on error/cancel of the parent |  | ||||||
| nursery. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def sleep( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ): |  | ||||||
|     await trio.sleep(0.5) |  | ||||||
|     await ctx.started() |  | ||||||
|     await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def open_ctx( |  | ||||||
|     n: tractor._supervise.ActorNursery |  | ||||||
| ): |  | ||||||
| 
 |  | ||||||
|     # spawn both actors |  | ||||||
|     portal = await n.start_actor( |  | ||||||
|         name='sleeper', |  | ||||||
|         enable_modules=[__name__], |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
|     async with portal.open_context( |  | ||||||
|         sleep, |  | ||||||
|     ) as (ctx, first): |  | ||||||
|         assert first is None |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main(): |  | ||||||
| 
 |  | ||||||
|     async with tractor.open_nursery( |  | ||||||
|         debug_mode=True, |  | ||||||
|         loglevel='runtime', |  | ||||||
|     ) as an: |  | ||||||
| 
 |  | ||||||
|         async with trio.open_nursery() as n: |  | ||||||
|             n.start_soon(open_ctx, an) |  | ||||||
| 
 |  | ||||||
|             await trio.sleep(0.2) |  | ||||||
|             await trio.sleep(0.1) |  | ||||||
|             assert 0 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -11,7 +11,7 @@ async def breakpoint_forever(): | ||||||
| 
 | 
 | ||||||
| async def name_error(): | async def name_error(): | ||||||
|     "Raise a ``NameError``" |     "Raise a ``NameError``" | ||||||
|     getattr(doggypants)  # noqa |     getattr(doggypants) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def main(): | async def main(): | ||||||
|  | @ -26,18 +26,7 @@ async def main(): | ||||||
|         p1 = await n.start_actor('name_error', enable_modules=[__name__]) |         p1 = await n.start_actor('name_error', enable_modules=[__name__]) | ||||||
| 
 | 
 | ||||||
|         # retreive results |         # retreive results | ||||||
|         async with p0.open_stream_from(breakpoint_forever) as stream: |         stream = await p0.run(breakpoint_forever) | ||||||
| 
 |  | ||||||
|             # triggers the first name error |  | ||||||
|             try: |  | ||||||
|                 await p1.run(name_error) |  | ||||||
|             except tractor.RemoteActorError as rae: |  | ||||||
|                 assert rae.type is NameError |  | ||||||
| 
 |  | ||||||
|             async for i in stream: |  | ||||||
| 
 |  | ||||||
|                 # a second time try the failing subactor and this tie |  | ||||||
|                 # let error propagate up to the parent/nursery. |  | ||||||
|         await p1.run(name_error) |         await p1.run(name_error) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -4,7 +4,7 @@ import tractor | ||||||
| 
 | 
 | ||||||
| async def name_error(): | async def name_error(): | ||||||
|     "Raise a ``NameError``" |     "Raise a ``NameError``" | ||||||
|     getattr(doggypants)  # noqa |     getattr(doggypants) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def breakpoint_forever(): | async def breakpoint_forever(): | ||||||
|  | @ -12,31 +12,18 @@ async def breakpoint_forever(): | ||||||
|     while True: |     while True: | ||||||
|         await tractor.breakpoint() |         await tractor.breakpoint() | ||||||
| 
 | 
 | ||||||
|         # NOTE: if the test never sent 'q'/'quit' commands |  | ||||||
|         # on the pdb repl, without this checkpoint line the |  | ||||||
|         # repl would spin in this actor forever. |  | ||||||
|         # await trio.sleep(0) |  | ||||||
| 
 |  | ||||||
| 
 | 
 | ||||||
| async def spawn_until(depth=0): | async def spawn_until(depth=0): | ||||||
|     """"A nested nursery that triggers another ``NameError``. |     """"A nested nursery that triggers another ``NameError``. | ||||||
|     """ |     """ | ||||||
|     async with tractor.open_nursery() as n: |     async with tractor.open_nursery() as n: | ||||||
|         if depth < 1: |         if depth < 1: | ||||||
| 
 |             # await n.run_in_actor('breakpoint_forever', breakpoint_forever) | ||||||
|             await n.run_in_actor(breakpoint_forever) |             await n.run_in_actor( | ||||||
| 
 |  | ||||||
|             p = await n.run_in_actor( |  | ||||||
|                 name_error, |                 name_error, | ||||||
|                 name='name_error' |                 name='name_error' | ||||||
|             ) |             ) | ||||||
|             await trio.sleep(0.5) |  | ||||||
|             # rx and propagate error from child |  | ||||||
|             await p.result() |  | ||||||
| 
 |  | ||||||
|         else: |         else: | ||||||
|             # recusrive call to spawn another process branching layer of |  | ||||||
|             # the tree |  | ||||||
|             depth -= 1 |             depth -= 1 | ||||||
|             await n.run_in_actor( |             await n.run_in_actor( | ||||||
|                 spawn_until, |                 spawn_until, | ||||||
|  | @ -66,7 +53,6 @@ async def main(): | ||||||
|     """ |     """ | ||||||
|     async with tractor.open_nursery( |     async with tractor.open_nursery( | ||||||
|         debug_mode=True, |         debug_mode=True, | ||||||
|         # loglevel='cancel', |  | ||||||
|     ) as n: |     ) as n: | ||||||
| 
 | 
 | ||||||
|         # spawn both actors |         # spawn both actors | ||||||
|  | @ -81,16 +67,8 @@ async def main(): | ||||||
|             name='spawner1', |             name='spawner1', | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
|         # TODO: test this case as well where the parent don't see |  | ||||||
|         # the sub-actor errors by default and instead expect a user |  | ||||||
|         # ctrl-c to kill the root. |  | ||||||
|         with trio.move_on_after(3): |  | ||||||
|             await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
|         # gah still an issue here. |         # gah still an issue here. | ||||||
|         await portal.result() |         await portal.result() | ||||||
| 
 |  | ||||||
|         # should never get here |  | ||||||
|         await portal1.result() |         await portal1.result() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -1,15 +1,9 @@ | ||||||
| ''' |  | ||||||
| Test that a nested nursery will avoid clobbering |  | ||||||
| the debugger latched by a broken child. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def name_error(): | async def name_error(): | ||||||
|     "Raise a ``NameError``" |     "Raise a ``NameError``" | ||||||
|     getattr(doggypants)  # noqa |     getattr(doggypants) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def spawn_error(): | async def spawn_error(): | ||||||
|  | @ -38,10 +32,7 @@ async def main(): | ||||||
|         - root actor should then fail on assert |         - root actor should then fail on assert | ||||||
|         - program termination |         - program termination | ||||||
|     """ |     """ | ||||||
|     async with tractor.open_nursery( |     async with tractor.open_nursery() as n: | ||||||
|         debug_mode=True, |  | ||||||
|         # loglevel='cancel', |  | ||||||
|     ) as n: |  | ||||||
| 
 | 
 | ||||||
|         # spawn both actors |         # spawn both actors | ||||||
|         portal = await n.run_in_actor( |         portal = await n.run_in_actor( | ||||||
|  | @ -63,4 +54,4 @@ async def main(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main, debug_mode=True) | ||||||
|  |  | ||||||
|  | @ -11,7 +11,7 @@ async def breakpoint_forever(): | ||||||
| 
 | 
 | ||||||
| async def name_error(): | async def name_error(): | ||||||
|     "Raise a ``NameError``" |     "Raise a ``NameError``" | ||||||
|     getattr(doggypants)  # noqa |     getattr(doggypants) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def spawn_error(): | async def spawn_error(): | ||||||
|  | @ -36,9 +36,7 @@ async def main(): | ||||||
|     `-python -m tractor._child --uid ('spawn_error', '52ee14a5 ...) |     `-python -m tractor._child --uid ('spawn_error', '52ee14a5 ...) | ||||||
|        `-python -m tractor._child --uid ('name_error', '3391222c ...) |        `-python -m tractor._child --uid ('name_error', '3391222c ...) | ||||||
|     """ |     """ | ||||||
|     async with tractor.open_nursery( |     async with tractor.open_nursery() as n: | ||||||
|         debug_mode=True, |  | ||||||
|     ) as n: |  | ||||||
| 
 | 
 | ||||||
|         # Spawn both actors, don't bother with collecting results |         # Spawn both actors, don't bother with collecting results | ||||||
|         # (would result in a different debugger outcome due to parent's |         # (would result in a different debugger outcome due to parent's | ||||||
|  | @ -49,4 +47,4 @@ async def main(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main, debug_mode=True) | ||||||
|  |  | ||||||
|  | @ -1,40 +0,0 @@ | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def just_sleep( |  | ||||||
| 
 |  | ||||||
|     ctx: tractor.Context, |  | ||||||
|     **kwargs, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     ''' |  | ||||||
|     Start and sleep. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     await ctx.started() |  | ||||||
|     await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main() -> None: |  | ||||||
| 
 |  | ||||||
|     async with tractor.open_nursery( |  | ||||||
|         debug_mode=True, |  | ||||||
|     ) as n: |  | ||||||
|         portal = await n.start_actor( |  | ||||||
|             'ctx_child', |  | ||||||
| 
 |  | ||||||
|             # XXX: we don't enable the current module in order |  | ||||||
|             # to trigger `ModuleNotFound`. |  | ||||||
|             enable_modules=[], |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         async with portal.open_context( |  | ||||||
|             just_sleep,  # taken from pytest parameterization |  | ||||||
|         ) as (ctx, sent): |  | ||||||
|             raise KeyboardInterrupt |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -1,27 +0,0 @@ | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| async def die(): |  | ||||||
|     raise RuntimeError |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main(): |  | ||||||
|     async with tractor.open_nursery() as tn: |  | ||||||
| 
 |  | ||||||
|         debug_actor = await tn.start_actor( |  | ||||||
|             'debugged_boi', |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|             debug_mode=True, |  | ||||||
|         ) |  | ||||||
|         crash_boi = await tn.start_actor( |  | ||||||
|             'crash_boi', |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|             # debug_mode=True, |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         async with trio.open_nursery() as n: |  | ||||||
|             n.start_soon(debug_actor.run, die) |  | ||||||
|             n.start_soon(crash_boi.run, die) |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -1,24 +0,0 @@ | ||||||
| import os |  | ||||||
| import sys |  | ||||||
| 
 |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main() -> None: |  | ||||||
|     async with tractor.open_nursery(debug_mode=True) as an: |  | ||||||
| 
 |  | ||||||
|         assert os.environ['PYTHONBREAKPOINT'] == 'tractor._debug._set_trace' |  | ||||||
| 
 |  | ||||||
|         # TODO: an assert that verifies the hook has indeed been, hooked |  | ||||||
|         # XD |  | ||||||
|         assert sys.breakpointhook is not tractor._debug._set_trace |  | ||||||
| 
 |  | ||||||
|         breakpoint() |  | ||||||
| 
 |  | ||||||
|     # TODO: an assert that verifies the hook is unhooked.. |  | ||||||
|     assert sys.breakpointhook |  | ||||||
|     breakpoint() |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -4,10 +4,6 @@ import tractor | ||||||
| 
 | 
 | ||||||
| async def main(): | async def main(): | ||||||
| 
 | 
 | ||||||
|     async with tractor.open_root_actor( |  | ||||||
|         debug_mode=True, |  | ||||||
|     ): |  | ||||||
| 
 |  | ||||||
|     await trio.sleep(0.1) |     await trio.sleep(0.1) | ||||||
| 
 | 
 | ||||||
|     await tractor.breakpoint() |     await tractor.breakpoint() | ||||||
|  | @ -16,4 +12,4 @@ async def main(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main, debug_mode=True) | ||||||
|  |  | ||||||
|  | @ -1,15 +1,11 @@ | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def main(): | async def main(): | ||||||
| 
 | 
 | ||||||
|     async with tractor.open_root_actor( |  | ||||||
|         debug_mode=True, |  | ||||||
|     ): |  | ||||||
|     while True: |     while True: | ||||||
|         await tractor.breakpoint() |         await tractor.breakpoint() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main, debug_mode=True) | ||||||
|  |  | ||||||
|  | @ -1,13 +1,9 @@ | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def main(): | async def main(): | ||||||
|     async with tractor.open_root_actor( |  | ||||||
|         debug_mode=True, |  | ||||||
|     ): |  | ||||||
|     assert 0 |     assert 0 | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main, debug_mode=True) | ||||||
|  |  | ||||||
|  | @ -1,10 +1,9 @@ | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def name_error(): | async def name_error(): | ||||||
|     "Raise a ``NameError``" |     "Raise a ``NameError``" | ||||||
|     getattr(doggypants)  # noqa |     getattr(doggypants) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def spawn_until(depth=0): | async def spawn_until(depth=0): | ||||||
|  | @ -38,10 +37,7 @@ async def main(): | ||||||
|        └─ python -m tractor._child --uid ('name_error', '6c2733b8 ...) |        └─ python -m tractor._child --uid ('name_error', '6c2733b8 ...) | ||||||
| 
 | 
 | ||||||
|     """ |     """ | ||||||
|     async with tractor.open_nursery( |     async with tractor.open_nursery() as n: | ||||||
|         debug_mode=True, |  | ||||||
|         loglevel='warning' |  | ||||||
|     ) as n: |  | ||||||
| 
 | 
 | ||||||
|         # spawn both actors |         # spawn both actors | ||||||
|         portal = await n.run_in_actor( |         portal = await n.run_in_actor( | ||||||
|  | @ -62,4 +58,4 @@ async def main(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main, debug_mode=True, loglevel='warning') | ||||||
|  |  | ||||||
|  | @ -1,31 +0,0 @@ | ||||||
| 
 |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def key_error(): |  | ||||||
|     "Raise a ``NameError``" |  | ||||||
|     return {}['doggy'] |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main(): |  | ||||||
|     """Root dies  |  | ||||||
| 
 |  | ||||||
|     """ |  | ||||||
|     async with tractor.open_nursery( |  | ||||||
|         debug_mode=True, |  | ||||||
|         loglevel='debug' |  | ||||||
|     ) as n: |  | ||||||
| 
 |  | ||||||
|         # spawn both actors |  | ||||||
|         portal = await n.run_in_actor(key_error) |  | ||||||
| 
 |  | ||||||
|         # XXX: originally a bug caused by this is where root would enter |  | ||||||
|         # the debugger and clobber the tty used by the repl even though |  | ||||||
|         # child should have it locked. |  | ||||||
|         with trio.fail_after(1): |  | ||||||
|             await trio.Event().wait() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -1,50 +0,0 @@ | ||||||
| import tractor |  | ||||||
| import trio |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def gen(): |  | ||||||
|     yield 'yo' |  | ||||||
|     await tractor.breakpoint() |  | ||||||
|     yield 'yo' |  | ||||||
|     await tractor.breakpoint() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def just_bp( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     await ctx.started() |  | ||||||
|     await tractor.breakpoint() |  | ||||||
| 
 |  | ||||||
|     # TODO: bps and errors in this call.. |  | ||||||
|     async for val in gen(): |  | ||||||
|         print(val) |  | ||||||
| 
 |  | ||||||
|     # await trio.sleep(0.5) |  | ||||||
| 
 |  | ||||||
|     # prematurely destroy the connection |  | ||||||
|     await ctx.chan.aclose() |  | ||||||
| 
 |  | ||||||
|     # THIS CAUSES AN UNRECOVERABLE HANG |  | ||||||
|     # without latest ``pdbpp``: |  | ||||||
|     assert 0 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main(): |  | ||||||
|     async with tractor.open_nursery( |  | ||||||
|         debug_mode=True, |  | ||||||
|     ) as n: |  | ||||||
|         p = await n.start_actor( |  | ||||||
|             'bp_boi', |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|         ) |  | ||||||
|         async with p.open_context( |  | ||||||
|             just_bp, |  | ||||||
|         ) as (ctx, first): |  | ||||||
|             await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -12,9 +12,7 @@ async def breakpoint_forever(): | ||||||
| 
 | 
 | ||||||
| async def main(): | async def main(): | ||||||
| 
 | 
 | ||||||
|     async with tractor.open_nursery( |     async with tractor.open_nursery() as n: | ||||||
|         debug_mode=True, |  | ||||||
|     ) as n: |  | ||||||
| 
 | 
 | ||||||
|         portal = await n.run_in_actor( |         portal = await n.run_in_actor( | ||||||
|             breakpoint_forever, |             breakpoint_forever, | ||||||
|  | @ -23,4 +21,4 @@ async def main(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main, debug_mode=True) | ||||||
|  |  | ||||||
|  | @ -1,4 +1,3 @@ | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -7,13 +6,11 @@ async def name_error(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def main(): | async def main(): | ||||||
|     async with tractor.open_nursery( |     async with tractor.open_nursery() as n: | ||||||
|         debug_mode=True, |  | ||||||
|     ) as n: |  | ||||||
| 
 | 
 | ||||||
|         portal = await n.run_in_actor(name_error) |         portal = await n.run_in_actor(name_error) | ||||||
|         await portal.result() |         await portal.result() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main) |     tractor.run(main, debug_mode=True) | ||||||
|  |  | ||||||
|  | @ -7,7 +7,7 @@ import tractor | ||||||
| async def stream_data(seed): | async def stream_data(seed): | ||||||
|     for i in range(seed): |     for i in range(seed): | ||||||
|         yield i |         yield i | ||||||
|         await trio.sleep(0.0001)  # trigger scheduler |         await trio.sleep(0)  # trigger scheduler | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| # this is the third actor; the aggregator | # this is the third actor; the aggregator | ||||||
|  | @ -21,7 +21,7 @@ async def aggregate(seed): | ||||||
|             # fork point |             # fork point | ||||||
|             portal = await nursery.start_actor( |             portal = await nursery.start_actor( | ||||||
|                 name=f'streamer_{i}', |                 name=f'streamer_{i}', | ||||||
|                 enable_modules=[__name__], |                 rpc_module_paths=[__name__], | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|             portals.append(portal) |             portals.append(portal) | ||||||
|  | @ -29,11 +29,8 @@ async def aggregate(seed): | ||||||
|         send_chan, recv_chan = trio.open_memory_channel(500) |         send_chan, recv_chan = trio.open_memory_channel(500) | ||||||
| 
 | 
 | ||||||
|         async def push_to_chan(portal, send_chan): |         async def push_to_chan(portal, send_chan): | ||||||
| 
 |  | ||||||
|             # TODO: https://github.com/goodboy/tractor/issues/207 |  | ||||||
|             async with send_chan: |             async with send_chan: | ||||||
|                 async with portal.open_stream_from(stream_data, seed=seed) as stream: |                 async for value in await portal.run(stream_data, seed=seed): | ||||||
|                     async for value in stream: |  | ||||||
|                     # leverage trio's built-in backpressure |                     # leverage trio's built-in backpressure | ||||||
|                     await send_chan.send(value) |                     await send_chan.send(value) | ||||||
| 
 | 
 | ||||||
|  | @ -68,32 +65,25 @@ async def aggregate(seed): | ||||||
| # this is the main actor and *arbiter* | # this is the main actor and *arbiter* | ||||||
| async def main(): | async def main(): | ||||||
|     # a nursery which spawns "actors" |     # a nursery which spawns "actors" | ||||||
|     async with tractor.open_nursery( |     async with tractor.open_nursery() as nursery: | ||||||
|         arbiter_addr=('127.0.0.1', 1616) |  | ||||||
|     ) as nursery: |  | ||||||
| 
 | 
 | ||||||
|         seed = int(1e3) |         seed = int(1e3) | ||||||
|  |         import time | ||||||
|         pre_start = time.time() |         pre_start = time.time() | ||||||
| 
 | 
 | ||||||
|         portal = await nursery.start_actor( |         portal = await nursery.run_in_actor( | ||||||
|             name='aggregator', |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         async with portal.open_stream_from( |  | ||||||
|             aggregate, |             aggregate, | ||||||
|  |             name='aggregator', | ||||||
|             seed=seed, |             seed=seed, | ||||||
|         ) as stream: |         ) | ||||||
| 
 | 
 | ||||||
|         start = time.time() |         start = time.time() | ||||||
|         # the portal call returns exactly what you'd expect |         # the portal call returns exactly what you'd expect | ||||||
|         # as if the remote "aggregate" function was called locally |         # as if the remote "aggregate" function was called locally | ||||||
|         result_stream = [] |         result_stream = [] | ||||||
|             async for value in stream: |         async for value in await portal.result(): | ||||||
|             result_stream.append(value) |             result_stream.append(value) | ||||||
| 
 | 
 | ||||||
|         await portal.cancel_actor() |  | ||||||
| 
 |  | ||||||
|         print(f"STREAM TIME = {time.time() - start}") |         print(f"STREAM TIME = {time.time() - start}") | ||||||
|         print(f"STREAM + SPAWN TIME = {time.time() - pre_start}") |         print(f"STREAM + SPAWN TIME = {time.time() - pre_start}") | ||||||
|         assert result_stream == list(range(seed)) |         assert result_stream == list(range(seed)) | ||||||
|  | @ -101,4 +91,4 @@ async def main(): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     final_stream = trio.run(main) |     final_stream = tractor.run(main, arbiter_addr=('127.0.0.1', 1616)) | ||||||
|  |  | ||||||
|  | @ -1,92 +0,0 @@ | ||||||
| ''' |  | ||||||
| An SC compliant infected ``asyncio`` echo server. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| import asyncio |  | ||||||
| from statistics import mean |  | ||||||
| import time |  | ||||||
| 
 |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def aio_echo_server( |  | ||||||
|     to_trio: trio.MemorySendChannel, |  | ||||||
|     from_trio: asyncio.Queue, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     # a first message must be sent **from** this ``asyncio`` |  | ||||||
|     # task or the ``trio`` side will never unblock from |  | ||||||
|     # ``tractor.to_asyncio.open_channel_from():`` |  | ||||||
|     to_trio.send_nowait('start') |  | ||||||
| 
 |  | ||||||
|     # XXX: this uses an ``from_trio: asyncio.Queue`` currently but we |  | ||||||
|     # should probably offer something better. |  | ||||||
|     while True: |  | ||||||
|         # echo the msg back |  | ||||||
|         to_trio.send_nowait(await from_trio.get()) |  | ||||||
|         await asyncio.sleep(0) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def trio_to_aio_echo_server( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ): |  | ||||||
|     # this will block until the ``asyncio`` task sends a "first" |  | ||||||
|     # message. |  | ||||||
|     async with tractor.to_asyncio.open_channel_from( |  | ||||||
|         aio_echo_server, |  | ||||||
|     ) as (first, chan): |  | ||||||
| 
 |  | ||||||
|         assert first == 'start' |  | ||||||
|         await ctx.started(first) |  | ||||||
| 
 |  | ||||||
|         async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|             async for msg in stream: |  | ||||||
|                 await chan.send(msg) |  | ||||||
| 
 |  | ||||||
|                 out = await chan.receive() |  | ||||||
|                 # echo back to parent actor-task |  | ||||||
|                 await stream.send(out) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main(): |  | ||||||
| 
 |  | ||||||
|     async with tractor.open_nursery() as n: |  | ||||||
|         p = await n.start_actor( |  | ||||||
|             'aio_server', |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|             infect_asyncio=True, |  | ||||||
|         ) |  | ||||||
|         async with p.open_context( |  | ||||||
|             trio_to_aio_echo_server, |  | ||||||
|         ) as (ctx, first): |  | ||||||
| 
 |  | ||||||
|             assert first == 'start' |  | ||||||
| 
 |  | ||||||
|             count = 0 |  | ||||||
|             async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|                 delays = [] |  | ||||||
|                 send = time.time() |  | ||||||
| 
 |  | ||||||
|                 await stream.send(count) |  | ||||||
|                 async for msg in stream: |  | ||||||
|                     recv = time.time() |  | ||||||
|                     delays.append(recv - send) |  | ||||||
|                     assert msg == count |  | ||||||
|                     count += 1 |  | ||||||
|                     send = time.time() |  | ||||||
|                     await stream.send(count) |  | ||||||
| 
 |  | ||||||
|                     if count >= 1e3: |  | ||||||
|                         break |  | ||||||
| 
 |  | ||||||
|         print(f'mean round trip rate (Hz): {1/mean(delays)}') |  | ||||||
|         await p.cancel_actor() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -1,49 +0,0 @@ | ||||||
| import trio |  | ||||||
| import click |  | ||||||
| import tractor |  | ||||||
| import pydantic |  | ||||||
| # from multiprocessing import shared_memory |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def just_sleep( |  | ||||||
| 
 |  | ||||||
|     ctx: tractor.Context, |  | ||||||
|     **kwargs, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     ''' |  | ||||||
|     Test a small ping-pong 2-way streaming server. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     await ctx.started() |  | ||||||
|     await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main() -> None: |  | ||||||
| 
 |  | ||||||
|     proc = await trio.open_process( ( |  | ||||||
|         'python', |  | ||||||
|         '-c', |  | ||||||
|         'import trio; trio.run(trio.sleep_forever)', |  | ||||||
|     )) |  | ||||||
|     await proc.wait() |  | ||||||
|     # await trio.sleep_forever() |  | ||||||
|     # async with tractor.open_nursery() as n: |  | ||||||
| 
 |  | ||||||
|     #     portal = await n.start_actor( |  | ||||||
|     #         'rpc_server', |  | ||||||
|     #         enable_modules=[__name__], |  | ||||||
|     #     ) |  | ||||||
| 
 |  | ||||||
|     #     async with portal.open_context( |  | ||||||
|     #         just_sleep,  # taken from pytest parameterization |  | ||||||
|     #     ) as (ctx, sent): |  | ||||||
|     #         await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     import time |  | ||||||
|     # time.sleep(999) |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -15,8 +15,7 @@ async def stream_data(seed=10): | ||||||
| 
 | 
 | ||||||
| async def stream_from_portal(p, consumed): | async def stream_from_portal(p, consumed): | ||||||
| 
 | 
 | ||||||
|     async with p.open_stream_from(stream_data) as stream: |     async for item in await p.run(stream_data): | ||||||
|         async for item in stream: |  | ||||||
|         if item in consumed: |         if item in consumed: | ||||||
|             consumed.remove(item) |             consumed.remove(item) | ||||||
|         else: |         else: | ||||||
|  |  | ||||||
|  | @ -10,7 +10,6 @@ PRIMES = [ | ||||||
|     115797848077099, |     115797848077099, | ||||||
|     1099726899285419] |     1099726899285419] | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
| def is_prime(n): | def is_prime(n): | ||||||
|     if n < 2: |     if n < 2: | ||||||
|         return False |         return False | ||||||
|  | @ -25,7 +24,6 @@ def is_prime(n): | ||||||
|             return False |             return False | ||||||
|     return True |     return True | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
| def main(): | def main(): | ||||||
|     with concurrent.futures.ProcessPoolExecutor() as executor: |     with concurrent.futures.ProcessPoolExecutor() as executor: | ||||||
|         start = time.time() |         start = time.time() | ||||||
|  | @ -35,7 +33,6 @@ def main(): | ||||||
| 
 | 
 | ||||||
|         print(f'processing took {time.time() - start} seconds') |         print(f'processing took {time.time() - start} seconds') | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
| 
 | 
 | ||||||
|     start = time.time() |     start = time.time() | ||||||
|  |  | ||||||
|  | @ -9,7 +9,7 @@ is ``tractor``'s channels. | ||||||
| 
 | 
 | ||||||
| """ | """ | ||||||
| from contextlib import asynccontextmanager | from contextlib import asynccontextmanager | ||||||
| from typing import Callable | from typing import List, Callable | ||||||
| import itertools | import itertools | ||||||
| import math | import math | ||||||
| import time | import time | ||||||
|  | @ -29,7 +29,7 @@ PRIMES = [ | ||||||
| ] | ] | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def is_prime(n): | def is_prime(n): | ||||||
|     if n < 2: |     if n < 2: | ||||||
|         return False |         return False | ||||||
|     if n == 2: |     if n == 2: | ||||||
|  | @ -71,8 +71,8 @@ async def worker_pool(workers=4): | ||||||
| 
 | 
 | ||||||
|         async def _map( |         async def _map( | ||||||
|             worker_func: Callable[[int], bool], |             worker_func: Callable[[int], bool], | ||||||
|             sequence: list[int] |             sequence: List[int] | ||||||
|         ) -> list[bool]: |         ) -> List[bool]: | ||||||
| 
 | 
 | ||||||
|             # define an async (local) task to collect results from workers |             # define an async (local) task to collect results from workers | ||||||
|             async def send_result(func, value, portal): |             async def send_result(func, value, portal): | ||||||
|  |  | ||||||
|  | @ -1,44 +0,0 @@ | ||||||
| 
 |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def sleepy_jane(): |  | ||||||
|     uid = tractor.current_actor().uid |  | ||||||
|     print(f'Yo i am actor {uid}') |  | ||||||
|     await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main(): |  | ||||||
|     ''' |  | ||||||
|     Spawn a flat actor cluster, with one process per |  | ||||||
|     detected core. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     portal_map: dict[str, tractor.Portal] |  | ||||||
|     results: dict[str, str] |  | ||||||
| 
 |  | ||||||
|     # look at this hip new syntax! |  | ||||||
|     async with ( |  | ||||||
| 
 |  | ||||||
|         tractor.open_actor_cluster( |  | ||||||
|             modules=[__name__] |  | ||||||
|         ) as portal_map, |  | ||||||
| 
 |  | ||||||
|         trio.open_nursery() as n, |  | ||||||
|     ): |  | ||||||
| 
 |  | ||||||
|         for (name, portal) in portal_map.items(): |  | ||||||
|             n.start_soon(portal.run, sleepy_jane) |  | ||||||
| 
 |  | ||||||
|         await trio.sleep(0.5) |  | ||||||
| 
 |  | ||||||
|         # kill the cluster with a cancel |  | ||||||
|         raise KeyboardInterrupt |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     try: |  | ||||||
|         trio.run(main) |  | ||||||
|     except KeyboardInterrupt: |  | ||||||
|         pass |  | ||||||
|  | @ -1,4 +1,3 @@ | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -12,7 +11,7 @@ async def main(): | ||||||
|         for i in range(3): |         for i in range(3): | ||||||
|             real_actors.append(await n.start_actor( |             real_actors.append(await n.start_actor( | ||||||
|                 f'actor_{i}', |                 f'actor_{i}', | ||||||
|                 enable_modules=[__name__], |                 rpc_module_paths=[__name__], | ||||||
|             )) |             )) | ||||||
| 
 | 
 | ||||||
|         # start one actor that will fail immediately |         # start one actor that will fail immediately | ||||||
|  | @ -25,6 +24,6 @@ async def main(): | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     try: |     try: | ||||||
|         # also raises |         # also raises | ||||||
|         trio.run(main) |         tractor.run(main) | ||||||
|     except tractor.RemoteActorError: |     except tractor.RemoteActorError: | ||||||
|         print("Look Maa that actor failed hard, hehhh!") |         print("Look Maa that actor failed hard, hehhh!") | ||||||
|  |  | ||||||
|  | @ -1,72 +0,0 @@ | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def simple_rpc( |  | ||||||
| 
 |  | ||||||
|     ctx: tractor.Context, |  | ||||||
|     data: int, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     '''Test a small ping-pong 2-way streaming server. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     # signal to parent that we're up much like |  | ||||||
|     # ``trio_typing.TaskStatus.started()`` |  | ||||||
|     await ctx.started(data + 1) |  | ||||||
| 
 |  | ||||||
|     async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|         count = 0 |  | ||||||
|         async for msg in stream: |  | ||||||
| 
 |  | ||||||
|             assert msg == 'ping' |  | ||||||
|             await stream.send('pong') |  | ||||||
|             count += 1 |  | ||||||
| 
 |  | ||||||
|         else: |  | ||||||
|             assert count == 10 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def main() -> None: |  | ||||||
| 
 |  | ||||||
|     async with tractor.open_nursery() as n: |  | ||||||
| 
 |  | ||||||
|         portal = await n.start_actor( |  | ||||||
|             'rpc_server', |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         # XXX: syntax requires py3.9 |  | ||||||
|         async with ( |  | ||||||
| 
 |  | ||||||
|             portal.open_context( |  | ||||||
|                 simple_rpc,  # taken from pytest parameterization |  | ||||||
|                 data=10, |  | ||||||
| 
 |  | ||||||
|             ) as (ctx, sent), |  | ||||||
| 
 |  | ||||||
|             ctx.open_stream() as stream, |  | ||||||
|         ): |  | ||||||
| 
 |  | ||||||
|             assert sent == 11 |  | ||||||
| 
 |  | ||||||
|             count = 0 |  | ||||||
|             # receive msgs using async for style |  | ||||||
|             await stream.send('ping') |  | ||||||
| 
 |  | ||||||
|             async for msg in stream: |  | ||||||
|                 assert msg == 'pong' |  | ||||||
|                 await stream.send('ping') |  | ||||||
|                 count += 1 |  | ||||||
| 
 |  | ||||||
|                 if count >= 9: |  | ||||||
|                     break |  | ||||||
| 
 |  | ||||||
|         # explicitly teardown the daemon-actor |  | ||||||
|         await portal.cancel_actor() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| if __name__ == '__main__': |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -1,9 +1,7 @@ | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
| tractor.log.get_console_log("INFO") | tractor.log.get_console_log("INFO") | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
| async def main(service_name): | async def main(service_name): | ||||||
| 
 | 
 | ||||||
|     async with tractor.open_nursery() as an: |     async with tractor.open_nursery() as an: | ||||||
|  | @ -19,4 +17,4 @@ async def main(service_name): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if __name__ == '__main__': | if __name__ == '__main__': | ||||||
|     trio.run(main, 'some_actor_name') |     tractor.run(main, 'some_actor_name') | ||||||
|  |  | ||||||
|  | @ -1 +0,0 @@ | ||||||
| !.gitignore |  | ||||||
|  | @ -1,16 +0,0 @@ | ||||||
| Strictly support Python 3.10+, start runtime machinery reorg |  | ||||||
| 
 |  | ||||||
| Since we want to push forward using the new `match:` syntax for our |  | ||||||
| internal RPC-msg loops, we officially drop 3.9 support for the next |  | ||||||
| release which should coincide well with the first release of 3.11. |  | ||||||
| 
 |  | ||||||
| This patch set also officially removes the ``tractor.run()`` API (which |  | ||||||
| has been deprecated for some time) as well as starts an initial re-org |  | ||||||
| of the internal runtime core by: |  | ||||||
| - renaming ``tractor._actor`` -> ``._runtime`` |  | ||||||
| - moving the ``._runtime.ActorActor._process_messages()`` and |  | ||||||
|   ``._async_main()`` to be module level singleton-task-functions since |  | ||||||
|   they are only started once for each connection and actor spawn |  | ||||||
|   respectively; this internal API thus looks more similar to (at the |  | ||||||
|   time of writing) the ``trio``-internals in ``trio._core._run``. |  | ||||||
| - officially remove ``tractor.run()``, now deprecated for some time. |  | ||||||
|  | @ -1,4 +0,0 @@ | ||||||
| Only set `._debug.Lock.local_pdb_complete` if has been created. |  | ||||||
| 
 |  | ||||||
| This can be triggered by a very rare race condition (and thus we have no |  | ||||||
| working test yet) but it is known to exist in (a) consumer project(s). |  | ||||||
|  | @ -1,25 +0,0 @@ | ||||||
| Add support for ``trio >= 0.22`` and support for the new Python 3.11 |  | ||||||
| ``[Base]ExceptionGroup`` from `pep 654`_ via the backported |  | ||||||
| `exceptiongroup`_ package and some final fixes to the debug mode |  | ||||||
| subsystem. |  | ||||||
| 
 |  | ||||||
| This port ended up driving some (hopefully) final fixes to our debugger |  | ||||||
| subsystem including the solution to all lingering stdstreams locking |  | ||||||
| race-conditions and deadlock scenarios. This includes extending the |  | ||||||
| debugger tests suite as well as cancellation and ``asyncio`` mode cases. |  | ||||||
| Some of the notable details: |  | ||||||
| 
 |  | ||||||
| - always reverting to the ``trio`` SIGINT handler when leaving debug |  | ||||||
|   mode. |  | ||||||
| - bypassing child attempts to acquire the debug lock when detected |  | ||||||
|   to be amdist actor-runtime-cancellation. |  | ||||||
| - allowing the root actor to cancel local but IPC-stale subactor |  | ||||||
|   requests-tasks for the debug lock when in a "no IPC peers" state. |  | ||||||
| 
 |  | ||||||
| Further we refined our ``ActorNursery`` semantics to be more similar to |  | ||||||
| ``trio`` in the sense that parent task errors are always packed into the |  | ||||||
| actor-nursery emitted exception group and adjusted all tests and |  | ||||||
| examples accordingly. |  | ||||||
| 
 |  | ||||||
| .. _pep 654: https://peps.python.org/pep-0654/#handling-exception-groups |  | ||||||
| .. _exceptiongroup: https://github.com/python-trio/exceptiongroup |  | ||||||
|  | @ -1,5 +0,0 @@ | ||||||
| Establish an explicit "backend spawning" method table; use it from CI |  | ||||||
| 
 |  | ||||||
| More clearly lays out the current set of (3) backends: ``['trio', |  | ||||||
| 'mp_spawn', 'mp_forkserver']`` and adjusts the ``._spawn.py`` internals |  | ||||||
| as well as the test suite to accommodate. |  | ||||||
|  | @ -1,4 +0,0 @@ | ||||||
| Add ``key: Callable[..., Hashable]`` support to ``.trionics.maybe_open_context()`` |  | ||||||
| 
 |  | ||||||
| Gives users finer grained control over cache hit behaviour using |  | ||||||
| a callable which receives the input ``kwargs: dict``. |  | ||||||
|  | @ -1,41 +0,0 @@ | ||||||
| Add support for debug-lock blocking using a ``._debug.Lock._blocked: |  | ||||||
| set[tuple]`` and add ids when no-more IPC connections with the |  | ||||||
| root actor are detected. |  | ||||||
| 
 |  | ||||||
| This is an enhancement which (mostly) solves a lingering debugger |  | ||||||
| locking race case we needed to handle: |  | ||||||
| 
 |  | ||||||
| - child crashes acquires TTY lock in root and attaches to ``pdb`` |  | ||||||
| - child IPC goes down such that all channels to the root are broken |  | ||||||
|   / non-functional. |  | ||||||
| - root is stuck thinking the child is still in debug even though it |  | ||||||
|   can't be contacted and the child actor machinery hasn't been |  | ||||||
|   cancelled by its parent. |  | ||||||
| - root get's stuck in deadlock with child since it won't send a cancel |  | ||||||
|   request until the child is finished debugging (to avoid clobbering |  | ||||||
|   a child that is actually using the debugger), but the child can't |  | ||||||
|   unlock the debugger bc IPC is down and it can't contact the root. |  | ||||||
| 
 |  | ||||||
| To avoid this scenario add debug lock blocking list via |  | ||||||
| `._debug.Lock._blocked: set[tuple]` which holds actor uids for any actor |  | ||||||
| that is detected by the root as having no transport channel connections |  | ||||||
| (of which at least one should exist if this sub-actor at some point |  | ||||||
| acquired the debug lock). The root consequently checks this list for any |  | ||||||
| actor that tries to (re)acquire the lock and blocks with |  | ||||||
| a ``ContextCancelled``. Further, when a debug condition is tested in |  | ||||||
| ``._runtime._invoke``, the context's ``._enter_debugger_on_cancel`` is |  | ||||||
| set to `False` if the actor was put on the block list then all |  | ||||||
| post-mortem / crash handling will be bypassed for that task. |  | ||||||
| 
 |  | ||||||
| In theory this approach to block list management may cause problems |  | ||||||
| where some nested child actor acquires and releases the lock multiple |  | ||||||
| times and it gets stuck on the block list after the first use? If this |  | ||||||
| turns out to be an issue we can try changing the strat so blocks are |  | ||||||
| only added when the root has zero IPC peers left? |  | ||||||
| 
 |  | ||||||
| Further, this adds a root-locking-task side cancel scope, |  | ||||||
| ``Lock._root_local_task_cs_in_debug``, which can be ``.cancel()``-ed by the root |  | ||||||
| runtime when a stale lock is detected during the IPC channel testing. |  | ||||||
| However, right now we're NOT using this since it seems to cause test |  | ||||||
| failures likely due to causing pre-mature cancellation and maybe needs |  | ||||||
| a bit more experimenting? |  | ||||||
|  | @ -1,19 +0,0 @@ | ||||||
| Rework our ``.trionics.BroadcastReceiver`` internals to avoid method |  | ||||||
| recursion and approach a design and interface closer to ``trio``'s |  | ||||||
| ``MemoryReceiveChannel``. |  | ||||||
| 
 |  | ||||||
| The details of the internal changes include: |  | ||||||
| 
 |  | ||||||
| - implementing a ``BroadcastReceiver.receive_nowait()`` and using it |  | ||||||
|   within the async ``.receive()`` thus avoiding recursion from |  | ||||||
|   ``.receive()``. |  | ||||||
| - failing over to an internal ``._receive_from_underlying()`` when the |  | ||||||
|   ``_nowait()`` call raises ``trio.WouldBlock`` |  | ||||||
| - adding ``BroadcastState.statistics()`` for debugging and testing both |  | ||||||
|   internals and by users. |  | ||||||
| - add an internal ``BroadcastReceiver._raise_on_lag: bool`` which can be |  | ||||||
|   set to avoid ``Lagged`` raising for possible use cases where a user |  | ||||||
|   wants to choose between a [cheap or nasty |  | ||||||
|   pattern](https://zguide.zeromq.org/docs/chapter7/#The-Cheap-or-Nasty-Pattern) |  | ||||||
|   the the particular stream (we use this in ``piker``'s dark clearing |  | ||||||
|   engine to avoid fast feeds breaking during HFT periods). |  | ||||||
|  | @ -1,11 +0,0 @@ | ||||||
| Always ``list``-cast the ``mngrs`` input to |  | ||||||
| ``.trionics.gather_contexts()`` and ensure its size otherwise raise |  | ||||||
| a ``ValueError``. |  | ||||||
| 
 |  | ||||||
| Turns out that trying to pass an inline-style generator comprehension |  | ||||||
| doesn't seem to work inside the ``async with`` expression? Further, in |  | ||||||
| such a case we can get a hang waiting on the all-entered event |  | ||||||
| completion when the internal mngrs iteration is a noop. Instead we |  | ||||||
| always greedily check a size and error on empty input; the lazy |  | ||||||
| iteration of a generator input is not beneficial anyway since we're |  | ||||||
| entering all manager instances in concurrent tasks. |  | ||||||
|  | @ -1,15 +0,0 @@ | ||||||
| Fixes to ensure IPC (channel) breakage doesn't result in hung actor |  | ||||||
| trees; the zombie reaping and general supervision machinery will always |  | ||||||
| clean up and terminate. |  | ||||||
| 
 |  | ||||||
| This includes not only the (mostly minor) fixes to solve these cases but |  | ||||||
| also a new extensive test suite in `test_advanced_faults.py` with an |  | ||||||
| accompanying highly configurable example module-script in |  | ||||||
| `examples/advanced_faults/ipc_failure_during_stream.py`. Tests ensure we |  | ||||||
| never get hang or zombies despite operating in debug mode and attempt to |  | ||||||
| simulate all possible IPC transport failure cases for a local-host actor |  | ||||||
| tree. |  | ||||||
| 
 |  | ||||||
| Further we simplify `Context.open_stream.__aexit__()` to just call |  | ||||||
| `MsgStream.aclose()` directly more or less avoiding a pure duplicate |  | ||||||
| code path. |  | ||||||
|  | @ -1,10 +0,0 @@ | ||||||
| Always redraw the `pdbpp` prompt on `SIGINT` during REPL use. |  | ||||||
| 
 |  | ||||||
| There was recent changes todo with Python 3.10 that required us to pin |  | ||||||
| to a specific commit in `pdbpp` which have recently been fixed minus |  | ||||||
| this last issue with `SIGINT` shielding: not clobbering or not |  | ||||||
| showing the `(Pdb++)` prompt on ctlr-c by the user. This repairs all |  | ||||||
| that by firstly removing the standard KBI intercepting of the std lib's |  | ||||||
| `pdb.Pdb._cmdloop()` as well as ensuring that only the actor with REPL |  | ||||||
| control ever reports `SIGINT` handler log msgs and prompt redraws. With |  | ||||||
| this we move back to using pypi `pdbpp` release. |  | ||||||
|  | @ -1,7 +0,0 @@ | ||||||
| Drop `trio.Process.aclose()` usage, copy into our spawning code. |  | ||||||
| 
 |  | ||||||
| The details are laid out in https://github.com/goodboy/tractor/issues/330. |  | ||||||
| `trio` changed is process running quite some time ago, this just copies |  | ||||||
| out the small bit we needed (from the old `.aclose()`) for hard kills |  | ||||||
| where a soft runtime cancel request fails and our "zombie killer" |  | ||||||
| implementation kicks in. |  | ||||||
|  | @ -1,15 +0,0 @@ | ||||||
| Switch to using the fork & fix of `pdb++`, `pdbp`: |  | ||||||
| https://github.com/mdmintz/pdbp |  | ||||||
| 
 |  | ||||||
| Allows us to sidestep a variety of issues that aren't being maintained |  | ||||||
| in the upstream project thanks to the hard work of @mdmintz! |  | ||||||
| 
 |  | ||||||
| We also include some default settings adjustments as per recent |  | ||||||
| development on the fork: |  | ||||||
| 
 |  | ||||||
| - sticky mode is still turned on by default but now activates when |  | ||||||
|   a using the `ll` repl command. |  | ||||||
| - turn off line truncation by default to avoid inter-line gaps when |  | ||||||
|   resizing the terimnal during use. |  | ||||||
| - when using the backtrace cmd either by `w` or `bt`, the config |  | ||||||
|   automatically switches to non-sticky mode. |  | ||||||
|  | @ -1,8 +0,0 @@ | ||||||
| See both the `towncrier docs`_ and the `pluggy release readme`_ for hot |  | ||||||
| tips. We basically have the most minimal setup and release process right |  | ||||||
| now and use the default `fragment set`_. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| .. _towncrier docs: https://github.com/twisted/towncrier#quick-start |  | ||||||
| .. _pluggy release readme: https://github.com/pytest-dev/pluggy/blob/main/changelog/README.rst |  | ||||||
| .. _fragment set: https://github.com/twisted/towncrier#news-fragments |  | ||||||
|  | @ -1,37 +0,0 @@ | ||||||
| {% for section in sections %} |  | ||||||
| {% set underline = "-" %} |  | ||||||
| {% if section %} |  | ||||||
| {{section}} |  | ||||||
| {{ underline * section|length }}{% set underline = "~" %} |  | ||||||
| 
 |  | ||||||
| {% endif %} |  | ||||||
| {% if sections[section] %} |  | ||||||
| {% for category, val in definitions.items() if category in sections[section] %} |  | ||||||
| 
 |  | ||||||
| {{ definitions[category]['name'] }} |  | ||||||
| {{ underline * definitions[category]['name']|length }} |  | ||||||
| 
 |  | ||||||
| {% if definitions[category]['showcontent'] %} |  | ||||||
| {% for text, values in sections[section][category]|dictsort(by='value') %} |  | ||||||
| {% set issue_joiner = joiner(', ') %} |  | ||||||
| - {% for value in values|sort %}{{ issue_joiner() }}`{{ value }} <https://github.com/goodboy/tractor/issues/{{ value[1:] }}>`_{% endfor %}: {{ text }} |  | ||||||
| 
 |  | ||||||
| {% endfor %} |  | ||||||
| {% else %} |  | ||||||
| - {{ sections[section][category]['']|sort|join(', ') }} |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| {% endif %} |  | ||||||
| {% if sections[section][category]|length == 0 %} |  | ||||||
| 
 |  | ||||||
| No significant changes. |  | ||||||
| 
 |  | ||||||
| {% else %} |  | ||||||
| {% endif %} |  | ||||||
| {% endfor %} |  | ||||||
| {% else %} |  | ||||||
| 
 |  | ||||||
| No significant changes. |  | ||||||
| 
 |  | ||||||
| {% endif %} |  | ||||||
| {% endfor %} |  | ||||||
|  | @ -1,28 +0,0 @@ | ||||||
| [tool.towncrier] |  | ||||||
| package = "tractor" |  | ||||||
| filename = "NEWS.rst" |  | ||||||
| directory = "nooz/" |  | ||||||
| version = "0.1.0a6" |  | ||||||
| title_format = "tractor {version} ({project_date})" |  | ||||||
| template = "nooz/_template.rst" |  | ||||||
| all_bullets = true |  | ||||||
| 
 |  | ||||||
|   [[tool.towncrier.type]] |  | ||||||
|   directory = "feature" |  | ||||||
|   name = "Features" |  | ||||||
|   showcontent = true |  | ||||||
| 
 |  | ||||||
|   [[tool.towncrier.type]] |  | ||||||
|   directory = "bugfix" |  | ||||||
|   name = "Bug Fixes" |  | ||||||
|   showcontent = true |  | ||||||
| 
 |  | ||||||
|   [[tool.towncrier.type]] |  | ||||||
|   directory = "doc" |  | ||||||
|   name = "Improved Documentation" |  | ||||||
|   showcontent = true |  | ||||||
| 
 |  | ||||||
|   [[tool.towncrier.type]] |  | ||||||
|   directory = "trivial" |  | ||||||
|   name = "Trivial/Internal Changes" |  | ||||||
|   showcontent = true |  | ||||||
|  | @ -1,2 +1,2 @@ | ||||||
| sphinx | sphinx | ||||||
| sphinx_book_theme | sphinx_typlog_theme | ||||||
|  | @ -1,8 +1,6 @@ | ||||||
| pytest | pytest | ||||||
| pytest-trio | pytest-trio | ||||||
| pytest-timeout | pdbpp | ||||||
| pdbp |  | ||||||
| mypy | mypy | ||||||
| trio_typing | trio_typing | ||||||
| pexpect | pexpect | ||||||
| towncrier |  | ||||||
|  |  | ||||||
							
								
								
									
										73
									
								
								setup.py
								
								
								
								
							
							
						
						
									
										73
									
								
								setup.py
								
								
								
								
							|  | @ -1,22 +1,21 @@ | ||||||
| #!/usr/bin/env python | #!/usr/bin/env python | ||||||
| # | # | ||||||
| # tractor: structured concurrent "actors". | # tractor: a trionic actor model built on `multiprocessing` and `trio` | ||||||
| # | # | ||||||
| # Copyright 2018-eternity Tyler Goodlet. | # Copyright (C) 2018-2020  Tyler Goodlet | ||||||
| 
 | 
 | ||||||
| # This program is free software: you can redistribute it and/or modify | # This program is free software: you can redistribute it and/or modify | ||||||
| # it under the terms of the GNU Affero General Public License as published by | # it under the terms of the GNU General Public License as published by | ||||||
| # the Free Software Foundation, either version 3 of the License, or | # the Free Software Foundation, either version 3 of the License, or | ||||||
| # (at your option) any later version. | # (at your option) any later version. | ||||||
| 
 | 
 | ||||||
| # This program is distributed in the hope that it will be useful, | # This program is distributed in the hope that it will be useful, | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||||
| # GNU Affero General Public License for more details. | # GNU General Public License for more details. | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 | 
 | ||||||
|  | # You should have received a copy of the GNU General Public License | ||||||
|  | # along with this program.  If not, see <http://www.gnu.org/licenses/>. | ||||||
| from setuptools import setup | from setuptools import setup | ||||||
| 
 | 
 | ||||||
| with open('docs/README.rst', encoding='utf-8') as f: | with open('docs/README.rst', encoding='utf-8') as f: | ||||||
|  | @ -25,62 +24,36 @@ with open('docs/README.rst', encoding='utf-8') as f: | ||||||
| 
 | 
 | ||||||
| setup( | setup( | ||||||
|     name="tractor", |     name="tractor", | ||||||
|     version='0.1.0a6dev0',  # alpha zone |     version='0.1.0a0',  # first ever alpha | ||||||
|     description='structured concurrrent `trio`-"actors"', |     description='structured concurrrent "actors"', | ||||||
|     long_description=readme, |     long_description=readme, | ||||||
|     license='AGPLv3', |     license='GPLv3', | ||||||
|     author='Tyler Goodlet', |     author='Tyler Goodlet', | ||||||
|     maintainer='Tyler Goodlet', |     maintainer='Tyler Goodlet', | ||||||
|     maintainer_email='goodboy_foss@protonmail.com', |     maintainer_email='jgbt@protonmail.com', | ||||||
|     url='https://github.com/goodboy/tractor', |     url='https://github.com/goodboy/tractor', | ||||||
|     platforms=['linux', 'windows'], |     platforms=['linux', 'windows'], | ||||||
|     packages=[ |     packages=[ | ||||||
|         'tractor', |         'tractor', | ||||||
|         'tractor.experimental', |         'tractor.testing', | ||||||
|         'tractor.trionics', |  | ||||||
|     ], |     ], | ||||||
|     install_requires=[ |     install_requires=[ | ||||||
| 
 |         'trio>0.8', | ||||||
|         # trio related |         'msgpack', | ||||||
|         # proper range spec: |  | ||||||
|         # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5 |  | ||||||
|         'trio >= 0.22', |  | ||||||
|         'async_generator', |         'async_generator', | ||||||
|         'trio_typing', |  | ||||||
|         'exceptiongroup', |  | ||||||
| 
 |  | ||||||
|         # tooling |  | ||||||
|         'tricycle', |  | ||||||
|         'trio_typing', |  | ||||||
|         'colorlog', |         'colorlog', | ||||||
|         'wrapt', |         'wrapt', | ||||||
| 
 |         'trio_typing', | ||||||
|         # IPC serialization |         'pdbpp', | ||||||
|         'msgspec', |  | ||||||
| 
 |  | ||||||
|         # debug mode REPL |  | ||||||
|         'pdbp', |  | ||||||
| 
 |  | ||||||
|         # pip ref docs on these specs: |  | ||||||
|         # https://pip.pypa.io/en/stable/reference/requirement-specifiers/#examples |  | ||||||
|         # and pep: |  | ||||||
|         # https://peps.python.org/pep-0440/#version-specifiers |  | ||||||
| 
 |  | ||||||
|         # windows deps workaround for ``pdbpp`` |  | ||||||
|         # https://github.com/pdbpp/pdbpp/issues/498 |  | ||||||
|         # https://github.com/pdbpp/fancycompleter/issues/37 |  | ||||||
|         'pyreadline3 ; platform_system == "Windows"', |  | ||||||
| 
 |  | ||||||
|     ], |     ], | ||||||
|     tests_require=['pytest'], |     tests_require=['pytest'], | ||||||
|     python_requires=">=3.10", |     python_requires=">=3.7", | ||||||
|     keywords=[ |     keywords=[ | ||||||
|         'trio', |         'trio', | ||||||
|         'async', |         "async", | ||||||
|         'concurrency', |         "concurrency", | ||||||
|         'structured concurrency', |         "actor model", | ||||||
|         'actor model', |         "distributed", | ||||||
|         'distributed', |  | ||||||
|         'multiprocessing' |         'multiprocessing' | ||||||
|     ], |     ], | ||||||
|     classifiers=[ |     classifiers=[ | ||||||
|  | @ -88,10 +61,12 @@ setup( | ||||||
|         "Operating System :: POSIX :: Linux", |         "Operating System :: POSIX :: Linux", | ||||||
|         "Operating System :: Microsoft :: Windows", |         "Operating System :: Microsoft :: Windows", | ||||||
|         "Framework :: Trio", |         "Framework :: Trio", | ||||||
|         "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", |         "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", | ||||||
|         "Programming Language :: Python :: Implementation :: CPython", |         "Programming Language :: Python :: Implementation :: CPython", | ||||||
|         "Programming Language :: Python :: 3 :: Only", |         "Programming Language :: Python :: 3 :: Only", | ||||||
|         "Programming Language :: Python :: 3.10", |         "Programming Language :: Python :: 3.7", | ||||||
|  |         "Programming Language :: Python :: 3.8", | ||||||
|  |         "Programming Language :: Python :: 3.9", | ||||||
|         "Intended Audience :: Science/Research", |         "Intended Audience :: Science/Research", | ||||||
|         "Intended Audience :: Developers", |         "Intended Audience :: Developers", | ||||||
|         "Topic :: System :: Distributed Computing", |         "Topic :: System :: Distributed Computing", | ||||||
|  |  | ||||||
|  | @ -7,91 +7,16 @@ import os | ||||||
| import random | import random | ||||||
| import signal | import signal | ||||||
| import platform | import platform | ||||||
| import pathlib |  | ||||||
| import time | import time | ||||||
| import inspect |  | ||||||
| from functools import partial, wraps |  | ||||||
| 
 | 
 | ||||||
| import pytest | import pytest | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| 
 | 
 | ||||||
|  | # export for tests | ||||||
|  | from tractor.testing import tractor_test  # noqa | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
| pytest_plugins = ['pytester'] | pytest_plugins = ['pytester'] | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def tractor_test(fn): |  | ||||||
|     """ |  | ||||||
|     Use: |  | ||||||
| 
 |  | ||||||
|     @tractor_test |  | ||||||
|     async def test_whatever(): |  | ||||||
|         await ... |  | ||||||
| 
 |  | ||||||
|     If fixtures: |  | ||||||
| 
 |  | ||||||
|         - ``arb_addr`` (a socket addr tuple where arbiter is listening) |  | ||||||
|         - ``loglevel`` (logging level passed to tractor internals) |  | ||||||
|         - ``start_method`` (subprocess spawning backend) |  | ||||||
| 
 |  | ||||||
|     are defined in the `pytest` fixture space they will be automatically |  | ||||||
|     injected to tests declaring these funcargs. |  | ||||||
|     """ |  | ||||||
|     @wraps(fn) |  | ||||||
|     def wrapper( |  | ||||||
|         *args, |  | ||||||
|         loglevel=None, |  | ||||||
|         arb_addr=None, |  | ||||||
|         start_method=None, |  | ||||||
|         **kwargs |  | ||||||
|     ): |  | ||||||
|         # __tracebackhide__ = True |  | ||||||
| 
 |  | ||||||
|         if 'arb_addr' in inspect.signature(fn).parameters: |  | ||||||
|             # injects test suite fixture value to test as well |  | ||||||
|             # as `run()` |  | ||||||
|             kwargs['arb_addr'] = arb_addr |  | ||||||
| 
 |  | ||||||
|         if 'loglevel' in inspect.signature(fn).parameters: |  | ||||||
|             # allows test suites to define a 'loglevel' fixture |  | ||||||
|             # that activates the internal logging |  | ||||||
|             kwargs['loglevel'] = loglevel |  | ||||||
| 
 |  | ||||||
|         if start_method is None: |  | ||||||
|             if platform.system() == "Windows": |  | ||||||
|                 start_method = 'trio' |  | ||||||
| 
 |  | ||||||
|         if 'start_method' in inspect.signature(fn).parameters: |  | ||||||
|             # set of subprocess spawning backends |  | ||||||
|             kwargs['start_method'] = start_method |  | ||||||
| 
 |  | ||||||
|         if kwargs: |  | ||||||
| 
 |  | ||||||
|             # use explicit root actor start |  | ||||||
| 
 |  | ||||||
|             async def _main(): |  | ||||||
|                 async with tractor.open_root_actor( |  | ||||||
|                     # **kwargs, |  | ||||||
|                     arbiter_addr=arb_addr, |  | ||||||
|                     loglevel=loglevel, |  | ||||||
|                     start_method=start_method, |  | ||||||
| 
 |  | ||||||
|                     # TODO: only enable when pytest is passed --pdb |  | ||||||
|                     # debug_mode=True, |  | ||||||
| 
 |  | ||||||
|                 ): |  | ||||||
|                     await fn(*args, **kwargs) |  | ||||||
| 
 |  | ||||||
|             main = _main |  | ||||||
| 
 |  | ||||||
|         else: |  | ||||||
|             # use implicit root actor start |  | ||||||
|             main = partial(fn, *args, **kwargs) |  | ||||||
| 
 |  | ||||||
|         return trio.run(main) |  | ||||||
| 
 |  | ||||||
|     return wrapper |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| _arb_addr = '127.0.0.1', random.randint(1000, 9999) | _arb_addr = '127.0.0.1', random.randint(1000, 9999) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -114,27 +39,20 @@ no_windows = pytest.mark.skipif( | ||||||
| ) | ) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def repodir() -> pathlib.Path: | def repodir(): | ||||||
|     ''' |     """Return the abspath to the repo directory. | ||||||
|     Return the abspath to the repo directory. |     """ | ||||||
| 
 |     dirname = os.path.dirname | ||||||
|     ''' |     dirpath = os.path.abspath( | ||||||
|     # 2 parents up to step up through tests/<repo_dir> |         dirname(dirname(os.path.realpath(__file__))) | ||||||
|     return pathlib.Path(__file__).parent.parent.absolute() |         ) | ||||||
| 
 |     return dirpath | ||||||
| 
 |  | ||||||
| def examples_dir() -> pathlib.Path: |  | ||||||
|     ''' |  | ||||||
|     Return the abspath to the examples directory as `pathlib.Path`. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     return repodir() / 'examples' |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def pytest_addoption(parser): | def pytest_addoption(parser): | ||||||
|     parser.addoption( |     parser.addoption( | ||||||
|         "--ll", action="store", dest='loglevel', |         "--ll", action="store", dest='loglevel', | ||||||
|         default='ERROR', help="logging level to set when testing" |         default=None, help="logging level to set when testing" | ||||||
|     ) |     ) | ||||||
| 
 | 
 | ||||||
|     parser.addoption( |     parser.addoption( | ||||||
|  | @ -146,6 +64,10 @@ def pytest_addoption(parser): | ||||||
| 
 | 
 | ||||||
| def pytest_configure(config): | def pytest_configure(config): | ||||||
|     backend = config.option.spawn_backend |     backend = config.option.spawn_backend | ||||||
|  | 
 | ||||||
|  |     if backend == 'mp': | ||||||
|  |         tractor._spawn.try_set_start_method('spawn') | ||||||
|  |     elif backend == 'trio': | ||||||
|         tractor._spawn.try_set_start_method(backend) |         tractor._spawn.try_set_start_method(backend) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -153,24 +75,20 @@ def pytest_configure(config): | ||||||
| def loglevel(request): | def loglevel(request): | ||||||
|     orig = tractor.log._default_loglevel |     orig = tractor.log._default_loglevel | ||||||
|     level = tractor.log._default_loglevel = request.config.option.loglevel |     level = tractor.log._default_loglevel = request.config.option.loglevel | ||||||
|     tractor.log.get_console_log(level) |  | ||||||
|     yield level |     yield level | ||||||
|     tractor.log._default_loglevel = orig |     tractor.log._default_loglevel = orig | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @pytest.fixture(scope='session') | @pytest.fixture(scope='session') | ||||||
| def spawn_backend(request) -> str: | def spawn_backend(request): | ||||||
|     return request.config.option.spawn_backend |     return request.config.option.spawn_backend | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| _ci_env: bool = os.environ.get('CI', False) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.fixture(scope='session') | @pytest.fixture(scope='session') | ||||||
| def ci_env() -> bool: | def ci_env() -> bool: | ||||||
|     """Detect CI envoirment. |     """Detect CI envoirment. | ||||||
|     """ |     """ | ||||||
|     return _ci_env |     return os.environ.get('TRAVIS', False) or os.environ.get('CI', False) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @pytest.fixture(scope='session') | @pytest.fixture(scope='session') | ||||||
|  | @ -180,24 +98,24 @@ def arb_addr(): | ||||||
| 
 | 
 | ||||||
| def pytest_generate_tests(metafunc): | def pytest_generate_tests(metafunc): | ||||||
|     spawn_backend = metafunc.config.option.spawn_backend |     spawn_backend = metafunc.config.option.spawn_backend | ||||||
| 
 |  | ||||||
|     if not spawn_backend: |     if not spawn_backend: | ||||||
|         # XXX some weird windows bug with `pytest`? |         # XXX some weird windows bug with `pytest`? | ||||||
|         spawn_backend = 'trio' |         spawn_backend = 'mp' | ||||||
|  |     assert spawn_backend in ('mp', 'trio') | ||||||
| 
 | 
 | ||||||
|     # TODO: maybe just use the literal `._spawn.SpawnMethodKey`? |  | ||||||
|     assert spawn_backend in ( |  | ||||||
|         'mp_spawn', |  | ||||||
|         'mp_forkserver', |  | ||||||
|         'trio', |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
|     # NOTE: used to be used to dyanmically parametrize tests for when |  | ||||||
|     # you just passed --spawn-backend=`mp` on the cli, but now we expect |  | ||||||
|     # that cli input to be manually specified, BUT, maybe we'll do |  | ||||||
|     # something like this again in the future? |  | ||||||
|     if 'start_method' in metafunc.fixturenames: |     if 'start_method' in metafunc.fixturenames: | ||||||
|         metafunc.parametrize("start_method", [spawn_backend], scope='module') |         if spawn_backend == 'mp': | ||||||
|  |             from multiprocessing import get_all_start_methods | ||||||
|  |             methods = get_all_start_methods() | ||||||
|  |             if 'fork' in methods: | ||||||
|  |                 # fork not available on windows, so check before | ||||||
|  |                 # removing XXX: the fork method is in general | ||||||
|  |                 # incompatible with trio's global scheduler state | ||||||
|  |                 methods.remove('fork') | ||||||
|  |         elif spawn_backend == 'trio': | ||||||
|  |             methods = ['trio'] | ||||||
|  | 
 | ||||||
|  |         metafunc.parametrize("start_method", methods, scope='module') | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def sig_prog(proc, sig): | def sig_prog(proc, sig): | ||||||
|  | @ -213,22 +131,16 @@ def sig_prog(proc, sig): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @pytest.fixture | @pytest.fixture | ||||||
| def daemon( | def daemon(loglevel, testdir, arb_addr): | ||||||
|     loglevel: str, |     """Run a daemon actor as a "remote arbiter". | ||||||
|     testdir, |     """ | ||||||
|     arb_addr: tuple[str, int], |  | ||||||
| ): |  | ||||||
|     ''' |  | ||||||
|     Run a daemon actor as a "remote arbiter". |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     if loglevel in ('trace', 'debug'): |     if loglevel in ('trace', 'debug'): | ||||||
|         # too much logging will lock up the subproc (smh) |         # too much logging will lock up the subproc (smh) | ||||||
|         loglevel = 'info' |         loglevel = 'info' | ||||||
| 
 | 
 | ||||||
|     cmdargs = [ |     cmdargs = [ | ||||||
|         sys.executable, '-c', |         sys.executable, '-c', | ||||||
|         "import tractor; tractor.run_daemon([], registry_addr={}, loglevel={})" |         "import tractor; tractor.run_daemon([], arbiter_addr={}, loglevel={})" | ||||||
|         .format( |         .format( | ||||||
|             arb_addr, |             arb_addr, | ||||||
|             "'{}'".format(loglevel) if loglevel else None) |             "'{}'".format(loglevel) if loglevel else None) | ||||||
|  |  | ||||||
|  | @ -1,129 +0,0 @@ | ||||||
| """ |  | ||||||
| Bidirectional streaming. |  | ||||||
| 
 |  | ||||||
| """ |  | ||||||
| import pytest |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def simple_rpc( |  | ||||||
| 
 |  | ||||||
|     ctx: tractor.Context, |  | ||||||
|     data: int, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     ''' |  | ||||||
|     Test a small ping-pong server. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     # signal to parent that we're up |  | ||||||
|     await ctx.started(data + 1) |  | ||||||
| 
 |  | ||||||
|     print('opening stream in callee') |  | ||||||
|     async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|         count = 0 |  | ||||||
|         while True: |  | ||||||
|             try: |  | ||||||
|                 await stream.receive() == 'ping' |  | ||||||
|             except trio.EndOfChannel: |  | ||||||
|                 assert count == 10 |  | ||||||
|                 break |  | ||||||
|             else: |  | ||||||
|                 print('pong') |  | ||||||
|                 await stream.send('pong') |  | ||||||
|                 count += 1 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def simple_rpc_with_forloop( |  | ||||||
| 
 |  | ||||||
|     ctx: tractor.Context, |  | ||||||
|     data: int, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     """Same as previous test but using ``async for`` syntax/api. |  | ||||||
| 
 |  | ||||||
|     """ |  | ||||||
| 
 |  | ||||||
|     # signal to parent that we're up |  | ||||||
|     await ctx.started(data + 1) |  | ||||||
| 
 |  | ||||||
|     print('opening stream in callee') |  | ||||||
|     async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|         count = 0 |  | ||||||
|         async for msg in stream: |  | ||||||
| 
 |  | ||||||
|             assert msg == 'ping' |  | ||||||
|             print('pong') |  | ||||||
|             await stream.send('pong') |  | ||||||
|             count += 1 |  | ||||||
| 
 |  | ||||||
|         else: |  | ||||||
|             assert count == 10 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'use_async_for', |  | ||||||
|     [True, False], |  | ||||||
| ) |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'server_func', |  | ||||||
|     [simple_rpc, simple_rpc_with_forloop], |  | ||||||
| ) |  | ||||||
| def test_simple_rpc(server_func, use_async_for): |  | ||||||
|     ''' |  | ||||||
|     The simplest request response pattern. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
| 
 |  | ||||||
|             portal = await n.start_actor( |  | ||||||
|                 'rpc_server', |  | ||||||
|                 enable_modules=[__name__], |  | ||||||
|             ) |  | ||||||
| 
 |  | ||||||
|             async with portal.open_context( |  | ||||||
|                 server_func,  # taken from pytest parameterization |  | ||||||
|                 data=10, |  | ||||||
|             ) as (ctx, sent): |  | ||||||
| 
 |  | ||||||
|                 assert sent == 11 |  | ||||||
| 
 |  | ||||||
|                 async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|                     if use_async_for: |  | ||||||
| 
 |  | ||||||
|                         count = 0 |  | ||||||
|                         # receive msgs using async for style |  | ||||||
|                         print('ping') |  | ||||||
|                         await stream.send('ping') |  | ||||||
| 
 |  | ||||||
|                         async for msg in stream: |  | ||||||
|                             assert msg == 'pong' |  | ||||||
|                             print('ping') |  | ||||||
|                             await stream.send('ping') |  | ||||||
|                             count += 1 |  | ||||||
| 
 |  | ||||||
|                             if count >= 9: |  | ||||||
|                                 break |  | ||||||
| 
 |  | ||||||
|                     else: |  | ||||||
|                         # classic send/receive style |  | ||||||
|                         for _ in range(10): |  | ||||||
| 
 |  | ||||||
|                             print('ping') |  | ||||||
|                             await stream.send('ping') |  | ||||||
|                             assert await stream.receive() == 'pong' |  | ||||||
| 
 |  | ||||||
|                 # stream should terminate here |  | ||||||
| 
 |  | ||||||
|             # final context result(s) should be consumed here in __aexit__() |  | ||||||
| 
 |  | ||||||
|             await portal.cancel_actor() |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -1,193 +0,0 @@ | ||||||
| ''' |  | ||||||
| Sketchy network blackoutz, ugly byzantine gens, puedes eschuchar la |  | ||||||
| cancelacion?.. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| from functools import partial |  | ||||||
| 
 |  | ||||||
| import pytest |  | ||||||
| from _pytest.pathlib import import_path |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| from conftest import ( |  | ||||||
|     examples_dir, |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'debug_mode', |  | ||||||
|     [False, True], |  | ||||||
|     ids=['no_debug_mode', 'debug_mode'], |  | ||||||
| ) |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'ipc_break', |  | ||||||
|     [ |  | ||||||
|         # no breaks |  | ||||||
|         { |  | ||||||
|             'break_parent_ipc_after': False, |  | ||||||
|             'break_child_ipc_after': False, |  | ||||||
|         }, |  | ||||||
| 
 |  | ||||||
|         # only parent breaks |  | ||||||
|         { |  | ||||||
|             'break_parent_ipc_after': 500, |  | ||||||
|             'break_child_ipc_after': False, |  | ||||||
|         }, |  | ||||||
| 
 |  | ||||||
|         # only child breaks |  | ||||||
|         { |  | ||||||
|             'break_parent_ipc_after': False, |  | ||||||
|             'break_child_ipc_after': 500, |  | ||||||
|         }, |  | ||||||
| 
 |  | ||||||
|         # both: break parent first |  | ||||||
|         { |  | ||||||
|             'break_parent_ipc_after': 500, |  | ||||||
|             'break_child_ipc_after': 800, |  | ||||||
|         }, |  | ||||||
|         # both: break child first |  | ||||||
|         { |  | ||||||
|             'break_parent_ipc_after': 800, |  | ||||||
|             'break_child_ipc_after': 500, |  | ||||||
|         }, |  | ||||||
| 
 |  | ||||||
|     ], |  | ||||||
|     ids=[ |  | ||||||
|         'no_break', |  | ||||||
|         'break_parent', |  | ||||||
|         'break_child', |  | ||||||
|         'break_both_parent_first', |  | ||||||
|         'break_both_child_first', |  | ||||||
|     ], |  | ||||||
| ) |  | ||||||
| def test_ipc_channel_break_during_stream( |  | ||||||
|     debug_mode: bool, |  | ||||||
|     spawn_backend: str, |  | ||||||
|     ipc_break: dict | None, |  | ||||||
| ): |  | ||||||
|     ''' |  | ||||||
|     Ensure we can have an IPC channel break its connection during |  | ||||||
|     streaming and it's still possible for the (simulated) user to kill |  | ||||||
|     the actor tree using SIGINT. |  | ||||||
| 
 |  | ||||||
|     We also verify the type of connection error expected in the parent |  | ||||||
|     depending on which side if the IPC breaks first. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     if spawn_backend != 'trio': |  | ||||||
|         if debug_mode: |  | ||||||
|             pytest.skip('`debug_mode` only supported on `trio` spawner') |  | ||||||
| 
 |  | ||||||
|         # non-`trio` spawners should never hit the hang condition that |  | ||||||
|         # requires the user to do ctl-c to cancel the actor tree. |  | ||||||
|         expect_final_exc = trio.ClosedResourceError |  | ||||||
| 
 |  | ||||||
|     mod = import_path( |  | ||||||
|         examples_dir() / 'advanced_faults' / 'ipc_failure_during_stream.py', |  | ||||||
|         root=examples_dir(), |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
|     expect_final_exc = KeyboardInterrupt |  | ||||||
| 
 |  | ||||||
|     # when ONLY the child breaks we expect the parent to get a closed |  | ||||||
|     # resource error on the next `MsgStream.receive()` and then fail out |  | ||||||
|     # and cancel the child from there. |  | ||||||
|     if ( |  | ||||||
| 
 |  | ||||||
|         # only child breaks |  | ||||||
|         ( |  | ||||||
|             ipc_break['break_child_ipc_after'] |  | ||||||
|             and ipc_break['break_parent_ipc_after'] is False |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         # both break but, parent breaks first |  | ||||||
|         or ( |  | ||||||
|             ipc_break['break_child_ipc_after'] is not False |  | ||||||
|             and ( |  | ||||||
|                 ipc_break['break_parent_ipc_after'] |  | ||||||
|                 > ipc_break['break_child_ipc_after'] |  | ||||||
|             ) |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|     ): |  | ||||||
|         expect_final_exc = trio.ClosedResourceError |  | ||||||
| 
 |  | ||||||
|     # when the parent IPC side dies (even if the child's does as well |  | ||||||
|     # but the child fails BEFORE the parent) we expect the channel to be |  | ||||||
|     # sent a stop msg from the child at some point which will signal the |  | ||||||
|     # parent that the stream has been terminated. |  | ||||||
|     # NOTE: when the parent breaks "after" the child you get this same |  | ||||||
|     # case as well, the child breaks the IPC channel with a stop msg |  | ||||||
|     # before any closure takes place. |  | ||||||
|     elif ( |  | ||||||
|         # only parent breaks |  | ||||||
|         ( |  | ||||||
|             ipc_break['break_parent_ipc_after'] |  | ||||||
|             and ipc_break['break_child_ipc_after'] is False |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         # both break but, child breaks first |  | ||||||
|         or ( |  | ||||||
|             ipc_break['break_parent_ipc_after'] is not False |  | ||||||
|             and ( |  | ||||||
|                 ipc_break['break_child_ipc_after'] |  | ||||||
|                 > ipc_break['break_parent_ipc_after'] |  | ||||||
|             ) |  | ||||||
|         ) |  | ||||||
|     ): |  | ||||||
|         expect_final_exc = trio.EndOfChannel |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(expect_final_exc): |  | ||||||
|         trio.run( |  | ||||||
|             partial( |  | ||||||
|                 mod.main, |  | ||||||
|                 debug_mode=debug_mode, |  | ||||||
|                 start_method=spawn_backend, |  | ||||||
|                 **ipc_break, |  | ||||||
|             ) |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def break_ipc_after_started( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ) -> None: |  | ||||||
|     await ctx.started() |  | ||||||
|     async with ctx.open_stream() as stream: |  | ||||||
|         await stream.aclose() |  | ||||||
|         await trio.sleep(0.2) |  | ||||||
|         await ctx.chan.send(None) |  | ||||||
|         print('child broke IPC and terminating') |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
def test_stream_closed_right_after_ipc_break_and_zombie_lord_engages():
    '''
    Verify that if a subactor's IPC goes down just after bringing up
    a stream the parent can trigger a SIGINT and the child will be
    reaped out-of-IPC by the localhost process supervision machinery:
    aka "zombie lord".

    '''
    async def main():
        async with tractor.open_nursery() as an:
            portal = await an.start_actor(
                'ipc_breaker',
                enable_modules=[__name__],
            )

            # give the child a moment to break its IPC then bail
            with trio.move_on_after(1):
                async with portal.open_context(
                    break_ipc_after_started
                ) as (ctx, sent):
                    async with ctx.open_stream():
                        await trio.sleep(0.5)

                    print('parent waiting on context')

            print('parent exited context')
            raise KeyboardInterrupt

    with pytest.raises(KeyboardInterrupt):
        trio.run(main)
|  | @ -1,380 +0,0 @@ | ||||||
| ''' |  | ||||||
| Advanced streaming patterns using bidirectional streams and contexts. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| from collections import Counter |  | ||||||
| import itertools |  | ||||||
| import platform |  | ||||||
| 
 |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
def is_win():
    '''
    Predicate: running on a Windows host?

    '''
    return 'Windows' == platform.system()
| 
 |  | ||||||
| 
 |  | ||||||
# global registry of active consumer streams, keyed by parity topic.
_registry: dict[str, set[tractor.MsgStream]] = {
    topic: set() for topic in ('even', 'odd')
}
| 
 |  | ||||||
| 
 |  | ||||||
async def publisher(
    seed: int = 0,
) -> None:
    '''
    Emit an endless increasing int sequence (starting at ``seed``),
    fanning each value out to every stream registered under the
    matching 'even'/'odd' topic.

    '''
    global _registry

    for val in itertools.count(seed):

        topic = 'odd' if val % 2 else 'even'

        # iterate a snapshot since subs may mutate concurrently
        for sub_stream in _registry[topic].copy():
            await sub_stream.send(val)

        # throttle send rate to ~1kHz
        # making it readable to a human user
        await trio.sleep(1/1000)
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def subscribe(

    ctx: tractor.Context,

) -> None:
    '''
    Serve a consumer's dynamic subscriptions: each msg received over
    the stream is the full set of topics the peer currently wants;
    update the global registry to match.

    '''
    global _registry

    # sync caller
    await ctx.started(None)

    async with ctx.open_stream() as stream:

        # update subs list as consumer requests
        async for new_subs in stream:

            new_subs = set(new_subs)

            # Topics this stream should no longer be registered for.
            # BUGFIX: the original computed ``new_subs - _registry.keys()``
            # which is always empty for valid topic names (and would
            # ``KeyError`` on ``_registry[sub]`` for invalid ones), so
            # stale subscriptions were never dropped.
            remove = _registry.keys() - new_subs

            print(f'setting sub to {new_subs} for {ctx.chan.uid}')

            # remove old subs; ``discard`` avoids ``KeyError`` when the
            # stream was never registered for that topic.
            for sub in remove:
                _registry[sub].discard(stream)

            # add new subs for consumer
            for sub in new_subs:
                _registry[sub].add(stream)
| 
 |  | ||||||
| 
 |  | ||||||
async def consumer(

    subs: list[str],

) -> None:
    '''
    Subscribe to the 'publisher' actor and print received values;
    when given multiple topics, cycle the active subscription
    dynamically between them.

    '''
    uid = tractor.current_actor().uid

    async with (
        tractor.wait_for_actor('publisher') as portal,
        portal.open_context(subscribe) as (ctx, first),
        ctx.open_stream() as stream,
    ):
        if len(subs) < 2:
            # static (single topic) sub
            await stream.send(subs)
            async for value in stream:
                print(f'{uid} got: {value}')

        else:
            # flip between the provided subs dynamically
            for sub in itertools.cycle(subs):
                print(f'setting dynamic sub to {sub}')
                await stream.send([sub])

                count = 0
                async for value in stream:
                    print(f'{uid} got: {value}')
                    if count > 5:
                        break
                    count += 1
| 
 |  | ||||||
| 
 |  | ||||||
def test_dynamic_pub_sub():
    '''
    Spawn a publisher plus several static-topic consumers and one
    dynamic consumer; let them stream for a fixed window then bail.

    '''
    global _registry

    from multiprocessing import cpu_count
    cpus = cpu_count()

    async def main():
        async with tractor.open_nursery() as an:

            # name of this actor will be same as target func
            await an.run_in_actor(publisher)

            # one static consumer per (cpus - 2), cycling topics
            for _, topic in zip(
                range(cpus - 2),
                itertools.cycle(_registry.keys()),
            ):
                await an.run_in_actor(
                    consumer,
                    name=f'consumer_{topic}',
                    subs=[topic],
                )

            # make one dynamic subscriber
            await an.run_in_actor(
                consumer,
                name='consumer_dynamic',
                subs=list(_registry.keys()),
            )

            # block until cancelled by user
            with trio.fail_after(3):
                await trio.sleep_forever()

    try:
        trio.run(main)
    except trio.TooSlowError:
        pass
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def one_task_streams_and_one_handles_reqresp(

    ctx: tractor.Context,

) -> None:
    '''
    Stream 'yo' msgs to the peer while a sibling task answers each
    inbound 'ping' with a 'pong' over the very same stream.

    '''
    await ctx.started()

    async with ctx.open_stream() as stream:

        async def pingpong():
            '''
            Run a simple req/response service.

            '''
            async for msg in stream:
                print('rpc server ping')
                assert msg == 'ping'
                print('rpc server pong')
                await stream.send('pong')

        async with trio.open_nursery() as tn:
            tn.start_soon(pingpong)

            # stream forever; cancelled externally by the test
            while True:
                await stream.send('yo')
                await trio.sleep(0.01)
| 
 |  | ||||||
| 
 |  | ||||||
def test_reqresp_ontopof_streaming():
    '''
    Test a subactor that both streams with one task and
    spawns another which handles a small requests-response
    dialogue over the same bidir-stream.

    '''
    async def main():

        # flag to make sure we get at least one pong
        got_pong: bool = False

        # smh, windows needs a longer grace period
        timeout: int = 4 if is_win() else 2

        with trio.move_on_after(timeout):
            async with tractor.open_nursery() as an:

                # name of this actor will be same as target func
                portal = await an.start_actor(
                    'dual_tasks',
                    enable_modules=[__name__]
                )

                async with portal.open_context(
                    one_task_streams_and_one_handles_reqresp,
                ) as (ctx, first):

                    assert first is None

                    async with ctx.open_stream() as stream:

                        await stream.send('ping')

                        async for msg in stream:
                            print(f'client received: {msg}')

                            assert msg in {'pong', 'yo'}

                            if msg == 'pong':
                                got_pong = True
                                await stream.send('ping')
                                print('client sent ping')

        assert got_pong

    try:
        trio.run(main)
    except trio.TooSlowError:
        pass
| 
 |  | ||||||
| 
 |  | ||||||
async def async_gen_stream(sequence):
    '''
    Yield each item of ``sequence``, pausing briefly between items.

    '''
    for item in sequence:
        yield item
        await trio.sleep(0.1)
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def echo_ctx_stream(
    ctx: tractor.Context,
) -> None:
    '''
    Echo every msg received over the bidir stream back to the sender.

    '''
    await ctx.started()

    async with ctx.open_stream() as stream:
        async for echoed in stream:
            await stream.send(echoed)
| 
 |  | ||||||
| 
 |  | ||||||
def test_sigint_both_stream_types():
    '''
    Verify that running a bi-directional and recv only stream
    side-by-side will cancel correctly from SIGINT.

    '''
    # smh, windows needs a longer grace period
    timeout: float = 3 if is_win() else 2

    async def main():
        with trio.fail_after(timeout):
            async with tractor.open_nursery() as an:
                # name of this actor will be same as target func
                portal = await an.start_actor(
                    '2_way',
                    enable_modules=[__name__]
                )

                async with (
                    portal.open_context(echo_ctx_stream) as (ctx, _),
                    ctx.open_stream() as stream,
                    portal.open_stream_from(
                        async_gen_stream,
                        sequence=list(range(1)),
                    ) as gen_stream,
                ):
                    msg = await gen_stream.receive()
                    await stream.send(msg)
                    assert await stream.receive() == msg
                    raise KeyboardInterrupt

    try:
        trio.run(main)
        assert 0, "Didn't receive KBI!?"
    except KeyboardInterrupt:
        pass
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def inf_streamer(
    ctx: tractor.Context,

) -> None:
    '''
    Stream increasing ints until terminated with a 'done' msg.

    '''
    await ctx.started()

    async with (
        ctx.open_stream() as stream,
        trio.open_nursery() as tn,
    ):
        async def close_on_sentinel():
            # watch for the peer's 'done' msg and close our side
            async for msg in stream:
                if msg != 'done':
                    print(f'streamer received {msg}')
                else:
                    await stream.aclose()

        # start termination detector
        tn.start_soon(close_on_sentinel)

        for val in itertools.count():
            try:
                await stream.send(val)
            except trio.ClosedResourceError:
                # close out the stream gracefully
                break

    print('terminating streamer')
| 
 |  | ||||||
| 
 |  | ||||||
def test_local_task_fanout_from_stream():
    '''
    Single stream with multiple local consumer tasks using the
    ``MsgStream.subscribe()`` api.

    Ensure all tasks receive all values after stream completes
    sending.

    '''
    consumers = 22

    async def main():

        counts = Counter()

        async with tractor.open_nursery() as an:
            portal = await an.start_actor(
                'inf_streamer',
                enable_modules=[__name__],
            )
            async with (
                portal.open_context(inf_streamer) as (ctx, _),
                ctx.open_stream() as stream,
            ):
                async def pull_and_count(name: str):
                    # each task gets its own broadcast recv channel
                    async with stream.subscribe() as recver:
                        assert isinstance(
                            recver,
                            tractor.trionics.BroadcastReceiver
                        )
                        async for val in recver:
                            counts[name] += 1

                        print(f'{name} bcaster ended')

                    print(f'{name} completed')

                with trio.fail_after(3):
                    async with trio.open_nursery() as nurse:
                        for i in range(consumers):
                            nurse.start_soon(pull_and_count, i)

                        await trio.sleep(0.5)
                        print('\nterminating')
                        await stream.send('done')

            print('closed stream connection')

            assert len(counts) == consumers
            # make sure each task received all stream values
            expected = max(counts.values())
            assert all(got == expected for got in counts.values())

            await portal.cancel_actor()

    trio.run(main)
|  | @ -1,6 +1,5 @@ | ||||||
| """ | """ | ||||||
| Cancellation and error propagation | Cancellation and error propagation | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| import os | import os | ||||||
| import signal | import signal | ||||||
|  | @ -8,10 +7,6 @@ import platform | ||||||
| import time | import time | ||||||
| from itertools import repeat | from itertools import repeat | ||||||
| 
 | 
 | ||||||
| from exceptiongroup import ( |  | ||||||
|     BaseExceptionGroup, |  | ||||||
|     ExceptionGroup, |  | ||||||
| ) |  | ||||||
| import pytest | import pytest | ||||||
| import trio | import trio | ||||||
| import tractor | import tractor | ||||||
|  | @ -19,10 +14,6 @@ import tractor | ||||||
| from conftest import tractor_test, no_windows | from conftest import tractor_test, no_windows | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def is_win(): |  | ||||||
|     return platform.system() == 'Windows' |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def assert_err(delay=0): | async def assert_err(delay=0): | ||||||
|     await trio.sleep(delay) |     await trio.sleep(delay) | ||||||
|     assert 0 |     assert 0 | ||||||
|  | @ -56,57 +47,31 @@ def test_remote_error(arb_addr, args_err): | ||||||
|     args, errtype = args_err |     args, errtype = args_err | ||||||
| 
 | 
 | ||||||
|     async def main(): |     async def main(): | ||||||
|         async with tractor.open_nursery( |         async with tractor.open_nursery() as nursery: | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
|         ) as nursery: |  | ||||||
| 
 | 
 | ||||||
|             # on a remote type error caused by bad input args |             portal = await nursery.run_in_actor(assert_err, name='errorer', **args) | ||||||
|             # this should raise directly which means we **don't** get |  | ||||||
|             # an exception group outside the nursery since the error |  | ||||||
|             # here and the far end task error are one in the same? |  | ||||||
|             portal = await nursery.run_in_actor( |  | ||||||
|                 assert_err, name='errorer', **args |  | ||||||
|             ) |  | ||||||
| 
 | 
 | ||||||
|             # get result(s) from main task |             # get result(s) from main task | ||||||
|             try: |             try: | ||||||
|                 # this means the root actor will also raise a local |  | ||||||
|                 # parent task error and thus an eg will propagate out |  | ||||||
|                 # of this actor nursery. |  | ||||||
|                 await portal.result() |                 await portal.result() | ||||||
|             except tractor.RemoteActorError as err: |             except tractor.RemoteActorError as err: | ||||||
|                 assert err.type == errtype |                 assert err.type == errtype | ||||||
|                 print("Look Maa that actor failed hard, hehh") |                 print("Look Maa that actor failed hard, hehh") | ||||||
|                 raise |                 raise | ||||||
| 
 | 
 | ||||||
|     # ensure boxed errors |  | ||||||
|     if args: |  | ||||||
|     with pytest.raises(tractor.RemoteActorError) as excinfo: |     with pytest.raises(tractor.RemoteActorError) as excinfo: | ||||||
|             trio.run(main) |         tractor.run(main, arbiter_addr=arb_addr) | ||||||
| 
 | 
 | ||||||
|  |     # ensure boxed error is correct | ||||||
|     assert excinfo.value.type == errtype |     assert excinfo.value.type == errtype | ||||||
| 
 | 
 | ||||||
|     else: |  | ||||||
|         # the root task will also error on the `.result()` call |  | ||||||
|         # so we expect an error from there AND the child. |  | ||||||
|         with pytest.raises(BaseExceptionGroup) as excinfo: |  | ||||||
|             trio.run(main) |  | ||||||
| 
 |  | ||||||
|         # ensure boxed errors |  | ||||||
|         for exc in excinfo.value.exceptions: |  | ||||||
|             assert exc.type == errtype |  | ||||||
| 
 |  | ||||||
| 
 | 
 | ||||||
| def test_multierror(arb_addr): | def test_multierror(arb_addr): | ||||||
|     ''' |     """Verify we raise a ``trio.MultiError`` out of a nursery where | ||||||
|     Verify we raise a ``BaseExceptionGroup`` out of a nursery where |  | ||||||
|     more then one actor errors. |     more then one actor errors. | ||||||
| 
 |     """ | ||||||
|     ''' |  | ||||||
|     async def main(): |     async def main(): | ||||||
|         async with tractor.open_nursery( |         async with tractor.open_nursery() as nursery: | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
|         ) as nursery: |  | ||||||
| 
 | 
 | ||||||
|             await nursery.run_in_actor(assert_err, name='errorer1') |             await nursery.run_in_actor(assert_err, name='errorer1') | ||||||
|             portal2 = await nursery.run_in_actor(assert_err, name='errorer2') |             portal2 = await nursery.run_in_actor(assert_err, name='errorer2') | ||||||
|  | @ -119,11 +84,11 @@ def test_multierror(arb_addr): | ||||||
|                 print("Look Maa that first actor failed hard, hehh") |                 print("Look Maa that first actor failed hard, hehh") | ||||||
|                 raise |                 raise | ||||||
| 
 | 
 | ||||||
|         # here we should get a ``BaseExceptionGroup`` containing exceptions |         # here we should get a `trio.MultiError` containing exceptions | ||||||
|         # from both subactors |         # from both subactors | ||||||
| 
 | 
 | ||||||
|     with pytest.raises(BaseExceptionGroup): |     with pytest.raises(trio.MultiError): | ||||||
|         trio.run(main) |         tractor.run(main, arbiter_addr=arb_addr) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @pytest.mark.parametrize('delay', (0, 0.5)) | @pytest.mark.parametrize('delay', (0, 0.5)) | ||||||
|  | @ -131,15 +96,12 @@ def test_multierror(arb_addr): | ||||||
|     'num_subactors', range(25, 26), |     'num_subactors', range(25, 26), | ||||||
| ) | ) | ||||||
| def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | ||||||
|     """Verify we raise a ``BaseExceptionGroup`` out of a nursery where |     """Verify we raise a ``trio.MultiError`` out of a nursery where | ||||||
|     more then one actor errors and also with a delay before failure |     more then one actor errors and also with a delay before failure | ||||||
|     to test failure during an ongoing spawning. |     to test failure during an ongoing spawning. | ||||||
|     """ |     """ | ||||||
|     async def main(): |     async def main(): | ||||||
|         async with tractor.open_nursery( |         async with tractor.open_nursery() as nursery: | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
|         ) as nursery: |  | ||||||
| 
 |  | ||||||
|             for i in range(num_subactors): |             for i in range(num_subactors): | ||||||
|                 await nursery.run_in_actor( |                 await nursery.run_in_actor( | ||||||
|                     assert_err, |                     assert_err, | ||||||
|  | @ -147,30 +109,18 @@ def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | ||||||
|                     delay=delay |                     delay=delay | ||||||
|                 ) |                 ) | ||||||
| 
 | 
 | ||||||
|     # with pytest.raises(trio.MultiError) as exc_info: |     with pytest.raises(trio.MultiError) as exc_info: | ||||||
|     with pytest.raises(BaseExceptionGroup) as exc_info: |         tractor.run(main, arbiter_addr=arb_addr) | ||||||
|         trio.run(main) |  | ||||||
| 
 | 
 | ||||||
|     assert exc_info.type == ExceptionGroup |     assert exc_info.type == tractor.MultiError | ||||||
|     err = exc_info.value |     err = exc_info.value | ||||||
|     exceptions = err.exceptions |     assert len(err.exceptions) == num_subactors | ||||||
| 
 |     for exc in err.exceptions: | ||||||
|     if len(exceptions) == 2: |  | ||||||
|         # sometimes oddly now there's an embedded BrokenResourceError ? |  | ||||||
|         for exc in exceptions: |  | ||||||
|             excs = getattr(exc, 'exceptions', None) |  | ||||||
|             if excs: |  | ||||||
|                 exceptions = excs |  | ||||||
|                 break |  | ||||||
| 
 |  | ||||||
|     assert len(exceptions) == num_subactors |  | ||||||
| 
 |  | ||||||
|     for exc in exceptions: |  | ||||||
|         assert isinstance(exc, tractor.RemoteActorError) |         assert isinstance(exc, tractor.RemoteActorError) | ||||||
|         assert exc.type == AssertionError |         assert exc.type == AssertionError | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def do_nothing(): | def do_nothing(): | ||||||
|     pass |     pass | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -182,12 +132,10 @@ def test_cancel_single_subactor(arb_addr, mechanism): | ||||||
|     async def spawn_actor(): |     async def spawn_actor(): | ||||||
|         """Spawn an actor that blocks indefinitely. |         """Spawn an actor that blocks indefinitely. | ||||||
|         """ |         """ | ||||||
|         async with tractor.open_nursery( |         async with tractor.open_nursery() as nursery: | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
|         ) as nursery: |  | ||||||
| 
 | 
 | ||||||
|             portal = await nursery.start_actor( |             portal = await nursery.start_actor( | ||||||
|                 'nothin', enable_modules=[__name__], |                 'nothin', rpc_module_paths=[__name__], | ||||||
|             ) |             ) | ||||||
|             assert (await portal.run(do_nothing)) is None |             assert (await portal.run(do_nothing)) is None | ||||||
| 
 | 
 | ||||||
|  | @ -198,10 +146,10 @@ def test_cancel_single_subactor(arb_addr, mechanism): | ||||||
|                 raise mechanism |                 raise mechanism | ||||||
| 
 | 
 | ||||||
|     if mechanism == 'nursery_cancel': |     if mechanism == 'nursery_cancel': | ||||||
|         trio.run(spawn_actor) |         tractor.run(spawn_actor, arbiter_addr=arb_addr) | ||||||
|     else: |     else: | ||||||
|         with pytest.raises(mechanism): |         with pytest.raises(mechanism): | ||||||
|             trio.run(spawn_actor) |             tractor.run(spawn_actor, arbiter_addr=arb_addr) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def stream_forever(): | async def stream_forever(): | ||||||
|  | @ -220,13 +168,12 @@ async def test_cancel_infinite_streamer(start_method): | ||||||
|         async with tractor.open_nursery() as n: |         async with tractor.open_nursery() as n: | ||||||
|             portal = await n.start_actor( |             portal = await n.start_actor( | ||||||
|                 'donny', |                 'donny', | ||||||
|                 enable_modules=[__name__], |                 rpc_module_paths=[__name__], | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|             # this async for loop streams values from the above |             # this async for loop streams values from the above | ||||||
|             # async generator running in a separate process |             # async generator running in a separate process | ||||||
|             async with portal.open_stream_from(stream_forever) as stream: |             async for letter in await portal.run(stream_forever): | ||||||
|                 async for letter in stream: |  | ||||||
|                 print(letter) |                 print(letter) | ||||||
| 
 | 
 | ||||||
|     # we support trio's cancellation system |     # we support trio's cancellation system | ||||||
|  | @ -239,8 +186,8 @@ async def test_cancel_infinite_streamer(start_method): | ||||||
|     [ |     [ | ||||||
|         # daemon actors sit idle while single task actors error out |         # daemon actors sit idle while single task actors error out | ||||||
|         (1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None), |         (1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None), | ||||||
|         (2, BaseExceptionGroup, AssertionError, (assert_err, {}), None), |         (2, tractor.MultiError, AssertionError, (assert_err, {}), None), | ||||||
|         (3, BaseExceptionGroup, AssertionError, (assert_err, {}), None), |         (3, tractor.MultiError, AssertionError, (assert_err, {}), None), | ||||||
| 
 | 
 | ||||||
|         # 1 daemon actor errors out while single task actors sleep forever |         # 1 daemon actor errors out while single task actors sleep forever | ||||||
|         (3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}), |         (3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}), | ||||||
|  | @ -251,7 +198,7 @@ async def test_cancel_infinite_streamer(start_method): | ||||||
|          (do_nuthin, {}), (assert_err, {'delay': 1}, True)), |          (do_nuthin, {}), (assert_err, {'delay': 1}, True)), | ||||||
|         # daemon complete quickly delay while single task |         # daemon complete quickly delay while single task | ||||||
|         # actors error after brief delay |         # actors error after brief delay | ||||||
|         (3, BaseExceptionGroup, AssertionError, |         (3, tractor.MultiError, AssertionError, | ||||||
|          (assert_err, {'delay': 1}), (do_nuthin, {}, False)), |          (assert_err, {'delay': 1}), (do_nuthin, {}, False)), | ||||||
|     ], |     ], | ||||||
|     ids=[ |     ids=[ | ||||||
|  | @ -279,7 +226,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): | ||||||
|             for i in range(num_actors): |             for i in range(num_actors): | ||||||
|                 dactor_portals.append(await n.start_actor( |                 dactor_portals.append(await n.start_actor( | ||||||
|                     f'deamon_{i}', |                     f'deamon_{i}', | ||||||
|                     enable_modules=[__name__], |                     rpc_module_paths=[__name__], | ||||||
|                 )) |                 )) | ||||||
| 
 | 
 | ||||||
|             func, kwargs = ria_func |             func, kwargs = ria_func | ||||||
|  | @ -318,7 +265,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): | ||||||
|         # should error here with a ``RemoteActorError`` or ``MultiError`` |         # should error here with a ``RemoteActorError`` or ``MultiError`` | ||||||
| 
 | 
 | ||||||
|     except first_err as err: |     except first_err as err: | ||||||
|         if isinstance(err, BaseExceptionGroup): |         if isinstance(err, tractor.MultiError): | ||||||
|             assert len(err.exceptions) == num_actors |             assert len(err.exceptions) == num_actors | ||||||
|             for exc in err.exceptions: |             for exc in err.exceptions: | ||||||
|                 if isinstance(exc, tractor.RemoteActorError): |                 if isinstance(exc, tractor.RemoteActorError): | ||||||
|  | @ -361,12 +308,10 @@ async def spawn_and_error(breadth, depth) -> None: | ||||||
| 
 | 
 | ||||||
| @tractor_test | @tractor_test | ||||||
| async def test_nested_multierrors(loglevel, start_method): | async def test_nested_multierrors(loglevel, start_method): | ||||||
|     ''' |     """Test that failed actor sets are wrapped in `trio.MultiError`s. | ||||||
|     Test that failed actor sets are wrapped in `BaseExceptionGroup`s. This |     This test goes only 2 nurseries deep but we should eventually have tests | ||||||
|     test goes only 2 nurseries deep but we should eventually have tests |  | ||||||
|     for arbitrary n-depth actor trees. |     for arbitrary n-depth actor trees. | ||||||
| 
 |     """ | ||||||
|     ''' |  | ||||||
|     if start_method == 'trio': |     if start_method == 'trio': | ||||||
|         depth = 3 |         depth = 3 | ||||||
|         subactor_breadth = 2 |         subactor_breadth = 2 | ||||||
|  | @ -390,36 +335,24 @@ async def test_nested_multierrors(loglevel, start_method): | ||||||
|                         breadth=subactor_breadth, |                         breadth=subactor_breadth, | ||||||
|                         depth=depth, |                         depth=depth, | ||||||
|                     ) |                     ) | ||||||
|         except BaseExceptionGroup as err: |         except trio.MultiError as err: | ||||||
|             assert len(err.exceptions) == subactor_breadth |             assert len(err.exceptions) == subactor_breadth | ||||||
|             for subexc in err.exceptions: |             for subexc in err.exceptions: | ||||||
| 
 | 
 | ||||||
|                 # verify first level actor errors are wrapped as remote |                 # verify first level actor errors are wrapped as remote | ||||||
|                 if is_win(): |                 if platform.system() == 'Windows': | ||||||
| 
 | 
 | ||||||
|                     # windows is often too slow and cancellation seems |                     # windows is often too slow and cancellation seems | ||||||
|                     # to happen before an actor is spawned |                     # to happen before an actor is spawned | ||||||
|                     if isinstance(subexc, trio.Cancelled): |                     if isinstance(subexc, trio.Cancelled): | ||||||
|                         continue |                         continue | ||||||
| 
 |                     else: | ||||||
|                     elif isinstance(subexc, tractor.RemoteActorError): |  | ||||||
|                         # on windows it seems we can't exactly be sure wtf |                         # on windows it seems we can't exactly be sure wtf | ||||||
|                         # will happen.. |                         # will happen.. | ||||||
|                         assert subexc.type in ( |                         assert subexc.type in ( | ||||||
|                             tractor.RemoteActorError, |                             tractor.RemoteActorError, | ||||||
|                             trio.Cancelled, |                             trio.Cancelled, | ||||||
|                             BaseExceptionGroup, |                             trio.MultiError | ||||||
|                         ) |  | ||||||
| 
 |  | ||||||
|                     elif isinstance(subexc, BaseExceptionGroup): |  | ||||||
|                         for subsub in subexc.exceptions: |  | ||||||
| 
 |  | ||||||
|                             if subsub in (tractor.RemoteActorError,): |  | ||||||
|                                 subsub = subsub.type |  | ||||||
| 
 |  | ||||||
|                             assert type(subsub) in ( |  | ||||||
|                                 trio.Cancelled, |  | ||||||
|                                 BaseExceptionGroup, |  | ||||||
|                         ) |                         ) | ||||||
|                 else: |                 else: | ||||||
|                     assert isinstance(subexc, tractor.RemoteActorError) |                     assert isinstance(subexc, tractor.RemoteActorError) | ||||||
|  | @ -428,21 +361,14 @@ async def test_nested_multierrors(loglevel, start_method): | ||||||
|                     # XXX not sure what's up with this.. |                     # XXX not sure what's up with this.. | ||||||
|                     # on windows sometimes spawning is just too slow and |                     # on windows sometimes spawning is just too slow and | ||||||
|                     # we get back the (sent) cancel signal instead |                     # we get back the (sent) cancel signal instead | ||||||
|                     if is_win(): |                     if platform.system() == 'Windows': | ||||||
|                         if isinstance(subexc, tractor.RemoteActorError): |                         assert (subexc.type is trio.MultiError) or ( | ||||||
|                             assert subexc.type in ( |                             subexc.type is tractor.RemoteActorError) | ||||||
|                                 BaseExceptionGroup, |  | ||||||
|                                 tractor.RemoteActorError |  | ||||||
|                             ) |  | ||||||
|                     else: |                     else: | ||||||
|                             assert isinstance(subexc, BaseExceptionGroup) |                         assert subexc.type is trio.MultiError | ||||||
|                 else: |                 else: | ||||||
|                         assert subexc.type is ExceptionGroup |                     assert (subexc.type is tractor.RemoteActorError) or ( | ||||||
|                 else: |                         subexc.type is trio.Cancelled) | ||||||
|                     assert subexc.type in ( |  | ||||||
|                         tractor.RemoteActorError, |  | ||||||
|                         trio.Cancelled |  | ||||||
|                     ) |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @no_windows | @no_windows | ||||||
|  | @ -460,13 +386,13 @@ def test_cancel_via_SIGINT( | ||||||
|         with trio.fail_after(2): |         with trio.fail_after(2): | ||||||
|             async with tractor.open_nursery() as tn: |             async with tractor.open_nursery() as tn: | ||||||
|                 await tn.start_actor('sucka') |                 await tn.start_actor('sucka') | ||||||
|                 if 'mp' in spawn_backend: |                 if spawn_backend == 'mp': | ||||||
|                     time.sleep(0.1) |                     time.sleep(0.1) | ||||||
|                 os.kill(pid, signal.SIGINT) |                 os.kill(pid, signal.SIGINT) | ||||||
|                 await trio.sleep_forever() |                 await trio.sleep_forever() | ||||||
| 
 | 
 | ||||||
|     with pytest.raises(KeyboardInterrupt): |     with pytest.raises(KeyboardInterrupt): | ||||||
|         trio.run(main) |         tractor.run(main) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @no_windows | @no_windows | ||||||
|  | @ -480,9 +406,6 @@ def test_cancel_via_SIGINT_other_task( | ||||||
|     from a seperate ``trio`` child  task. |     from a seperate ``trio`` child  task. | ||||||
|     """ |     """ | ||||||
|     pid = os.getpid() |     pid = os.getpid() | ||||||
|     timeout: float = 2 |  | ||||||
|     if is_win():  # smh |  | ||||||
|         timeout += 1 |  | ||||||
| 
 | 
 | ||||||
|     async def spawn_and_sleep_forever(task_status=trio.TASK_STATUS_IGNORED): |     async def spawn_and_sleep_forever(task_status=trio.TASK_STATUS_IGNORED): | ||||||
|         async with tractor.open_nursery() as tn: |         async with tractor.open_nursery() as tn: | ||||||
|  | @ -496,15 +419,16 @@ def test_cancel_via_SIGINT_other_task( | ||||||
| 
 | 
 | ||||||
|     async def main(): |     async def main(): | ||||||
|         # should never timeout since SIGINT should cancel the current program |         # should never timeout since SIGINT should cancel the current program | ||||||
|         with trio.fail_after(timeout): |         with trio.fail_after(2): | ||||||
|             async with trio.open_nursery() as n: |             async with trio.open_nursery() as n: | ||||||
|                 await n.start(spawn_and_sleep_forever) |                 await n.start(spawn_and_sleep_forever) | ||||||
|                 if 'mp' in spawn_backend: |                 if spawn_backend == 'mp': | ||||||
|                     time.sleep(0.1) |                     time.sleep(0.1) | ||||||
|                 os.kill(pid, signal.SIGINT) |                 os.kill(pid, signal.SIGINT) | ||||||
| 
 | 
 | ||||||
|     with pytest.raises(KeyboardInterrupt): |     with pytest.raises(KeyboardInterrupt): | ||||||
|         trio.run(main) |         tractor.run(main) | ||||||
|  | 
 | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def spin_for(period=3): | async def spin_for(period=3): | ||||||
|  | @ -514,7 +438,7 @@ async def spin_for(period=3): | ||||||
| 
 | 
 | ||||||
| async def spawn(): | async def spawn(): | ||||||
|     async with tractor.open_nursery() as tn: |     async with tractor.open_nursery() as tn: | ||||||
|         await tn.run_in_actor( |         portal = await tn.run_in_actor( | ||||||
|             spin_for, |             spin_for, | ||||||
|             name='sleeper', |             name='sleeper', | ||||||
|         ) |         ) | ||||||
|  | @ -536,7 +460,7 @@ def test_cancel_while_childs_child_in_sync_sleep( | ||||||
|     async def main(): |     async def main(): | ||||||
|         with trio.fail_after(2): |         with trio.fail_after(2): | ||||||
|             async with tractor.open_nursery() as tn: |             async with tractor.open_nursery() as tn: | ||||||
|                 await tn.run_in_actor( |                 portal = await tn.run_in_actor( | ||||||
|                     spawn, |                     spawn, | ||||||
|                     name='spawn', |                     name='spawn', | ||||||
|                 ) |                 ) | ||||||
|  | @ -544,58 +468,4 @@ def test_cancel_while_childs_child_in_sync_sleep( | ||||||
|                 assert 0 |                 assert 0 | ||||||
| 
 | 
 | ||||||
|     with pytest.raises(AssertionError): |     with pytest.raises(AssertionError): | ||||||
|         trio.run(main) |         tractor.run(main) | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_fast_graceful_cancel_when_spawn_task_in_soft_proc_wait_for_daemon( |  | ||||||
|     start_method, |  | ||||||
| ): |  | ||||||
|     ''' |  | ||||||
|     This is a very subtle test which demonstrates how cancellation |  | ||||||
|     during process collection can result in non-optimal teardown |  | ||||||
|     performance on daemon actors. The fix for this test was to handle |  | ||||||
|     ``trio.Cancelled`` specially in the spawn task waiting in |  | ||||||
|     `proc.wait()` such that ``Portal.cancel_actor()`` is called before |  | ||||||
|     executing the "hard reap" sequence (which has an up to 3 second |  | ||||||
|     delay currently). |  | ||||||
| 
 |  | ||||||
|     In other words, if we can cancel the actor using a graceful remote |  | ||||||
|     cancellation, and it's faster, we might as well do it. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     kbi_delay = 0.5 |  | ||||||
|     timeout: float = 2.9 |  | ||||||
| 
 |  | ||||||
|     if is_win():  # smh |  | ||||||
|         timeout += 1 |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
|         start = time.time() |  | ||||||
|         try: |  | ||||||
|             async with trio.open_nursery() as nurse: |  | ||||||
|                 async with tractor.open_nursery() as tn: |  | ||||||
|                     p = await tn.start_actor( |  | ||||||
|                         'fast_boi', |  | ||||||
|                         enable_modules=[__name__], |  | ||||||
|                     ) |  | ||||||
| 
 |  | ||||||
|                     async def delayed_kbi(): |  | ||||||
|                         await trio.sleep(kbi_delay) |  | ||||||
|                         print(f'RAISING KBI after {kbi_delay} s') |  | ||||||
|                         raise KeyboardInterrupt |  | ||||||
| 
 |  | ||||||
|                     # start task which raises a kbi **after** |  | ||||||
|                     # the actor nursery ``__aexit__()`` has |  | ||||||
|                     # been run. |  | ||||||
|                     nurse.start_soon(delayed_kbi) |  | ||||||
| 
 |  | ||||||
|                     await p.run(do_nuthin) |  | ||||||
|         finally: |  | ||||||
|             duration = time.time() - start |  | ||||||
|             if duration > timeout: |  | ||||||
|                 raise trio.TooSlowError( |  | ||||||
|                     'daemon cancel was slower then necessary..' |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(KeyboardInterrupt): |  | ||||||
|         trio.run(main) |  | ||||||
|  |  | ||||||
|  | @ -1,173 +0,0 @@ | ||||||
| ''' |  | ||||||
| Test a service style daemon that maintains a nursery for spawning |  | ||||||
| "remote async tasks" including both spawning other long living |  | ||||||
| sub-sub-actor daemons. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| from typing import Optional |  | ||||||
| import asyncio |  | ||||||
| from contextlib import asynccontextmanager as acm |  | ||||||
| 
 |  | ||||||
| import pytest |  | ||||||
| import trio |  | ||||||
| from trio_typing import TaskStatus |  | ||||||
| import tractor |  | ||||||
| from tractor import RemoteActorError |  | ||||||
| from async_generator import aclosing |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def aio_streamer( |  | ||||||
|     from_trio: asyncio.Queue, |  | ||||||
|     to_trio: trio.abc.SendChannel, |  | ||||||
| ) -> trio.abc.ReceiveChannel: |  | ||||||
| 
 |  | ||||||
|     # required first msg to sync caller |  | ||||||
|     to_trio.send_nowait(None) |  | ||||||
| 
 |  | ||||||
|     from itertools import cycle |  | ||||||
|     for i in cycle(range(10)): |  | ||||||
|         to_trio.send_nowait(i) |  | ||||||
|         await asyncio.sleep(0.01) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def trio_streamer(): |  | ||||||
|     from itertools import cycle |  | ||||||
|     for i in cycle(range(10)): |  | ||||||
|         yield i |  | ||||||
|         await trio.sleep(0.01) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def trio_sleep_and_err(delay: float = 0.5): |  | ||||||
|     await trio.sleep(delay) |  | ||||||
|     # name error |  | ||||||
|     doggy()  # noqa |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| _cached_stream: Optional[ |  | ||||||
|     trio.abc.ReceiveChannel |  | ||||||
| ] = None |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @acm |  | ||||||
| async def wrapper_mngr( |  | ||||||
| ): |  | ||||||
|     from tractor.trionics import broadcast_receiver |  | ||||||
|     global _cached_stream |  | ||||||
|     in_aio = tractor.current_actor().is_infected_aio() |  | ||||||
| 
 |  | ||||||
|     if in_aio: |  | ||||||
|         if _cached_stream: |  | ||||||
| 
 |  | ||||||
|             from_aio = _cached_stream |  | ||||||
| 
 |  | ||||||
|             # if we already have a cached feed deliver a rx side clone |  | ||||||
|             # to consumer |  | ||||||
|             async with broadcast_receiver(from_aio, 6) as from_aio: |  | ||||||
|                 yield from_aio |  | ||||||
|                 return |  | ||||||
|         else: |  | ||||||
|             async with tractor.to_asyncio.open_channel_from( |  | ||||||
|                 aio_streamer, |  | ||||||
|             ) as (first, from_aio): |  | ||||||
|                 assert not first |  | ||||||
| 
 |  | ||||||
|                 # cache it so next task uses broadcast receiver |  | ||||||
|                 _cached_stream = from_aio |  | ||||||
| 
 |  | ||||||
|                 yield from_aio |  | ||||||
|     else: |  | ||||||
|         async with aclosing(trio_streamer()) as stream: |  | ||||||
|             # cache it so next task uses broadcast receiver |  | ||||||
|             _cached_stream = stream |  | ||||||
|             yield stream |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| _nursery: trio.Nursery = None |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def trio_main( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ): |  | ||||||
|     # sync |  | ||||||
|     await ctx.started() |  | ||||||
| 
 |  | ||||||
|     # stash a "service nursery" as "actor local" (aka a Python global) |  | ||||||
|     global _nursery |  | ||||||
|     n = _nursery |  | ||||||
|     assert n |  | ||||||
| 
 |  | ||||||
|     async def consume_stream(): |  | ||||||
|         async with wrapper_mngr() as stream: |  | ||||||
|             async for msg in stream: |  | ||||||
|                 print(msg) |  | ||||||
| 
 |  | ||||||
|     # run 2 tasks to ensure broadcaster chan use |  | ||||||
|     n.start_soon(consume_stream) |  | ||||||
|     n.start_soon(consume_stream) |  | ||||||
| 
 |  | ||||||
|     n.start_soon(trio_sleep_and_err) |  | ||||||
| 
 |  | ||||||
|     await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def open_actor_local_nursery( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ): |  | ||||||
|     global _nursery |  | ||||||
|     async with trio.open_nursery() as n: |  | ||||||
|         _nursery = n |  | ||||||
|         await ctx.started() |  | ||||||
|         await trio.sleep(10) |  | ||||||
|         # await trio.sleep(1) |  | ||||||
| 
 |  | ||||||
|         # XXX: this causes the hang since |  | ||||||
|         # the caller does not unblock from its own |  | ||||||
|         # ``trio.sleep_forever()``. |  | ||||||
| 
 |  | ||||||
|         # TODO: we need to test a simple ctx task starting remote tasks |  | ||||||
|         # that error and then blocking on a ``Nursery.start()`` which |  | ||||||
|         # never yields back.. aka a scenario where the |  | ||||||
|         # ``tractor.context`` task IS NOT in the service n's cancel |  | ||||||
|         # scope. |  | ||||||
|         n.cancel_scope.cancel() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'asyncio_mode', |  | ||||||
|     [True, False], |  | ||||||
|     ids='asyncio_mode={}'.format, |  | ||||||
| ) |  | ||||||
| def test_actor_managed_trio_nursery_task_error_cancels_aio( |  | ||||||
|     asyncio_mode: bool, |  | ||||||
|     arb_addr |  | ||||||
| ): |  | ||||||
|     ''' |  | ||||||
|     Verify that a ``trio`` nursery created managed in a child actor |  | ||||||
|     correctly relays errors to the parent actor when one of its spawned |  | ||||||
|     tasks errors even when running in infected asyncio mode and using |  | ||||||
|     broadcast receivers for multi-task-per-actor subscription. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         # cancel the nursery shortly after boot |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             p = await n.start_actor( |  | ||||||
|                 'nursery_mngr', |  | ||||||
|                 infect_asyncio=asyncio_mode, |  | ||||||
|                 enable_modules=[__name__], |  | ||||||
|             ) |  | ||||||
|             async with ( |  | ||||||
|                 p.open_context(open_actor_local_nursery) as (ctx, first), |  | ||||||
|                 p.open_context(trio_main) as (ctx, first), |  | ||||||
|             ): |  | ||||||
|                 await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(RemoteActorError) as excinfo: |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
|     # verify boxed error |  | ||||||
|     err = excinfo.value |  | ||||||
|     assert isinstance(err.type(), NameError) |  | ||||||
|  | @ -1,84 +0,0 @@ | ||||||
| import itertools |  | ||||||
| 
 |  | ||||||
| import pytest |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| from tractor import open_actor_cluster |  | ||||||
| from tractor.trionics import gather_contexts |  | ||||||
| 
 |  | ||||||
| from conftest import tractor_test |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| MESSAGE = 'tractoring at full speed' |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_empty_mngrs_input_raises() -> None: |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
|         with trio.fail_after(1): |  | ||||||
|             async with ( |  | ||||||
|                 open_actor_cluster( |  | ||||||
|                     modules=[__name__], |  | ||||||
| 
 |  | ||||||
|                     # NOTE: ensure we can passthrough runtime opts |  | ||||||
|                     loglevel='info', |  | ||||||
|                     # debug_mode=True, |  | ||||||
| 
 |  | ||||||
|                 ) as portals, |  | ||||||
| 
 |  | ||||||
|                 gather_contexts( |  | ||||||
|                     # NOTE: it's the use of inline-generator syntax |  | ||||||
|                     # here that causes the empty input. |  | ||||||
|                     mngrs=( |  | ||||||
|                         p.open_context(worker) for p in portals.values() |  | ||||||
|                     ), |  | ||||||
|                 ), |  | ||||||
|             ): |  | ||||||
|                 assert 0 |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(ValueError): |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def worker( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     await ctx.started() |  | ||||||
| 
 |  | ||||||
|     async with ctx.open_stream( |  | ||||||
|         backpressure=True, |  | ||||||
|     ) as stream: |  | ||||||
| 
 |  | ||||||
|         # TODO: this with the below assert causes a hang bug? |  | ||||||
|         # with trio.move_on_after(1): |  | ||||||
| 
 |  | ||||||
|         async for msg in stream: |  | ||||||
|             # do something with msg |  | ||||||
|             print(msg) |  | ||||||
|             assert msg == MESSAGE |  | ||||||
| 
 |  | ||||||
|         # TODO: does this ever cause a hang |  | ||||||
|         # assert 0 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor_test |  | ||||||
| async def test_streaming_to_actor_cluster() -> None: |  | ||||||
| 
 |  | ||||||
|     async with ( |  | ||||||
|         open_actor_cluster(modules=[__name__]) as portals, |  | ||||||
| 
 |  | ||||||
|         gather_contexts( |  | ||||||
|             mngrs=[p.open_context(worker) for p in portals.values()], |  | ||||||
|         ) as contexts, |  | ||||||
| 
 |  | ||||||
|         gather_contexts( |  | ||||||
|             mngrs=[ctx[0].open_stream() for ctx in contexts], |  | ||||||
|         ) as streams, |  | ||||||
| 
 |  | ||||||
|     ): |  | ||||||
|         with trio.move_on_after(1): |  | ||||||
|             for stream in itertools.cycle(streams): |  | ||||||
|                 await stream.send(MESSAGE) |  | ||||||
|  | @ -1,798 +0,0 @@ | ||||||
| ''' |  | ||||||
| ``async with ():`` inlined context-stream cancellation testing. |  | ||||||
| 
 |  | ||||||
| Verify the we raise errors when streams are opened prior to sync-opening |  | ||||||
| a ``tractor.Context`` beforehand. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| from contextlib import asynccontextmanager as acm |  | ||||||
| from itertools import count |  | ||||||
| import platform |  | ||||||
| from typing import Optional |  | ||||||
| 
 |  | ||||||
| import pytest |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| from tractor._exceptions import StreamOverrun |  | ||||||
| 
 |  | ||||||
| from conftest import tractor_test |  | ||||||
| 
 |  | ||||||
| # ``Context`` semantics are as follows, |  | ||||||
| #  ------------------------------------ |  | ||||||
| 
 |  | ||||||
| # - standard setup/teardown: |  | ||||||
| #   ``Portal.open_context()`` starts a new |  | ||||||
| #   remote task context in another actor. The target actor's task must |  | ||||||
| #   call ``Context.started()`` to unblock this entry on the caller side. |  | ||||||
| #   the callee task executes until complete and returns a final value |  | ||||||
| #   which is delivered to the caller side and retreived via |  | ||||||
| #   ``Context.result()``. |  | ||||||
| 
 |  | ||||||
| # - cancel termination: |  | ||||||
| #   context can be cancelled on either side where either end's task can |  | ||||||
| #   call ``Context.cancel()`` which raises a local ``trio.Cancelled`` |  | ||||||
| #   and sends a task cancel request to the remote task which in turn |  | ||||||
| #   raises a ``trio.Cancelled`` in that scope, catches it, and re-raises |  | ||||||
| #   as ``ContextCancelled``. This is then caught by |  | ||||||
| #   ``Portal.open_context()``'s exit and we get a graceful termination |  | ||||||
| #   of the linked tasks. |  | ||||||
| 
 |  | ||||||
| # - error termination: |  | ||||||
| #   error is caught after all context-cancel-scope tasks are cancelled |  | ||||||
| #   via regular ``trio`` cancel scope semantics, error is sent to other |  | ||||||
| #   side and unpacked as a `RemoteActorError`. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| # ``Context.open_stream() as stream: MsgStream:`` msg semantics are: |  | ||||||
| #  ----------------------------------------------------------------- |  | ||||||
| 
 |  | ||||||
| # - either side can ``.send()`` which emits a 'yield' msgs and delivers |  | ||||||
| #   a value to the a ``MsgStream.receive()`` call. |  | ||||||
| 
 |  | ||||||
| # - stream closure: one end relays a 'stop' message which terminates an |  | ||||||
| #   ongoing ``MsgStream`` iteration. |  | ||||||
| 
 |  | ||||||
| # - cancel/error termination: as per the context semantics above but |  | ||||||
| #   with implicit stream closure on the cancelling end. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| _state: bool = False |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def too_many_starteds( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ) -> None: |  | ||||||
|     ''' |  | ||||||
|     Call ``Context.started()`` more then once (an error). |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     await ctx.started() |  | ||||||
|     try: |  | ||||||
|         await ctx.started() |  | ||||||
|     except RuntimeError: |  | ||||||
|         raise |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def not_started_but_stream_opened( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ) -> None: |  | ||||||
|     ''' |  | ||||||
|     Enter ``Context.open_stream()`` without calling ``.started()``. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     try: |  | ||||||
|         async with ctx.open_stream(): |  | ||||||
|             assert 0 |  | ||||||
|     except RuntimeError: |  | ||||||
|         raise |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'target', |  | ||||||
|     [too_many_starteds, not_started_but_stream_opened], |  | ||||||
|     ids='misuse_type={}'.format, |  | ||||||
| ) |  | ||||||
| def test_started_misuse(target): |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             portal = await n.start_actor( |  | ||||||
|                 target.__name__, |  | ||||||
|                 enable_modules=[__name__], |  | ||||||
|             ) |  | ||||||
| 
 |  | ||||||
|             async with portal.open_context(target) as (ctx, sent): |  | ||||||
|                 await trio.sleep(1) |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(tractor.RemoteActorError): |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def simple_setup_teardown( |  | ||||||
| 
 |  | ||||||
|     ctx: tractor.Context, |  | ||||||
|     data: int, |  | ||||||
|     block_forever: bool = False, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     # startup phase |  | ||||||
|     global _state |  | ||||||
|     _state = True |  | ||||||
| 
 |  | ||||||
|     # signal to parent that we're up |  | ||||||
|     await ctx.started(data + 1) |  | ||||||
| 
 |  | ||||||
|     try: |  | ||||||
|         if block_forever: |  | ||||||
|             # block until cancelled |  | ||||||
|             await trio.sleep_forever() |  | ||||||
|         else: |  | ||||||
|             return 'yo' |  | ||||||
|     finally: |  | ||||||
|         _state = False |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def assert_state(value: bool): |  | ||||||
|     global _state |  | ||||||
|     assert _state == value |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'error_parent', |  | ||||||
|     [False, ValueError, KeyboardInterrupt], |  | ||||||
| ) |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'callee_blocks_forever', |  | ||||||
|     [False, True], |  | ||||||
|     ids=lambda item: f'callee_blocks_forever={item}' |  | ||||||
| ) |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'pointlessly_open_stream', |  | ||||||
|     [False, True], |  | ||||||
|     ids=lambda item: f'open_stream={item}' |  | ||||||
| ) |  | ||||||
| def test_simple_context( |  | ||||||
|     error_parent, |  | ||||||
|     callee_blocks_forever, |  | ||||||
|     pointlessly_open_stream, |  | ||||||
| ): |  | ||||||
| 
 |  | ||||||
|     timeout = 1.5 if not platform.system() == 'Windows' else 4 |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         with trio.fail_after(timeout): |  | ||||||
|             async with tractor.open_nursery() as nursery: |  | ||||||
| 
 |  | ||||||
|                 portal = await nursery.start_actor( |  | ||||||
|                     'simple_context', |  | ||||||
|                     enable_modules=[__name__], |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|                 try: |  | ||||||
|                     async with portal.open_context( |  | ||||||
|                         simple_setup_teardown, |  | ||||||
|                         data=10, |  | ||||||
|                         block_forever=callee_blocks_forever, |  | ||||||
|                     ) as (ctx, sent): |  | ||||||
| 
 |  | ||||||
|                         assert sent == 11 |  | ||||||
| 
 |  | ||||||
|                         if callee_blocks_forever: |  | ||||||
|                             await portal.run(assert_state, value=True) |  | ||||||
|                         else: |  | ||||||
|                             assert await ctx.result() == 'yo' |  | ||||||
| 
 |  | ||||||
|                         if not error_parent: |  | ||||||
|                             await ctx.cancel() |  | ||||||
| 
 |  | ||||||
|                         if pointlessly_open_stream: |  | ||||||
|                             async with ctx.open_stream(): |  | ||||||
|                                 if error_parent: |  | ||||||
|                                     raise error_parent |  | ||||||
| 
 |  | ||||||
|                                 if callee_blocks_forever: |  | ||||||
|                                     await ctx.cancel() |  | ||||||
|                                 else: |  | ||||||
|                                     # in this case the stream will send a |  | ||||||
|                                     # 'stop' msg to the far end which needs |  | ||||||
|                                     # to be ignored |  | ||||||
|                                     pass |  | ||||||
|                         else: |  | ||||||
|                             if error_parent: |  | ||||||
|                                 raise error_parent |  | ||||||
| 
 |  | ||||||
|                 finally: |  | ||||||
| 
 |  | ||||||
|                     # after cancellation |  | ||||||
|                     if not error_parent: |  | ||||||
|                         await portal.run(assert_state, value=False) |  | ||||||
| 
 |  | ||||||
|                     # shut down daemon |  | ||||||
|                     await portal.cancel_actor() |  | ||||||
| 
 |  | ||||||
|     if error_parent: |  | ||||||
|         try: |  | ||||||
|             trio.run(main) |  | ||||||
|         except error_parent: |  | ||||||
|             pass |  | ||||||
|         except trio.MultiError as me: |  | ||||||
|             # XXX: on windows it seems we may have to expect the group error |  | ||||||
|             from tractor._exceptions import is_multi_cancelled |  | ||||||
|             assert is_multi_cancelled(me) |  | ||||||
|     else: |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| # basic stream terminations: |  | ||||||
| # - callee context closes without using stream |  | ||||||
| # - caller context closes without using stream |  | ||||||
| # - caller context calls `Context.cancel()` while streaming |  | ||||||
| #   is ongoing resulting in callee being cancelled |  | ||||||
| # - callee calls `Context.cancel()` while streaming and caller |  | ||||||
| #   sees stream terminated in `RemoteActorError` |  | ||||||
| 
 |  | ||||||
| # TODO: future possible features |  | ||||||
| # - restart request: far end raises `ContextRestart` |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def close_ctx_immediately(

    ctx: tractor.Context,

) -> None:
    '''
    Callee side: ack the context then open and immediately exit the
    stream so the caller observes a graceful ``trio.EndOfChannel``.

    '''
    # XXX: removed a dead ``global _state`` declaration — this task
    # never read nor assigned the module global.
    await ctx.started()

    # enter and immediately exit; the far end's receive loop should
    # see end-of-channel, not an error.
    async with ctx.open_stream():
        pass
| 
 |  | ||||||
| 
 |  | ||||||
@tractor_test
async def test_callee_closes_ctx_after_stream_open():
    '''
    Callee context closes without using the stream: the caller's
    receive loop should end gracefully via ``trio.EndOfChannel`` and
    any re-open attempt should see a closed resource.

    '''
    async with tractor.open_nursery() as n:

        portal = await n.start_actor(
            'fast_stream_closer',
            enable_modules=[__name__],
        )

        # hard deadline on the whole exchange so a hang fails the test
        with trio.fail_after(2):
            async with portal.open_context(
                close_ctx_immediately,

                # flag to avoid waiting the final result
                # cancel_on_exit=True,

            ) as (ctx, sent):

                # ``close_ctx_immediately`` passes no value to `.started()`
                assert sent is None

                with trio.fail_after(0.5):
                    async with ctx.open_stream() as stream:

                        # should fall through since ``StopAsyncIteration``
                        # should be raised through translation of
                        # a ``trio.EndOfChannel`` by
                        # ``trio.abc.ReceiveChannel.__anext__()``
                        async for _ in stream:
                            assert 0
                        else:

                            # verify stream is now closed
                            try:
                                await stream.receive()
                            except trio.EndOfChannel:
                                pass

                # TODO: should be just raise the closed resource err
                # directly here to enforce not allowing a re-open
                # of a stream to the context (at least until a time of
                # if/when we decide that's a good idea?)
                # NOTE(review): no ``else: assert 0`` here, so this
                # passes silently if no error is raised — confirm
                # whether that's intended.
                try:
                    with trio.fail_after(0.5):
                        async with ctx.open_stream() as stream:
                            pass
                except trio.ClosedResourceError:
                    pass

        # tear down the daemon actor
        await portal.cancel_actor()
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def expect_cancelled(

    ctx: tractor.Context,

) -> None:
    '''
    Callee echo-server which *expects* to be cancelled by its caller.

    Sets the module-global ``_state`` flag on entry and clears it when
    the expected ``trio.Cancelled`` arrives, so the caller can verify
    the task's lifecycle via ``assert_state``.

    '''
    global _state
    _state = True

    await ctx.started()

    try:
        async with ctx.open_stream() as stream:
            async for msg in stream:
                await stream.send(msg)  # echo server

    except trio.Cancelled:
        # expected case: flip the flag so the caller can observe that
        # cancellation actually tore this task down, then re-raise.
        _state = False
        raise

    else:
        # the stream ended without cancellation — test failure
        assert 0, "Wasn't cancelled!?"
| 
 |  | ||||||
| 
 |  | ||||||
@pytest.mark.parametrize(
    'use_ctx_cancel_method',
    [False, True],
)
@tractor_test
async def test_caller_closes_ctx_after_callee_opens_stream(
    use_ctx_cancel_method: bool,
):
    '''
    Caller context closes without using the stream; parametrized over
    cancelling explicitly via ``Context.cancel()`` vs. timing out on
    ``ctx.result()`` and then cancelling.

    '''
    async with tractor.open_nursery() as n:

        portal = await n.start_actor(
            'ctx_cancelled',
            enable_modules=[__name__],
        )

        async with portal.open_context(
            expect_cancelled,
        ) as (ctx, sent):
            # callee should have set the module global on entry
            await portal.run(assert_state, value=True)

            # ``expect_cancelled`` passes no value to `.started()`
            assert sent is None

            # call cancel explicitly
            if use_ctx_cancel_method:

                await ctx.cancel()

                try:
                    async with ctx.open_stream() as stream:
                        async for msg in stream:
                            pass

                except tractor.ContextCancelled:
                    raise  # XXX: must be propagated to __aexit__

                else:
                    assert 0, "Should have context cancelled?"

                # channel should still be up
                assert portal.channel.connected()

                # ctx is closed here
                await portal.run(assert_state, value=False)

            else:
                # no explicit cancel: expect the result wait to time
                # out (callee blocks forever) then cancel manually
                try:
                    with trio.fail_after(0.2):
                        await ctx.result()
                        assert 0, "Callee should have blocked!?"
                except trio.TooSlowError:
                    await ctx.cancel()

        # NOTE(review): this re-open attempt is deliberately *outside*
        # the ``open_context()`` block above — the ctx has exited, so
        # opening a stream on it must raise.
        try:
            async with ctx.open_stream() as stream:
                async for msg in stream:
                    pass
        except tractor.ContextCancelled:
            pass
        else:
            assert 0, "Should have received closed resource error?"

        # ctx is closed here
        await portal.run(assert_state, value=False)

        # channel should not have been destroyed yet, only the
        # inter-actor-task context
        assert portal.channel.connected()

        # teardown the actor
        await portal.cancel_actor()
| 
 |  | ||||||
| 
 |  | ||||||
@tractor_test
async def test_multitask_caller_cancels_from_nonroot_task():
    '''
    A sibling task (not the one iterating the stream) calls
    ``Context.cancel()`` mid-stream; cancellation must propagate to the
    streaming task while the underlying channel stays connected.

    '''
    async with tractor.open_nursery() as n:

        portal = await n.start_actor(
            'ctx_cancelled',
            enable_modules=[__name__],
        )

        async with portal.open_context(
            expect_cancelled,
        ) as (ctx, sent):

            await portal.run(assert_state, value=True)
            assert sent is None

            async with ctx.open_stream() as stream:

                async def send_msg_then_cancel():
                    # echo one msg, verify the callee is live, then
                    # cancel from this *non-root* task.
                    await stream.send('yo')
                    await portal.run(assert_state, value=True)
                    await ctx.cancel()
                    await portal.run(assert_state, value=False)

                # XXX: renamed the inner `trio` nursery to ``tn``; it
                # previously shadowed the outer actor-nursery ``n``.
                async with trio.open_nursery() as tn:
                    tn.start_soon(send_msg_then_cancel)

                    try:
                        async for msg in stream:
                            assert msg == 'yo'

                    except tractor.ContextCancelled:
                        raise  # XXX: must be propagated to __aexit__

                # channel should still be up
                assert portal.channel.connected()

                # ctx is closed here
                await portal.run(assert_state, value=False)

        # channel should not have been destroyed yet, only the
        # inter-actor-task context
        assert portal.channel.connected()

        # teardown the actor
        await portal.cancel_actor()
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def cancel_self(

    ctx: tractor.Context,

) -> None:
    '''
    Callee which immediately cancels its *own* context, then verifies
    both the inline ``ContextCancelled`` on stream-open and a real
    ``trio.Cancelled`` at the next checkpoint.

    '''
    global _state
    # flag checked by the caller via ``assert_state``
    _state = True

    await ctx.cancel()

    # should inline raise immediately
    try:
        async with ctx.open_stream():
            pass
    except tractor.ContextCancelled:
        # suppress for now so we can do checkpoint tests below
        pass
    else:
        raise RuntimeError('Context didnt cancel itself?!')

    # check a real ``trio.Cancelled`` is raised on a checkpoint
    try:
        with trio.fail_after(0.1):
            await trio.sleep_forever()
    except trio.Cancelled:
        # expected: propagate the cancellation out of this task
        raise

    except trio.TooSlowError:
        # should never get here
        assert 0
| 
 |  | ||||||
| 
 |  | ||||||
@tractor_test
async def test_callee_cancels_before_started():
    '''
    Callee calls `Context.cancel()` while streaming and caller
    sees stream terminated in `ContextCancelled`.

    '''
    async with tractor.open_nursery() as n:

        portal = await n.start_actor(
            'cancels_self',
            enable_modules=[__name__],
        )
        try:

            async with portal.open_context(
                cancel_self,
            ) as (ctx, sent):
                async with ctx.open_stream():

                    await trio.sleep_forever()

        # raises a special cancel signal
        except tractor.ContextCancelled as ce:
            # XXX: fixed — this was a bare comparison whose result was
            # discarded, so the type check never actually ran.
            assert ce.type == trio.Cancelled

            # the traceback should be informative
            assert 'cancelled itself' in ce.msgdata['tb_str']

        # teardown the actor
        await portal.cancel_actor()
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def never_open_stream(

    ctx: tractor.Context,

) -> None:
    '''
    Context which never opens a stream and blocks.

    Used as a callee target which the caller deliberately overruns.

    '''
    await ctx.started()
    await trio.sleep_forever()
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def keep_sending_from_callee(

    ctx: tractor.Context,
    msg_buffer_size: Optional[int] = None,

) -> None:
    '''
    Send endlessly on the callee stream.

    ``msg_buffer_size`` is forwarded to ``ctx.open_stream()`` so tests
    can control when the receiving side overruns.

    '''
    await ctx.started()
    async with ctx.open_stream(
        msg_buffer_size=msg_buffer_size,
    ) as stream:
        # presumably ``count`` is ``itertools.count`` from a
        # module-level import (not visible in this chunk) — an
        # infinite monotonically-increasing send loop.
        for msg in count():
            print(f'callee sending {msg}')
            await stream.send(msg)
            await trio.sleep(0.01)
| 
 |  | ||||||
| 
 |  | ||||||
@pytest.mark.parametrize(
    'overrun_by',
    [
        ('caller', 1, never_open_stream),
        ('cancel_caller_during_overrun', 1, never_open_stream),
        ('callee', 0, keep_sending_from_callee),
    ],
    ids='overrun_condition={}'.format,
)
def test_one_end_stream_not_opened(overrun_by):
    '''
    This should exemplify the bug from:
    https://github.com/goodboy/tractor/issues/265

    One side sends more msgs than the other side's buffer can hold
    while that side never opens its stream end.

    '''
    overrunner, buf_size_increase, entrypoint = overrun_by
    from tractor._runtime import Actor

    # push exactly ``buf_size_increase`` msgs past the default limit
    buf_size = buf_size_increase + Actor.msg_buffer_size

    async def main():
        async with tractor.open_nursery() as n:
            portal = await n.start_actor(
                entrypoint.__name__,
                enable_modules=[__name__],
            )

            async with portal.open_context(
                entrypoint,
            ) as (ctx, sent):
                assert sent is None

                if 'caller' in overrunner:

                    async with ctx.open_stream() as stream:
                        for i in range(buf_size):
                            print(f'sending {i}')
                            await stream.send(i)

                        if 'cancel' in overrunner:
                            # without this we block waiting on the child side
                            await ctx.cancel()

                        else:
                            # expect overrun error to be relayed back
                            # and this sleep interrupted
                            await trio.sleep_forever()

                else:
                    # callee overruns caller case so we do nothing here
                    await trio.sleep_forever()

            await portal.cancel_actor()

    # 2 overrun cases and the no overrun case (which pushes right up to
    # the msg limit)
    # XXX: fixed fragile substring match ``'cance'`` -> ``'cancel'``;
    # behavior is identical for the parametrized ids above.
    if overrunner == 'caller' or 'cancel' in overrunner:
        with pytest.raises(tractor.RemoteActorError) as excinfo:
            trio.run(main)

        assert excinfo.value.type == StreamOverrun

    elif overrunner == 'callee':
        with pytest.raises(tractor.RemoteActorError) as excinfo:
            trio.run(main)

        # TODO: embedded remote errors so that we can verify the source
        # error? the callee delivers an error which is an overrun
        # wrapped in a remote actor error.
        assert excinfo.value.type == tractor.RemoteActorError

    else:
        trio.run(main)
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def echo_back_sequence(

    ctx: tractor.Context,
    seq: list[int],
    msg_buffer_size: Optional[int] = None,

) -> None:
    '''
    Echo back 3 full batches of ``seq`` received from the caller,
    then return ``'yo'`` as the context result.

    (Previous docstring — "Send endlessly" — was copy-pasta from
    ``keep_sending_from_callee`` and wrong: this loop terminates.)

    '''
    await ctx.started()
    async with ctx.open_stream(
        msg_buffer_size=msg_buffer_size,
    ) as stream:

        seq = list(seq)  # bleh, `msgpack`...

        # XXX: renamed from ``count`` which shadowed the module-level
        # ``itertools.count`` import used elsewhere in this file.
        batch_count = 0
        while batch_count < 3:
            batch = []
            async for msg in stream:
                batch.append(msg)
                if batch == seq:
                    break

            for msg in batch:
                print(f'callee sending {msg}')
                await stream.send(msg)

            batch_count += 1

        return 'yo'
| 
 |  | ||||||
| 
 |  | ||||||
def test_stream_backpressure():
    '''
    Demonstrate small overruns of each task back and forth
    on a stream not raising any errors by default.

    Both ends use ``msg_buffer_size=1``; 3 batches are echoed and the
    final context result must be ``'yo'``.

    '''
    async def main():
        async with tractor.open_nursery() as n:
            portal = await n.start_actor(
                'callee_sends_forever',
                enable_modules=[__name__],
            )
            seq = list(range(3))
            async with portal.open_context(
                echo_back_sequence,
                seq=seq,
                msg_buffer_size=1,
            ) as (ctx, sent):
                assert sent is None

                async with ctx.open_stream(msg_buffer_size=1) as stream:
                    count = 0
                    while count < 3:
                        # push the whole batch before reading any
                        # echoes back — small deliberate overrun
                        for msg in seq:
                            print(f'caller sending {msg}')
                            await stream.send(msg)
                            await trio.sleep(0.1)

                        # drain the echoed batch
                        batch = []
                        async for msg in stream:
                            batch.append(msg)
                            if batch == seq:
                                break

                        count += 1

            # here the context should return
            assert await ctx.result() == 'yo'

            # cancel the daemon
            await portal.cancel_actor()

    trio.run(main)
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def sleep_forever(
    ctx: tractor.Context,
) -> None:
    '''
    Ack the context, open a stream and block forever; used as a peer
    target which is expected to be cancelled remotely.

    '''
    await ctx.started()
    async with ctx.open_stream():
        await trio.sleep_forever()
| 
 |  | ||||||
| 
 |  | ||||||
@acm
async def attach_to_sleep_forever():
    '''
    Cancel a context **before** any underlying error is raised in order
    to trigger a local reception of a ``ContextCancelled`` which **should not**
    be re-raised in the local surrounding ``Context`` *iff* the cancel was
    requested by **this** side of the context.

    '''
    # NOTE(review): ``first`` (the peer's ``started()`` value) is
    # deliberately unused here.
    async with tractor.wait_for_actor('sleeper') as p2:
        async with (
            p2.open_context(sleep_forever) as (peer_ctx, first),
            peer_ctx.open_stream(),
        ):
            try:
                yield
            finally:
                # XXX: previously this would trigger local
                # ``ContextCancelled`` to be received and raised in the
                # local context overriding any local error due to
                # logic inside ``_invoke()`` which checked for
                # an error set on ``Context._error`` and raised it in
                # under a cancellation scenario.

                # The problem is you can have a remote cancellation
                # that is part of a local error and we shouldn't raise
                # ``ContextCancelled`` **iff** we weren't the side of
                # the context to initiate it, i.e.
                # ``Context._cancel_called`` should **NOT** have been
                # set. The special logic to handle this case is now
                # inside ``Context._may_raise_from_remote_msg()`` XD
                await peer_ctx.cancel()
| 
 |  | ||||||
| 
 |  | ||||||
@tractor.context
async def error_before_started(
    ctx: tractor.Context,
) -> None:
    '''
    This simulates exactly an original bug discovered in:
    https://github.com/pikers/piker/issues/244

    Raise a ``TypeError`` from an unserializable ``started()`` payload
    while a peer context is being cancelled; the type error must win.

    '''
    async with attach_to_sleep_forever():
        # send an unserializable type which should raise a type error
        # here and **NOT BE SWALLOWED** by the surrounding acm!!?!
        await ctx.started(object())
| 
 |  | ||||||
| 
 |  | ||||||
def test_do_not_swallow_error_before_started_by_remote_contextcancelled():
    '''
    Verify that an error raised in a remote context which itself opens another
    remote context, which it cancels, does not override the original error that
    caused the cancellation of the secondary context.

    '''
    async def main():
        async with tractor.open_nursery() as n:
            # the actor which will raise before ``started()``
            portal = await n.start_actor(
                'errorer',
                enable_modules=[__name__],
            )
            # the peer actor which ``errorer`` attaches to and cancels
            await n.start_actor(
                'sleeper',
                enable_modules=[__name__],
            )

            async with (
                portal.open_context(
                    error_before_started
                ) as (ctx, sent),
            ):
                # block until the remote error propagates back
                await trio.sleep_forever()

    with pytest.raises(tractor.RemoteActorError) as excinfo:
        trio.run(main)

    # the boxed error must be the serialization TypeError, not
    # a ``ContextCancelled`` from the peer cancel
    assert excinfo.value.type == TypeError
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -20,11 +20,8 @@ async def test_reg_then_unreg(arb_addr): | ||||||
|     assert actor.is_arbiter |     assert actor.is_arbiter | ||||||
|     assert len(actor._registry) == 1  # only self is registered |     assert len(actor._registry) == 1  # only self is registered | ||||||
| 
 | 
 | ||||||
|     async with tractor.open_nursery( |     async with tractor.open_nursery() as n: | ||||||
|         arbiter_addr=arb_addr, |         portal = await n.start_actor('actor', rpc_module_paths=[__name__]) | ||||||
|     ) as n: |  | ||||||
| 
 |  | ||||||
|         portal = await n.start_actor('actor', enable_modules=[__name__]) |  | ||||||
|         uid = portal.channel.uid |         uid = portal.channel.uid | ||||||
| 
 | 
 | ||||||
|         async with tractor.get_arbiter(*arb_addr) as aportal: |         async with tractor.get_arbiter(*arb_addr) as aportal: | ||||||
|  | @ -42,7 +39,7 @@ async def test_reg_then_unreg(arb_addr): | ||||||
| 
 | 
 | ||||||
|         await trio.sleep(0.1) |         await trio.sleep(0.1) | ||||||
|         assert uid not in aportal.actor._registry |         assert uid not in aportal.actor._registry | ||||||
|         sockaddrs = actor._registry.get(uid) |         sockaddrs = actor._registry[uid] | ||||||
|         assert not sockaddrs |         assert not sockaddrs | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -69,7 +66,7 @@ async def say_hello_use_wait(other_actor): | ||||||
| 
 | 
 | ||||||
| @tractor_test | @tractor_test | ||||||
| @pytest.mark.parametrize('func', [say_hello, say_hello_use_wait]) | @pytest.mark.parametrize('func', [say_hello, say_hello_use_wait]) | ||||||
| async def test_trynamic_trio(func, start_method, arb_addr): | async def test_trynamic_trio(func, start_method): | ||||||
|     """Main tractor entry point, the "master" process (for now |     """Main tractor entry point, the "master" process (for now | ||||||
|     acts as the "director"). |     acts as the "director"). | ||||||
|     """ |     """ | ||||||
|  | @ -111,69 +108,49 @@ async def cancel(use_signal, delay=0): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def stream_from(portal): | async def stream_from(portal): | ||||||
|     async with portal.open_stream_from(stream_forever) as stream: |     async for value in await portal.result(): | ||||||
|         async for value in stream: |  | ||||||
|         print(value) |         print(value) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def unpack_reg(actor_or_portal): |  | ||||||
|     ''' |  | ||||||
|     Get and unpack a "registry" RPC request from the "arbiter" registry |  | ||||||
|     system. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     if getattr(actor_or_portal, 'get_registry', None): |  | ||||||
|         msg = await actor_or_portal.get_registry() |  | ||||||
|     else: |  | ||||||
|         msg = await actor_or_portal.run_from_ns('self', 'get_registry') |  | ||||||
| 
 |  | ||||||
|     return {tuple(key.split('.')): val for key, val in msg.items()} |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def spawn_and_check_registry( | async def spawn_and_check_registry( | ||||||
|     arb_addr: tuple, |     arb_addr: tuple, | ||||||
|     use_signal: bool, |     use_signal: bool, | ||||||
|     remote_arbiter: bool = False, |     remote_arbiter: bool = False, | ||||||
|     with_streaming: bool = False, |     with_streaming: bool = False, | ||||||
| 
 |  | ||||||
| ) -> None: | ) -> None: | ||||||
| 
 |  | ||||||
|     async with tractor.open_root_actor( |  | ||||||
|         arbiter_addr=arb_addr, |  | ||||||
|     ): |  | ||||||
|         async with tractor.get_arbiter(*arb_addr) as portal: |  | ||||||
|             # runtime needs to be up to call this |  | ||||||
|     actor = tractor.current_actor() |     actor = tractor.current_actor() | ||||||
| 
 | 
 | ||||||
|     if remote_arbiter: |     if remote_arbiter: | ||||||
|         assert not actor.is_arbiter |         assert not actor.is_arbiter | ||||||
| 
 | 
 | ||||||
|             if actor.is_arbiter: |     async with tractor.get_arbiter(*arb_addr) as portal: | ||||||
|                 extra = 1  # arbiter is local root actor |  | ||||||
|                 get_reg = partial(unpack_reg, actor) |  | ||||||
| 
 | 
 | ||||||
|  |         if actor.is_arbiter: | ||||||
|  | 
 | ||||||
|  |             async def get_reg(): | ||||||
|  |                 return actor._registry | ||||||
|  | 
 | ||||||
|  |             extra = 1  # arbiter is local root actor | ||||||
|         else: |         else: | ||||||
|                 get_reg = partial(unpack_reg, portal) |             get_reg = partial(portal.run_from_ns, 'self', 'get_registry') | ||||||
|             extra = 2  # local root actor + remote arbiter |             extra = 2  # local root actor + remote arbiter | ||||||
| 
 | 
 | ||||||
|         # ensure current actor is registered |         # ensure current actor is registered | ||||||
|         registry = await get_reg() |         registry = await get_reg() | ||||||
|         assert actor.uid in registry |         assert actor.uid in registry | ||||||
| 
 | 
 | ||||||
|  |         if with_streaming: | ||||||
|  |             to_run = stream_forever | ||||||
|  |         else: | ||||||
|  |             to_run = trio.sleep_forever | ||||||
|  | 
 | ||||||
|  |         async with trio.open_nursery() as trion: | ||||||
|             try: |             try: | ||||||
|                 async with tractor.open_nursery() as n: |                 async with tractor.open_nursery() as n: | ||||||
|                     async with trio.open_nursery() as trion: |  | ||||||
| 
 |  | ||||||
|                     portals = {} |                     portals = {} | ||||||
|                     for i in range(3): |                     for i in range(3): | ||||||
|                         name = f'a{i}' |                         name = f'a{i}' | ||||||
|                             if with_streaming: |                         portals[name] = await n.run_in_actor(to_run, name=name) | ||||||
|                                 portals[name] = await n.start_actor( |  | ||||||
|                                     name=name, enable_modules=[__name__]) |  | ||||||
| 
 |  | ||||||
|                             else:  # no streaming |  | ||||||
|                                 portals[name] = await n.run_in_actor( |  | ||||||
|                                     trio.sleep_forever, name=name) |  | ||||||
| 
 | 
 | ||||||
|                     # wait on last actor to come up |                     # wait on last actor to come up | ||||||
|                     async with tractor.wait_for_actor(name): |                     async with tractor.wait_for_actor(name): | ||||||
|  | @ -194,12 +171,13 @@ async def spawn_and_check_registry( | ||||||
|                         trion.start_soon(cancel, use_signal, 1) |                         trion.start_soon(cancel, use_signal, 1) | ||||||
| 
 | 
 | ||||||
|                         last_p = pts[-1] |                         last_p = pts[-1] | ||||||
|                             await stream_from(last_p) |                         async for value in await last_p.result(): | ||||||
| 
 |                             print(value) | ||||||
|                     else: |                     else: | ||||||
|                         await cancel(use_signal) |                         await cancel(use_signal) | ||||||
| 
 | 
 | ||||||
|             finally: |             finally: | ||||||
|  |                 with trio.CancelScope(shield=True): | ||||||
|                     await trio.sleep(0.5) |                     await trio.sleep(0.5) | ||||||
| 
 | 
 | ||||||
|                     # all subactors should have de-registered |                     # all subactors should have de-registered | ||||||
|  | @ -220,7 +198,7 @@ def test_subactors_unregister_on_cancel( | ||||||
|     deregistering themselves with the arbiter. |     deregistering themselves with the arbiter. | ||||||
|     """ |     """ | ||||||
|     with pytest.raises(KeyboardInterrupt): |     with pytest.raises(KeyboardInterrupt): | ||||||
|         trio.run( |         tractor.run( | ||||||
|             partial( |             partial( | ||||||
|                 spawn_and_check_registry, |                 spawn_and_check_registry, | ||||||
|                 arb_addr, |                 arb_addr, | ||||||
|  | @ -228,6 +206,7 @@ def test_subactors_unregister_on_cancel( | ||||||
|                 remote_arbiter=False, |                 remote_arbiter=False, | ||||||
|                 with_streaming=with_streaming, |                 with_streaming=with_streaming, | ||||||
|             ), |             ), | ||||||
|  |             arbiter_addr=arb_addr | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -245,7 +224,7 @@ def test_subactors_unregister_on_cancel_remote_daemon( | ||||||
|     tree) arbiter. |     tree) arbiter. | ||||||
|     """ |     """ | ||||||
|     with pytest.raises(KeyboardInterrupt): |     with pytest.raises(KeyboardInterrupt): | ||||||
|         trio.run( |         tractor.run( | ||||||
|             partial( |             partial( | ||||||
|                 spawn_and_check_registry, |                 spawn_and_check_registry, | ||||||
|                 arb_addr, |                 arb_addr, | ||||||
|  | @ -253,6 +232,8 @@ def test_subactors_unregister_on_cancel_remote_daemon( | ||||||
|                 remote_arbiter=True, |                 remote_arbiter=True, | ||||||
|                 with_streaming=with_streaming, |                 with_streaming=with_streaming, | ||||||
|             ), |             ), | ||||||
|  |             # XXX: required to use remote daemon! | ||||||
|  |             arbiter_addr=arb_addr | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -274,27 +255,20 @@ async def close_chans_before_nursery( | ||||||
|     else: |     else: | ||||||
|         entries_at_end = 1 |         entries_at_end = 1 | ||||||
| 
 | 
 | ||||||
|     async with tractor.open_root_actor( |  | ||||||
|         arbiter_addr=arb_addr, |  | ||||||
|     ): |  | ||||||
|     async with tractor.get_arbiter(*arb_addr) as aportal: |     async with tractor.get_arbiter(*arb_addr) as aportal: | ||||||
|         try: |         try: | ||||||
|                 get_reg = partial(unpack_reg, aportal) |             get_reg = partial(aportal.run_from_ns, 'self', 'get_registry') | ||||||
| 
 | 
 | ||||||
|             async with tractor.open_nursery() as tn: |             async with tractor.open_nursery() as tn: | ||||||
|                     portal1 = await tn.start_actor( |                 portal1 = await tn.run_in_actor( | ||||||
|                         name='consumer1', enable_modules=[__name__]) |                     stream_forever, | ||||||
|                     portal2 = await tn.start_actor( |                     name='consumer1', | ||||||
|                         'consumer2', enable_modules=[__name__]) |                 ) | ||||||
|  |                 agen1 = await portal1.result() | ||||||
|  | 
 | ||||||
|  |                 portal2 = await tn.start_actor('consumer2', rpc_module_paths=[__name__]) | ||||||
|  |                 agen2 = await portal2.run(stream_forever) | ||||||
| 
 | 
 | ||||||
|                     # TODO: compact this back as was in last commit once |  | ||||||
|                     # 3.9+, see https://github.com/goodboy/tractor/issues/207 |  | ||||||
|                     async with portal1.open_stream_from( |  | ||||||
|                         stream_forever |  | ||||||
|                     ) as agen1: |  | ||||||
|                         async with portal2.open_stream_from( |  | ||||||
|                             stream_forever |  | ||||||
|                         ) as agen2: |  | ||||||
|                 async with trio.open_nursery() as n: |                 async with trio.open_nursery() as n: | ||||||
|                     n.start_soon(streamer, agen1) |                     n.start_soon(streamer, agen1) | ||||||
|                     n.start_soon(cancel, use_signal, .5) |                     n.start_soon(cancel, use_signal, .5) | ||||||
|  | @ -307,16 +281,15 @@ async def close_chans_before_nursery( | ||||||
|                         # reliably triggered by an external SIGINT. |                         # reliably triggered by an external SIGINT. | ||||||
|                         # tractor.current_actor()._root_nursery.cancel_scope.cancel() |                         # tractor.current_actor()._root_nursery.cancel_scope.cancel() | ||||||
| 
 | 
 | ||||||
|                                     # XXX: THIS IS THE KEY THING that |                         # XXX: THIS IS THE KEY THING that happens | ||||||
|                                     # happens **before** exiting the |                         # **before** exiting the actor nursery block | ||||||
|                                     # actor nursery block |  | ||||||
| 
 | 
 | ||||||
|                         # also kill off channels cuz why not |                         # also kill off channels cuz why not | ||||||
|                         await agen1.aclose() |                         await agen1.aclose() | ||||||
|                         await agen2.aclose() |                         await agen2.aclose() | ||||||
|         finally: |         finally: | ||||||
|             with trio.CancelScope(shield=True): |             with trio.CancelScope(shield=True): | ||||||
|                     await trio.sleep(1) |                 await trio.sleep(.5) | ||||||
| 
 | 
 | ||||||
|                 # all subactors should have de-registered |                 # all subactors should have de-registered | ||||||
|                 registry = await get_reg() |                 registry = await get_reg() | ||||||
|  | @ -336,13 +309,15 @@ def test_close_channel_explicit( | ||||||
|     results in subactor(s) deregistering from the arbiter. |     results in subactor(s) deregistering from the arbiter. | ||||||
|     """ |     """ | ||||||
|     with pytest.raises(KeyboardInterrupt): |     with pytest.raises(KeyboardInterrupt): | ||||||
|         trio.run( |         tractor.run( | ||||||
|             partial( |             partial( | ||||||
|                 close_chans_before_nursery, |                 close_chans_before_nursery, | ||||||
|                 arb_addr, |                 arb_addr, | ||||||
|                 use_signal, |                 use_signal, | ||||||
|                 remote_arbiter=False, |                 remote_arbiter=False, | ||||||
|             ), |             ), | ||||||
|  |             # XXX: required to use remote daemon! | ||||||
|  |             arbiter_addr=arb_addr | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -358,11 +333,13 @@ def test_close_channel_explicit_remote_arbiter( | ||||||
|     results in subactor(s) deregistering from the arbiter. |     results in subactor(s) deregistering from the arbiter. | ||||||
|     """ |     """ | ||||||
|     with pytest.raises(KeyboardInterrupt): |     with pytest.raises(KeyboardInterrupt): | ||||||
|         trio.run( |         tractor.run( | ||||||
|             partial( |             partial( | ||||||
|                 close_chans_before_nursery, |                 close_chans_before_nursery, | ||||||
|                 arb_addr, |                 arb_addr, | ||||||
|                 use_signal, |                 use_signal, | ||||||
|                 remote_arbiter=True, |                 remote_arbiter=True, | ||||||
|             ), |             ), | ||||||
|  |             # XXX: required to use remote daemon! | ||||||
|  |             arbiter_addr=arb_addr | ||||||
|         ) |         ) | ||||||
|  |  | ||||||
|  | @ -1,7 +1,6 @@ | ||||||
| ''' | """ | ||||||
| Let's make sure them docs work yah? | Let's make sure them docs work yah? | ||||||
| 
 | """ | ||||||
| ''' |  | ||||||
| from contextlib import contextmanager | from contextlib import contextmanager | ||||||
| import itertools | import itertools | ||||||
| import os | import os | ||||||
|  | @ -12,17 +11,17 @@ import shutil | ||||||
| 
 | 
 | ||||||
| import pytest | import pytest | ||||||
| 
 | 
 | ||||||
| from conftest import ( | from conftest import repodir | ||||||
|     examples_dir, | 
 | ||||||
| ) | 
 | ||||||
|  | def examples_dir(): | ||||||
|  |     """Return the abspath to the examples directory. | ||||||
|  |     """ | ||||||
|  |     return os.path.join(repodir(), 'examples') | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @pytest.fixture | @pytest.fixture | ||||||
| def run_example_in_subproc( | def run_example_in_subproc(loglevel, testdir, arb_addr): | ||||||
|     loglevel: str, |  | ||||||
|     testdir, |  | ||||||
|     arb_addr: tuple[str, int], |  | ||||||
| ): |  | ||||||
| 
 | 
 | ||||||
|     @contextmanager |     @contextmanager | ||||||
|     def run(script_code): |     def run(script_code): | ||||||
|  | @ -32,8 +31,8 @@ def run_example_in_subproc( | ||||||
|             # on windows we need to create a special __main__.py which will |             # on windows we need to create a special __main__.py which will | ||||||
|             # be executed with ``python -m <modulename>`` on windows.. |             # be executed with ``python -m <modulename>`` on windows.. | ||||||
|             shutil.copyfile( |             shutil.copyfile( | ||||||
|                 examples_dir() / '__main__.py', |                 os.path.join(examples_dir(), '__main__.py'), | ||||||
|                 str(testdir / '__main__.py'), |                 os.path.join(str(testdir), '__main__.py') | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|             # drop the ``if __name__ == '__main__'`` guard onwards from |             # drop the ``if __name__ == '__main__'`` guard onwards from | ||||||
|  | @ -81,16 +80,12 @@ def run_example_in_subproc( | ||||||
|     'example_script', |     'example_script', | ||||||
| 
 | 
 | ||||||
|     # walk yields: (dirpath, dirnames, filenames) |     # walk yields: (dirpath, dirnames, filenames) | ||||||
|     [ |     [(p[0], f) for p in os.walk(examples_dir()) for f in p[2] | ||||||
|         (p[0], f) for p in os.walk(examples_dir()) for f in p[2] |  | ||||||
| 
 | 
 | ||||||
|         if '__' not in f |         if '__' not in f | ||||||
|         and f[0] != '_' |         and f[0] != '_' | ||||||
|         and 'debugging' not in p[0] |         and 'debugging' not in p[0] | ||||||
|         and 'integration' not in p[0] |  | ||||||
|         and 'advanced_faults' not in p[0] |  | ||||||
|     ], |     ], | ||||||
| 
 |  | ||||||
|     ids=lambda t: t[1], |     ids=lambda t: t[1], | ||||||
| ) | ) | ||||||
| def test_example(run_example_in_subproc, example_script): | def test_example(run_example_in_subproc, example_script): | ||||||
|  | @ -103,10 +98,6 @@ def test_example(run_example_in_subproc, example_script): | ||||||
|     test_example``. |     test_example``. | ||||||
|     """ |     """ | ||||||
|     ex_file = os.path.join(*example_script) |     ex_file = os.path.join(*example_script) | ||||||
| 
 |  | ||||||
|     if 'rpc_bidir_streaming' in ex_file and sys.version_info < (3, 9): |  | ||||||
|         pytest.skip("2-way streaming example requires py3.9 async with syntax") |  | ||||||
| 
 |  | ||||||
|     with open(ex_file, 'r') as ex: |     with open(ex_file, 'r') as ex: | ||||||
|         code = ex.read() |         code = ex.read() | ||||||
| 
 | 
 | ||||||
|  | @ -117,19 +108,9 @@ def test_example(run_example_in_subproc, example_script): | ||||||
|             # print(f'STDOUT: {out}') |             # print(f'STDOUT: {out}') | ||||||
| 
 | 
 | ||||||
|             # if we get some gnarly output let's aggregate and raise |             # if we get some gnarly output let's aggregate and raise | ||||||
|             if err: |  | ||||||
|             errmsg = err.decode() |             errmsg = err.decode() | ||||||
|             errlines = errmsg.splitlines() |             errlines = errmsg.splitlines() | ||||||
|                 last_error = errlines[-1] |             if err and 'Error' in errlines[-1]: | ||||||
|                 if ( |  | ||||||
|                     'Error' in last_error |  | ||||||
| 
 |  | ||||||
|                     # XXX: currently we print this to console, but maybe |  | ||||||
|                     # shouldn't eventually once we figure out what's |  | ||||||
|                     # a better way to be explicit about aio side |  | ||||||
|                     # cancels? |  | ||||||
|                     and 'asyncio.exceptions.CancelledError' not in last_error |  | ||||||
|                 ): |  | ||||||
|                 raise Exception(errmsg) |                 raise Exception(errmsg) | ||||||
| 
 | 
 | ||||||
|             assert proc.returncode == 0 |             assert proc.returncode == 0 | ||||||
|  |  | ||||||
|  | @ -1,564 +0,0 @@ | ||||||
| ''' |  | ||||||
| The hipster way to force SC onto the stdlib's "async": 'infection mode'. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| from typing import Optional, Iterable, Union |  | ||||||
| import asyncio |  | ||||||
| import builtins |  | ||||||
| import itertools |  | ||||||
| import importlib |  | ||||||
| 
 |  | ||||||
| from exceptiongroup import BaseExceptionGroup |  | ||||||
| import pytest |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| from tractor import ( |  | ||||||
|     to_asyncio, |  | ||||||
|     RemoteActorError, |  | ||||||
| ) |  | ||||||
| from tractor.trionics import BroadcastReceiver |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def sleep_and_err( |  | ||||||
|     sleep_for: float = 0.1, |  | ||||||
| 
 |  | ||||||
|     # just signature placeholders for compat with |  | ||||||
|     # ``to_asyncio.open_channel_from()`` |  | ||||||
|     to_trio: Optional[trio.MemorySendChannel] = None, |  | ||||||
|     from_trio: Optional[asyncio.Queue] = None, |  | ||||||
| 
 |  | ||||||
| ): |  | ||||||
|     if to_trio: |  | ||||||
|         to_trio.send_nowait('start') |  | ||||||
| 
 |  | ||||||
|     await asyncio.sleep(sleep_for) |  | ||||||
|     assert 0 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def sleep_forever(): |  | ||||||
|     await asyncio.sleep(float('inf')) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def trio_cancels_single_aio_task(): |  | ||||||
| 
 |  | ||||||
|     # spawn an ``asyncio`` task to run a func and return result |  | ||||||
|     with trio.move_on_after(.2): |  | ||||||
|         await tractor.to_asyncio.run_task(sleep_forever) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_trio_cancels_aio_on_actor_side(arb_addr): |  | ||||||
|     ''' |  | ||||||
|     Spawn an infected actor that is cancelled by the ``trio`` side |  | ||||||
|     task using std cancel scope apis. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery( |  | ||||||
|             arbiter_addr=arb_addr |  | ||||||
|         ) as n: |  | ||||||
|             await n.run_in_actor( |  | ||||||
|                 trio_cancels_single_aio_task, |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|             ) |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def asyncio_actor( |  | ||||||
| 
 |  | ||||||
|     target: str, |  | ||||||
|     expect_err: Optional[Exception] = None |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     assert tractor.current_actor().is_infected_aio() |  | ||||||
|     target = globals()[target] |  | ||||||
| 
 |  | ||||||
|     if '.' in expect_err: |  | ||||||
|         modpath, _, name = expect_err.rpartition('.') |  | ||||||
|         mod = importlib.import_module(modpath) |  | ||||||
|         error_type = getattr(mod, name) |  | ||||||
| 
 |  | ||||||
|     else:  # toplevel builtin error type |  | ||||||
|         error_type = builtins.__dict__.get(expect_err) |  | ||||||
| 
 |  | ||||||
|     try: |  | ||||||
|         # spawn an ``asyncio`` task to run a func and return result |  | ||||||
|         await tractor.to_asyncio.run_task(target) |  | ||||||
| 
 |  | ||||||
|     except BaseException as err: |  | ||||||
|         if expect_err: |  | ||||||
|             assert isinstance(err, error_type) |  | ||||||
| 
 |  | ||||||
|         raise |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_aio_simple_error(arb_addr): |  | ||||||
|     ''' |  | ||||||
|     Verify a simple remote asyncio error propagates back through trio |  | ||||||
|     to the parent actor. |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery( |  | ||||||
|             arbiter_addr=arb_addr |  | ||||||
|         ) as n: |  | ||||||
|             await n.run_in_actor( |  | ||||||
|                 asyncio_actor, |  | ||||||
|                 target='sleep_and_err', |  | ||||||
|                 expect_err='AssertionError', |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|             ) |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(RemoteActorError) as excinfo: |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
|     err = excinfo.value |  | ||||||
|     assert isinstance(err, RemoteActorError) |  | ||||||
|     assert err.type == AssertionError |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_tractor_cancels_aio(arb_addr): |  | ||||||
|     ''' |  | ||||||
|     Verify we can cancel a spawned asyncio task gracefully. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             portal = await n.run_in_actor( |  | ||||||
|                 asyncio_actor, |  | ||||||
|                 target='sleep_forever', |  | ||||||
|                 expect_err='trio.Cancelled', |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|             ) |  | ||||||
|             # cancel the entire remote runtime |  | ||||||
|             await portal.cancel_actor() |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_trio_cancels_aio(arb_addr): |  | ||||||
|     ''' |  | ||||||
|     Much like the above test with ``tractor.Portal.cancel_actor()`` |  | ||||||
|     except we just use a standard ``trio`` cancellation api. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         with trio.move_on_after(1): |  | ||||||
|             # cancel the nursery shortly after boot |  | ||||||
| 
 |  | ||||||
|             async with tractor.open_nursery() as n: |  | ||||||
|                 await n.run_in_actor( |  | ||||||
|                     asyncio_actor, |  | ||||||
|                     target='sleep_forever', |  | ||||||
|                     expect_err='trio.Cancelled', |  | ||||||
|                     infect_asyncio=True, |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def trio_ctx( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ): |  | ||||||
| 
 |  | ||||||
|     await ctx.started('start') |  | ||||||
| 
 |  | ||||||
|     # this will block until the ``asyncio`` task sends a "first" |  | ||||||
|     # message. |  | ||||||
|     with trio.fail_after(2): |  | ||||||
|         async with ( |  | ||||||
|             trio.open_nursery() as n, |  | ||||||
| 
 |  | ||||||
|             tractor.to_asyncio.open_channel_from( |  | ||||||
|                 sleep_and_err, |  | ||||||
|             ) as (first, chan), |  | ||||||
|         ): |  | ||||||
| 
 |  | ||||||
|             assert first == 'start' |  | ||||||
| 
 |  | ||||||
|             # spawn another asyncio task for the cuck of it. |  | ||||||
|             n.start_soon( |  | ||||||
|                 tractor.to_asyncio.run_task, |  | ||||||
|                 sleep_forever, |  | ||||||
|             ) |  | ||||||
|             await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'parent_cancels', [False, True], |  | ||||||
|     ids='parent_actor_cancels_child={}'.format |  | ||||||
| ) |  | ||||||
| def test_context_spawns_aio_task_that_errors( |  | ||||||
|     arb_addr, |  | ||||||
|     parent_cancels: bool, |  | ||||||
| ): |  | ||||||
|     ''' |  | ||||||
|     Verify that spawning a task via an intertask channel ctx mngr that |  | ||||||
|     errors correctly propagates the error back from the `asyncio`-side |  | ||||||
|     task. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         with trio.fail_after(2): |  | ||||||
|             async with tractor.open_nursery() as n: |  | ||||||
|                 p = await n.start_actor( |  | ||||||
|                     'aio_daemon', |  | ||||||
|                     enable_modules=[__name__], |  | ||||||
|                     infect_asyncio=True, |  | ||||||
|                     # debug_mode=True, |  | ||||||
|                     loglevel='cancel', |  | ||||||
|                 ) |  | ||||||
|                 async with p.open_context( |  | ||||||
|                     trio_ctx, |  | ||||||
|                 ) as (ctx, first): |  | ||||||
| 
 |  | ||||||
|                     assert first == 'start' |  | ||||||
| 
 |  | ||||||
|                     if parent_cancels: |  | ||||||
|                         await p.cancel_actor() |  | ||||||
| 
 |  | ||||||
|                     await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(RemoteActorError) as excinfo: |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
|     err = excinfo.value |  | ||||||
|     assert isinstance(err, RemoteActorError) |  | ||||||
|     if parent_cancels: |  | ||||||
|         assert err.type == trio.Cancelled |  | ||||||
|     else: |  | ||||||
|         assert err.type == AssertionError |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def aio_cancel(): |  | ||||||
|     '''' |  | ||||||
|     Cancel urself boi. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     await asyncio.sleep(0.5) |  | ||||||
|     task = asyncio.current_task() |  | ||||||
| 
 |  | ||||||
|     # cancel and enter sleep |  | ||||||
|     task.cancel() |  | ||||||
|     await sleep_forever() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_aio_cancelled_from_aio_causes_trio_cancelled(arb_addr): |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             await n.run_in_actor( |  | ||||||
|                 asyncio_actor, |  | ||||||
|                 target='aio_cancel', |  | ||||||
|                 expect_err='tractor.to_asyncio.AsyncioCancelled', |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|             ) |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(RemoteActorError) as excinfo: |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
|     # ensure boxed error is correct |  | ||||||
|     assert excinfo.value.type == to_asyncio.AsyncioCancelled |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| # TODO: verify open_channel_from will fail on this.. |  | ||||||
| async def no_to_trio_in_args(): |  | ||||||
|     pass |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def push_from_aio_task( |  | ||||||
| 
 |  | ||||||
|     sequence: Iterable, |  | ||||||
|     to_trio: trio.abc.SendChannel, |  | ||||||
|     expect_cancel: False, |  | ||||||
|     fail_early: bool, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     try: |  | ||||||
|         # sync caller ctx manager |  | ||||||
|         to_trio.send_nowait(True) |  | ||||||
| 
 |  | ||||||
|         for i in sequence: |  | ||||||
|             print(f'asyncio sending {i}') |  | ||||||
|             to_trio.send_nowait(i) |  | ||||||
|             await asyncio.sleep(0.001) |  | ||||||
| 
 |  | ||||||
|             if i == 50 and fail_early: |  | ||||||
|                 raise Exception |  | ||||||
| 
 |  | ||||||
|         print('asyncio streamer complete!') |  | ||||||
| 
 |  | ||||||
|     except asyncio.CancelledError: |  | ||||||
|         if not expect_cancel: |  | ||||||
|             pytest.fail("aio task was cancelled unexpectedly") |  | ||||||
|         raise |  | ||||||
|     else: |  | ||||||
|         if expect_cancel: |  | ||||||
|             pytest.fail("aio task wasn't cancelled as expected!?") |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def stream_from_aio( |  | ||||||
| 
 |  | ||||||
|     exit_early: bool = False, |  | ||||||
|     raise_err: bool = False, |  | ||||||
|     aio_raise_err: bool = False, |  | ||||||
|     fan_out: bool = False, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     seq = range(100) |  | ||||||
|     expect = list(seq) |  | ||||||
| 
 |  | ||||||
|     try: |  | ||||||
|         pulled = [] |  | ||||||
| 
 |  | ||||||
|         async with to_asyncio.open_channel_from( |  | ||||||
|             push_from_aio_task, |  | ||||||
|             sequence=seq, |  | ||||||
|             expect_cancel=raise_err or exit_early, |  | ||||||
|             fail_early=aio_raise_err, |  | ||||||
|         ) as (first, chan): |  | ||||||
| 
 |  | ||||||
|             assert first is True |  | ||||||
| 
 |  | ||||||
|             async def consume( |  | ||||||
|                 chan: Union[ |  | ||||||
|                     to_asyncio.LinkedTaskChannel, |  | ||||||
|                     BroadcastReceiver, |  | ||||||
|                 ], |  | ||||||
|             ): |  | ||||||
|                 async for value in chan: |  | ||||||
|                     print(f'trio received {value}') |  | ||||||
|                     pulled.append(value) |  | ||||||
| 
 |  | ||||||
|                     if value == 50: |  | ||||||
|                         if raise_err: |  | ||||||
|                             raise Exception |  | ||||||
|                         elif exit_early: |  | ||||||
|                             break |  | ||||||
| 
 |  | ||||||
|             if fan_out: |  | ||||||
|                 # start second task that get's the same stream value set. |  | ||||||
|                 async with ( |  | ||||||
| 
 |  | ||||||
|                     # NOTE: this has to come first to avoid |  | ||||||
|                     # the channel being closed before the nursery |  | ||||||
|                     # tasks are joined.. |  | ||||||
|                     chan.subscribe() as br, |  | ||||||
| 
 |  | ||||||
|                     trio.open_nursery() as n, |  | ||||||
|                 ): |  | ||||||
|                     n.start_soon(consume, br) |  | ||||||
|                     await consume(chan) |  | ||||||
| 
 |  | ||||||
|             else: |  | ||||||
|                 await consume(chan) |  | ||||||
|     finally: |  | ||||||
| 
 |  | ||||||
|         if ( |  | ||||||
|             not raise_err and |  | ||||||
|             not exit_early and |  | ||||||
|             not aio_raise_err |  | ||||||
|         ): |  | ||||||
|             if fan_out: |  | ||||||
|                 # we get double the pulled values in the |  | ||||||
|                 # ``.subscribe()`` fan out case. |  | ||||||
|                 doubled = list(itertools.chain(*zip(expect, expect))) |  | ||||||
|                 expect = doubled[:len(pulled)] |  | ||||||
|                 assert list(sorted(pulled)) == expect |  | ||||||
| 
 |  | ||||||
|             else: |  | ||||||
|                 assert pulled == expect |  | ||||||
|         else: |  | ||||||
|             assert not fan_out |  | ||||||
|             assert pulled == expect[:51] |  | ||||||
| 
 |  | ||||||
|         print('trio guest mode task completed!') |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'fan_out', [False, True], |  | ||||||
|     ids='fan_out_w_chan_subscribe={}'.format |  | ||||||
| ) |  | ||||||
| def test_basic_interloop_channel_stream(arb_addr, fan_out): |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             portal = await n.run_in_actor( |  | ||||||
|                 stream_from_aio, |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|                 fan_out=fan_out, |  | ||||||
|             ) |  | ||||||
|             await portal.result() |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| # TODO: parametrize the above test and avoid the duplication here? |  | ||||||
| def test_trio_error_cancels_intertask_chan(arb_addr): |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             portal = await n.run_in_actor( |  | ||||||
|                 stream_from_aio, |  | ||||||
|                 raise_err=True, |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|             ) |  | ||||||
|             # should trigger remote actor error |  | ||||||
|             await portal.result() |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(BaseExceptionGroup) as excinfo: |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
|     # ensure boxed errors |  | ||||||
|     for exc in excinfo.value.exceptions: |  | ||||||
|         assert exc.type == Exception |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_trio_closes_early_and_channel_exits(arb_addr): |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             portal = await n.run_in_actor( |  | ||||||
|                 stream_from_aio, |  | ||||||
|                 exit_early=True, |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|             ) |  | ||||||
|             # should trigger remote actor error |  | ||||||
|             await portal.result() |  | ||||||
| 
 |  | ||||||
|     # should be a quiet exit on a simple channel exit |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_aio_errors_and_channel_propagates_and_closes(arb_addr): |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             portal = await n.run_in_actor( |  | ||||||
|                 stream_from_aio, |  | ||||||
|                 aio_raise_err=True, |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|             ) |  | ||||||
|             # should trigger remote actor error |  | ||||||
|             await portal.result() |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(BaseExceptionGroup) as excinfo: |  | ||||||
|         trio.run(main) |  | ||||||
| 
 |  | ||||||
|     # ensure boxed errors |  | ||||||
|     for exc in excinfo.value.exceptions: |  | ||||||
|         assert exc.type == Exception |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def trio_to_aio_echo_server( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
| ): |  | ||||||
| 
 |  | ||||||
|     async def aio_echo_server( |  | ||||||
|         to_trio: trio.MemorySendChannel, |  | ||||||
|         from_trio: asyncio.Queue, |  | ||||||
|     ) -> None: |  | ||||||
| 
 |  | ||||||
|         to_trio.send_nowait('start') |  | ||||||
| 
 |  | ||||||
|         while True: |  | ||||||
|             msg = await from_trio.get() |  | ||||||
| 
 |  | ||||||
|             # echo the msg back |  | ||||||
|             to_trio.send_nowait(msg) |  | ||||||
| 
 |  | ||||||
|             # if we get the terminate sentinel |  | ||||||
|             # break the echo loop |  | ||||||
|             if msg is None: |  | ||||||
|                 print('breaking aio echo loop') |  | ||||||
|                 break |  | ||||||
| 
 |  | ||||||
|         print('exiting asyncio task') |  | ||||||
| 
 |  | ||||||
|     async with to_asyncio.open_channel_from( |  | ||||||
|         aio_echo_server, |  | ||||||
|     ) as (first, chan): |  | ||||||
| 
 |  | ||||||
|         assert first == 'start' |  | ||||||
|         await ctx.started(first) |  | ||||||
| 
 |  | ||||||
|         async with ctx.open_stream() as stream: |  | ||||||
| 
 |  | ||||||
|             async for msg in stream: |  | ||||||
|                 print(f'asyncio echoing {msg}') |  | ||||||
|                 await chan.send(msg) |  | ||||||
| 
 |  | ||||||
|                 out = await chan.receive() |  | ||||||
|                 # echo back to parent actor-task |  | ||||||
|                 await stream.send(out) |  | ||||||
| 
 |  | ||||||
|                 if out is None: |  | ||||||
|                     try: |  | ||||||
|                         out = await chan.receive() |  | ||||||
|                     except trio.EndOfChannel: |  | ||||||
|                         break |  | ||||||
|                     else: |  | ||||||
|                         raise RuntimeError('aio channel never stopped?') |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'raise_error_mid_stream', |  | ||||||
|     [False, Exception, KeyboardInterrupt], |  | ||||||
|     ids='raise_error={}'.format, |  | ||||||
| ) |  | ||||||
| def test_echoserver_detailed_mechanics( |  | ||||||
|     arb_addr, |  | ||||||
|     raise_error_mid_stream, |  | ||||||
| ): |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
|         async with tractor.open_nursery() as n: |  | ||||||
|             p = await n.start_actor( |  | ||||||
|                 'aio_server', |  | ||||||
|                 enable_modules=[__name__], |  | ||||||
|                 infect_asyncio=True, |  | ||||||
|             ) |  | ||||||
|             async with p.open_context( |  | ||||||
|                 trio_to_aio_echo_server, |  | ||||||
|             ) as (ctx, first): |  | ||||||
| 
 |  | ||||||
|                 assert first == 'start' |  | ||||||
| 
 |  | ||||||
|                 async with ctx.open_stream() as stream: |  | ||||||
|                     for i in range(100): |  | ||||||
|                         await stream.send(i) |  | ||||||
|                         out = await stream.receive() |  | ||||||
|                         assert i == out |  | ||||||
| 
 |  | ||||||
|                         if raise_error_mid_stream and i == 50: |  | ||||||
|                             raise raise_error_mid_stream |  | ||||||
| 
 |  | ||||||
|                     # send terminate msg |  | ||||||
|                     await stream.send(None) |  | ||||||
|                     out = await stream.receive() |  | ||||||
|                     assert out is None |  | ||||||
| 
 |  | ||||||
|                     if out is None: |  | ||||||
|                         # ensure the stream is stopped |  | ||||||
|                         # with trio.fail_after(0.1): |  | ||||||
|                         try: |  | ||||||
|                             await stream.receive() |  | ||||||
|                         except trio.EndOfChannel: |  | ||||||
|                             pass |  | ||||||
|                         else: |  | ||||||
|                             pytest.fail( |  | ||||||
|                                 "stream wasn't stopped after sentinel?!") |  | ||||||
| 
 |  | ||||||
|             # TODO: the case where this blocks and |  | ||||||
|             # is cancelled by kbi or out of task cancellation |  | ||||||
|             await p.cancel_actor() |  | ||||||
| 
 |  | ||||||
|     if raise_error_mid_stream: |  | ||||||
|         with pytest.raises(raise_error_mid_stream): |  | ||||||
|             trio.run(main) |  | ||||||
| 
 |  | ||||||
|     else: |  | ||||||
|         trio.run(main) |  | ||||||
|  | @ -11,18 +11,25 @@ from conftest import tractor_test | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @pytest.mark.trio | @pytest.mark.trio | ||||||
| async def test_no_runtime(): | async def test_no_arbitter(): | ||||||
|     """An arbitter must be established before any nurseries |     """An arbitter must be established before any nurseries | ||||||
|     can be created. |     can be created. | ||||||
| 
 | 
 | ||||||
|     (In other words ``tractor.open_root_actor()`` must be engaged at |     (In other words ``tractor.run`` must be used instead of ``trio.run`` as is | ||||||
|     some point?) |     done by the ``pytest-trio`` plugin.) | ||||||
|     """ |     """ | ||||||
|     with pytest.raises(RuntimeError) : |     with pytest.raises(RuntimeError): | ||||||
|         async with tractor.find_actor('doggy'): |         with tractor.open_nursery(): | ||||||
|             pass |             pass | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | def test_no_main(): | ||||||
|  |     """An async function **must** be passed to ``tractor.run()``. | ||||||
|  |     """ | ||||||
|  |     with pytest.raises(TypeError): | ||||||
|  |         tractor.run(None) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
| @tractor_test | @tractor_test | ||||||
| async def test_self_is_registered(arb_addr): | async def test_self_is_registered(arb_addr): | ||||||
|     "Verify waiting on the arbiter to register itself using the standard api." |     "Verify waiting on the arbiter to register itself using the standard api." | ||||||
|  | @ -42,8 +49,7 @@ async def test_self_is_registered_localportal(arb_addr): | ||||||
|         assert isinstance(portal, tractor._portal.LocalPortal) |         assert isinstance(portal, tractor._portal.LocalPortal) | ||||||
| 
 | 
 | ||||||
|         with trio.fail_after(0.2): |         with trio.fail_after(0.2): | ||||||
|             sockaddr = await portal.run_from_ns( |             sockaddr = await portal.run_from_ns('self', 'wait_for_actor', name='root') | ||||||
|                     'self', 'wait_for_actor', name='root') |  | ||||||
|             assert sockaddr[0] == arb_addr |             assert sockaddr[0] == arb_addr | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -53,10 +59,6 @@ def test_local_actor_async_func(arb_addr): | ||||||
|     nums = [] |     nums = [] | ||||||
| 
 | 
 | ||||||
|     async def print_loop(): |     async def print_loop(): | ||||||
| 
 |  | ||||||
|         async with tractor.open_root_actor( |  | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
|         ): |  | ||||||
|         # arbiter is started in-proc if dne |         # arbiter is started in-proc if dne | ||||||
|         assert tractor.current_actor().is_arbiter |         assert tractor.current_actor().is_arbiter | ||||||
| 
 | 
 | ||||||
|  | @ -65,7 +67,7 @@ def test_local_actor_async_func(arb_addr): | ||||||
|             await trio.sleep(0.1) |             await trio.sleep(0.1) | ||||||
| 
 | 
 | ||||||
|     start = time.time() |     start = time.time() | ||||||
|     trio.run(print_loop) |     tractor.run(print_loop, arbiter_addr=arb_addr) | ||||||
| 
 | 
 | ||||||
|     # ensure the sleeps were actually awaited |     # ensure the sleeps were actually awaited | ||||||
|     assert time.time() - start >= 1 |     assert time.time() - start >= 1 | ||||||
|  |  | ||||||
|  | @ -1,11 +1,10 @@ | ||||||
| """ | """ | ||||||
| Multiple python programs invoking the runtime. | Multiple python programs invoking ``tractor.run()`` | ||||||
| """ | """ | ||||||
| import platform | import platform | ||||||
| import time | import time | ||||||
| 
 | 
 | ||||||
| import pytest | import pytest | ||||||
| import trio |  | ||||||
| import tractor | import tractor | ||||||
| from conftest import ( | from conftest import ( | ||||||
|     tractor_test, |     tractor_test, | ||||||
|  | @ -46,13 +45,8 @@ async def test_cancel_remote_arbiter(daemon, arb_addr): | ||||||
| def test_register_duplicate_name(daemon, arb_addr): | def test_register_duplicate_name(daemon, arb_addr): | ||||||
| 
 | 
 | ||||||
|     async def main(): |     async def main(): | ||||||
| 
 |  | ||||||
|         async with tractor.open_nursery( |  | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
|         ) as n: |  | ||||||
| 
 |  | ||||||
|         assert not tractor.current_actor().is_arbiter |         assert not tractor.current_actor().is_arbiter | ||||||
| 
 |         async with tractor.open_nursery() as n: | ||||||
|             p1 = await n.start_actor('doggy') |             p1 = await n.start_actor('doggy') | ||||||
|             p2 = await n.start_actor('doggy') |             p2 = await n.start_actor('doggy') | ||||||
| 
 | 
 | ||||||
|  | @ -63,4 +57,4 @@ def test_register_duplicate_name(daemon, arb_addr): | ||||||
| 
 | 
 | ||||||
|     # run it manually since we want to start **after** |     # run it manually since we want to start **after** | ||||||
|     # the other "daemon" program |     # the other "daemon" program | ||||||
|     trio.run(main) |     tractor.run(main, arbiter_addr=arb_addr) | ||||||
|  |  | ||||||
|  | @ -4,22 +4,20 @@ from itertools import cycle | ||||||
| import pytest | import pytest | ||||||
| import trio | import trio | ||||||
| import tractor | import tractor | ||||||
| from tractor.experimental import msgpub | from tractor.testing import tractor_test | ||||||
| 
 |  | ||||||
| from conftest import tractor_test |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def test_type_checks(): | def test_type_checks(): | ||||||
| 
 | 
 | ||||||
|     with pytest.raises(TypeError) as err: |     with pytest.raises(TypeError) as err: | ||||||
|         @msgpub |         @tractor.msg.pub | ||||||
|         async def no_get_topics(yo): |         async def no_get_topics(yo): | ||||||
|             yield |             yield | ||||||
| 
 | 
 | ||||||
|     assert "must define a `get_topics`" in str(err.value) |     assert "must define a `get_topics`" in str(err.value) | ||||||
| 
 | 
 | ||||||
|     with pytest.raises(TypeError) as err: |     with pytest.raises(TypeError) as err: | ||||||
|         @msgpub |         @tractor.msg.pub | ||||||
|         def not_async_gen(yo): |         def not_async_gen(yo): | ||||||
|             pass |             pass | ||||||
| 
 | 
 | ||||||
|  | @ -34,7 +32,7 @@ def is_even(i): | ||||||
| _get_topics = None | _get_topics = None | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @msgpub | @tractor.msg.pub | ||||||
| async def pubber(get_topics, seed=10): | async def pubber(get_topics, seed=10): | ||||||
| 
 | 
 | ||||||
|     # ensure topic subscriptions are as expected |     # ensure topic subscriptions are as expected | ||||||
|  | @ -48,9 +46,8 @@ async def pubber(get_topics, seed=10): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def subs( | async def subs( | ||||||
|     which, |     which, pub_actor_name, seed=10, | ||||||
|     pub_actor_name, |     portal=None, | ||||||
|     seed=10, |  | ||||||
|     task_status=trio.TASK_STATUS_IGNORED, |     task_status=trio.TASK_STATUS_IGNORED, | ||||||
| ): | ): | ||||||
|     if len(which) == 1: |     if len(which) == 1: | ||||||
|  | @ -63,15 +60,12 @@ async def subs( | ||||||
|         def pred(i): |         def pred(i): | ||||||
|             return isinstance(i, int) |             return isinstance(i, int) | ||||||
| 
 | 
 | ||||||
|     # TODO: https://github.com/goodboy/tractor/issues/207 |     async with tractor.find_actor(pub_actor_name) as portal: | ||||||
|     async with tractor.wait_for_actor(pub_actor_name) as portal: |         stream = await portal.run( | ||||||
|         assert portal |  | ||||||
| 
 |  | ||||||
|         async with portal.open_stream_from( |  | ||||||
|             pubber, |             pubber, | ||||||
|             topics=which, |             topics=which, | ||||||
|             seed=seed, |             seed=seed, | ||||||
|         ) as stream: |         ) | ||||||
|         task_status.started(stream) |         task_status.started(stream) | ||||||
|         times = 10 |         times = 10 | ||||||
|         count = 0 |         count = 0 | ||||||
|  | @ -85,11 +79,12 @@ async def subs( | ||||||
| 
 | 
 | ||||||
|         await stream.aclose() |         await stream.aclose() | ||||||
| 
 | 
 | ||||||
|         async with portal.open_stream_from( |         stream = await portal.run( | ||||||
|             pubber, |             pubber, | ||||||
|             topics=['odd'], |             topics=['odd'], | ||||||
|             seed=seed, |             seed=seed, | ||||||
|         ) as stream: |         ) | ||||||
|  | 
 | ||||||
|         await stream.__anext__() |         await stream.__anext__() | ||||||
|         count = 0 |         count = 0 | ||||||
|         # async with aclosing(stream) as stream: |         # async with aclosing(stream) as stream: | ||||||
|  | @ -105,7 +100,7 @@ async def subs( | ||||||
|             await stream.aclose() |             await stream.aclose() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @msgpub(tasks=['one', 'two']) | @tractor.msg.pub(tasks=['one', 'two']) | ||||||
| async def multilock_pubber(get_topics): | async def multilock_pubber(get_topics): | ||||||
|     yield {'doggy': 10} |     yield {'doggy': 10} | ||||||
| 
 | 
 | ||||||
|  | @ -133,10 +128,11 @@ async def test_required_args(callwith_expecterror): | ||||||
|             await func(**kwargs) |             await func(**kwargs) | ||||||
|     else: |     else: | ||||||
|         async with tractor.open_nursery() as n: |         async with tractor.open_nursery() as n: | ||||||
| 
 |             # await func(**kwargs) | ||||||
|             portal = await n.start_actor( |             portal = await n.run_in_actor( | ||||||
|  |                 multilock_pubber, | ||||||
|                 name='pubber', |                 name='pubber', | ||||||
|                 enable_modules=[__name__], |                 **kwargs | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|             async with tractor.wait_for_actor('pubber'): |             async with tractor.wait_for_actor('pubber'): | ||||||
|  | @ -144,15 +140,9 @@ async def test_required_args(callwith_expecterror): | ||||||
| 
 | 
 | ||||||
|             await trio.sleep(0.5) |             await trio.sleep(0.5) | ||||||
| 
 | 
 | ||||||
|             async with portal.open_stream_from( |             async for val in await portal.result(): | ||||||
|                 multilock_pubber, |  | ||||||
|                 **kwargs |  | ||||||
|             ) as stream: |  | ||||||
|                 async for val in stream: |  | ||||||
|                 assert val == {'doggy': 10} |                 assert val == {'doggy': 10} | ||||||
| 
 | 
 | ||||||
|             await portal.cancel_actor() |  | ||||||
| 
 |  | ||||||
| 
 | 
 | ||||||
| @pytest.mark.parametrize( | @pytest.mark.parametrize( | ||||||
|     'pub_actor', |     'pub_actor', | ||||||
|  | @ -169,10 +159,7 @@ def test_multi_actor_subs_arbiter_pub( | ||||||
| 
 | 
 | ||||||
|     async def main(): |     async def main(): | ||||||
| 
 | 
 | ||||||
|         async with tractor.open_nursery( |         async with tractor.open_nursery() as n: | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|         ) as n: |  | ||||||
| 
 | 
 | ||||||
|             name = 'root' |             name = 'root' | ||||||
| 
 | 
 | ||||||
|  | @ -180,9 +167,8 @@ def test_multi_actor_subs_arbiter_pub( | ||||||
|                 # start the publisher as a daemon |                 # start the publisher as a daemon | ||||||
|                 master_portal = await n.start_actor( |                 master_portal = await n.start_actor( | ||||||
|                     'streamer', |                     'streamer', | ||||||
|                     enable_modules=[__name__], |                     rpc_module_paths=[__name__], | ||||||
|                 ) |                 ) | ||||||
|                 name = 'streamer' |  | ||||||
| 
 | 
 | ||||||
|             even_portal = await n.run_in_actor( |             even_portal = await n.run_in_actor( | ||||||
|                 subs, |                 subs, | ||||||
|  | @ -241,6 +227,7 @@ def test_multi_actor_subs_arbiter_pub( | ||||||
|                 assert 'even' not in get_topics() |                 assert 'even' not in get_topics() | ||||||
| 
 | 
 | ||||||
|             await odd_portal.cancel_actor() |             await odd_portal.cancel_actor() | ||||||
|  |             await trio.sleep(2) | ||||||
| 
 | 
 | ||||||
|             if pub_actor == 'arbiter': |             if pub_actor == 'arbiter': | ||||||
|                 while get_topics(): |                 while get_topics(): | ||||||
|  | @ -250,7 +237,11 @@ def test_multi_actor_subs_arbiter_pub( | ||||||
|             else: |             else: | ||||||
|                 await master_portal.cancel_actor() |                 await master_portal.cancel_actor() | ||||||
| 
 | 
 | ||||||
|     trio.run(main) |     tractor.run( | ||||||
|  |         main, | ||||||
|  |         arbiter_addr=arb_addr, | ||||||
|  |         rpc_module_paths=[__name__], | ||||||
|  |     ) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def test_single_subactor_pub_multitask_subs( | def test_single_subactor_pub_multitask_subs( | ||||||
|  | @ -259,14 +250,11 @@ def test_single_subactor_pub_multitask_subs( | ||||||
| ): | ): | ||||||
|     async def main(): |     async def main(): | ||||||
| 
 | 
 | ||||||
|         async with tractor.open_nursery( |         async with tractor.open_nursery() as n: | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|         ) as n: |  | ||||||
| 
 | 
 | ||||||
|             portal = await n.start_actor( |             portal = await n.start_actor( | ||||||
|                 'streamer', |                 'streamer', | ||||||
|                 enable_modules=[__name__], |                 rpc_module_paths=[__name__], | ||||||
|             ) |             ) | ||||||
|             async with tractor.wait_for_actor('streamer'): |             async with tractor.wait_for_actor('streamer'): | ||||||
|                 # block until 2nd actor is initialized |                 # block until 2nd actor is initialized | ||||||
|  | @ -290,4 +278,8 @@ def test_single_subactor_pub_multitask_subs( | ||||||
| 
 | 
 | ||||||
|             await portal.cancel_actor() |             await portal.cancel_actor() | ||||||
| 
 | 
 | ||||||
|     trio.run(main) |     tractor.run( | ||||||
|  |         main, | ||||||
|  |         arbiter_addr=arb_addr, | ||||||
|  |         rpc_module_paths=[__name__], | ||||||
|  |     ) | ||||||
|  |  | ||||||
|  | @ -1,182 +0,0 @@ | ||||||
| ''' |  | ||||||
| Async context manager cache api testing: ``trionics.maybe_open_context():`` |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| from contextlib import asynccontextmanager as acm |  | ||||||
| import platform |  | ||||||
| from typing import Awaitable |  | ||||||
| 
 |  | ||||||
| import pytest |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| _resource: int = 0 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @acm |  | ||||||
| async def maybe_increment_counter(task_name: str): |  | ||||||
|     global _resource |  | ||||||
| 
 |  | ||||||
|     _resource += 1 |  | ||||||
|     await trio.lowlevel.checkpoint() |  | ||||||
|     yield _resource |  | ||||||
|     await trio.lowlevel.checkpoint() |  | ||||||
|     _resource -= 1 |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'key_on', |  | ||||||
|     ['key_value', 'kwargs'], |  | ||||||
|     ids="key_on={}".format, |  | ||||||
| ) |  | ||||||
| def test_resource_only_entered_once(key_on): |  | ||||||
|     global _resource |  | ||||||
|     _resource = 0 |  | ||||||
| 
 |  | ||||||
|     kwargs = {} |  | ||||||
|     key = None |  | ||||||
|     if key_on == 'key_value': |  | ||||||
|         key = 'some_common_key' |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
|         cache_active: bool = False |  | ||||||
| 
 |  | ||||||
|         async def enter_cached_mngr(name: str): |  | ||||||
|             nonlocal cache_active |  | ||||||
| 
 |  | ||||||
|             if key_on == 'kwargs': |  | ||||||
|                 # make a common kwargs input to key on it |  | ||||||
|                 kwargs = {'task_name': 'same_task_name'} |  | ||||||
|                 assert key is None |  | ||||||
|             else: |  | ||||||
|                 # different task names per task will be used |  | ||||||
|                 kwargs = {'task_name': name} |  | ||||||
| 
 |  | ||||||
|             async with tractor.trionics.maybe_open_context( |  | ||||||
|                 maybe_increment_counter, |  | ||||||
|                 kwargs=kwargs, |  | ||||||
|                 key=key, |  | ||||||
| 
 |  | ||||||
|             ) as (cache_hit, resource): |  | ||||||
|                 if cache_hit: |  | ||||||
|                     try: |  | ||||||
|                         cache_active = True |  | ||||||
|                         assert resource == 1 |  | ||||||
|                         await trio.sleep_forever() |  | ||||||
|                     finally: |  | ||||||
|                         cache_active = False |  | ||||||
|                 else: |  | ||||||
|                     assert resource == 1 |  | ||||||
|                     await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
|         with trio.move_on_after(0.5): |  | ||||||
|             async with ( |  | ||||||
|                 tractor.open_root_actor(), |  | ||||||
|                 trio.open_nursery() as n, |  | ||||||
|             ): |  | ||||||
| 
 |  | ||||||
|                 for i in range(10): |  | ||||||
|                     n.start_soon(enter_cached_mngr, f'task_{i}') |  | ||||||
|                     await trio.sleep(0.001) |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def streamer( |  | ||||||
|     ctx: tractor.Context, |  | ||||||
|     seq: list[int] = list(range(1000)), |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     await ctx.started() |  | ||||||
|     async with ctx.open_stream() as stream: |  | ||||||
|         for val in seq: |  | ||||||
|             await stream.send(val) |  | ||||||
|             await trio.sleep(0.001) |  | ||||||
| 
 |  | ||||||
|     print('producer finished') |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @acm |  | ||||||
| async def open_stream() -> Awaitable[tractor.MsgStream]: |  | ||||||
| 
 |  | ||||||
|     async with tractor.open_nursery() as tn: |  | ||||||
|         portal = await tn.start_actor('streamer', enable_modules=[__name__]) |  | ||||||
|         async with ( |  | ||||||
|             portal.open_context(streamer) as (ctx, first), |  | ||||||
|             ctx.open_stream() as stream, |  | ||||||
|         ): |  | ||||||
|             yield stream |  | ||||||
| 
 |  | ||||||
|         await portal.cancel_actor() |  | ||||||
|     print('CANCELLED STREAMER') |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @acm |  | ||||||
| async def maybe_open_stream(taskname: str): |  | ||||||
|     async with tractor.trionics.maybe_open_context( |  | ||||||
|         # NOTE: all secondary tasks should cache hit on the same key |  | ||||||
|         acm_func=open_stream, |  | ||||||
|     ) as (cache_hit, stream): |  | ||||||
| 
 |  | ||||||
|         if cache_hit: |  | ||||||
|             print(f'{taskname} loaded from cache') |  | ||||||
| 
 |  | ||||||
|             # add a new broadcast subscription for the quote stream |  | ||||||
|             # if this feed is already allocated by the first |  | ||||||
|             # task that entereed |  | ||||||
|             async with stream.subscribe() as bstream: |  | ||||||
|                 yield bstream |  | ||||||
|         else: |  | ||||||
|             # yield the actual stream |  | ||||||
|             yield stream |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_open_local_sub_to_stream(): |  | ||||||
|     ''' |  | ||||||
|     Verify a single inter-actor stream can can be fanned-out shared to |  | ||||||
|     N local tasks using ``trionics.maybe_open_context():``. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     timeout = 3 if platform.system() != "Windows" else 10 |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         full = list(range(1000)) |  | ||||||
| 
 |  | ||||||
|         async def get_sub_and_pull(taskname: str): |  | ||||||
|             async with ( |  | ||||||
|                 maybe_open_stream(taskname) as stream, |  | ||||||
|             ): |  | ||||||
|                 if '0' in taskname: |  | ||||||
|                     assert isinstance(stream, tractor.MsgStream) |  | ||||||
|                 else: |  | ||||||
|                     assert isinstance( |  | ||||||
|                         stream, |  | ||||||
|                         tractor.trionics.BroadcastReceiver |  | ||||||
|                     ) |  | ||||||
| 
 |  | ||||||
|                 first = await stream.receive() |  | ||||||
|                 print(f'{taskname} started with value {first}') |  | ||||||
|                 seq = [] |  | ||||||
|                 async for msg in stream: |  | ||||||
|                     seq.append(msg) |  | ||||||
| 
 |  | ||||||
|                 assert set(seq).issubset(set(full)) |  | ||||||
|             print(f'{taskname} finished') |  | ||||||
| 
 |  | ||||||
|         with trio.fail_after(timeout): |  | ||||||
|             # TODO: turns out this isn't multi-task entrant XD |  | ||||||
|             # We probably need an indepotent entry semantic? |  | ||||||
|             async with tractor.open_root_actor(): |  | ||||||
|                 async with ( |  | ||||||
|                     trio.open_nursery() as nurse, |  | ||||||
|                 ): |  | ||||||
|                     for i in range(10): |  | ||||||
|                         nurse.start_soon(get_sub_and_pull, f'task_{i}') |  | ||||||
|                         await trio.sleep(0.001) |  | ||||||
| 
 |  | ||||||
|                 print('all consumer tasks finished') |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
|  | @ -74,15 +74,11 @@ def test_rpc_errors(arb_addr, to_call, testdir): | ||||||
|         remote_err = inside_err |         remote_err = inside_err | ||||||
| 
 | 
 | ||||||
|     async def main(): |     async def main(): | ||||||
| 
 |  | ||||||
|         # spawn a subactor which calls us back |  | ||||||
|         async with tractor.open_nursery( |  | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
|             enable_modules=exposed_mods.copy(), |  | ||||||
|         ) as n: |  | ||||||
| 
 |  | ||||||
|         actor = tractor.current_actor() |         actor = tractor.current_actor() | ||||||
|         assert actor.is_arbiter |         assert actor.is_arbiter | ||||||
|  | 
 | ||||||
|  |         # spawn a subactor which calls us back | ||||||
|  |         async with tractor.open_nursery() as n: | ||||||
|             await n.run_in_actor( |             await n.run_in_actor( | ||||||
|                 sleep_back_actor, |                 sleep_back_actor, | ||||||
|                 actor_name=subactor_requests_to, |                 actor_name=subactor_requests_to, | ||||||
|  | @ -94,11 +90,15 @@ def test_rpc_errors(arb_addr, to_call, testdir): | ||||||
|                 func_name=funcname, |                 func_name=funcname, | ||||||
|                 exposed_mods=exposed_mods, |                 exposed_mods=exposed_mods, | ||||||
|                 func_defined=True if func_defined else False, |                 func_defined=True if func_defined else False, | ||||||
|                 enable_modules=subactor_exposed_mods, |                 rpc_module_paths=subactor_exposed_mods, | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|     def run(): |     def run(): | ||||||
|         trio.run(main) |         tractor.run( | ||||||
|  |             main, | ||||||
|  |             arbiter_addr=arb_addr, | ||||||
|  |             rpc_module_paths=exposed_mods.copy(), | ||||||
|  |         ) | ||||||
| 
 | 
 | ||||||
|     # handle both parameterized cases |     # handle both parameterized cases | ||||||
|     if exposed_mods and func_defined: |     if exposed_mods and func_defined: | ||||||
|  |  | ||||||
|  | @ -1,73 +0,0 @@ | ||||||
| """ |  | ||||||
| Verifying internal runtime state and undocumented extras. |  | ||||||
| 
 |  | ||||||
| """ |  | ||||||
| import os |  | ||||||
| 
 |  | ||||||
| import pytest |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| from conftest import tractor_test |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| _file_path: str = '' |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def unlink_file(): |  | ||||||
|     print('Removing tmp file!') |  | ||||||
|     os.remove(_file_path) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def crash_and_clean_tmpdir( |  | ||||||
|     tmp_file_path: str, |  | ||||||
|     error: bool = True, |  | ||||||
| ): |  | ||||||
|     global _file_path |  | ||||||
|     _file_path = tmp_file_path |  | ||||||
| 
 |  | ||||||
|     actor = tractor.current_actor() |  | ||||||
|     actor.lifetime_stack.callback(unlink_file) |  | ||||||
| 
 |  | ||||||
|     assert os.path.isfile(tmp_file_path) |  | ||||||
|     await trio.sleep(0.1) |  | ||||||
|     if error: |  | ||||||
|         assert 0 |  | ||||||
|     else: |  | ||||||
|         actor.cancel_soon() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'error_in_child', |  | ||||||
|     [True, False], |  | ||||||
| ) |  | ||||||
| @tractor_test |  | ||||||
| async def test_lifetime_stack_wipes_tmpfile( |  | ||||||
|     tmp_path, |  | ||||||
|     error_in_child: bool, |  | ||||||
| ): |  | ||||||
|     child_tmp_file = tmp_path / "child.txt" |  | ||||||
|     child_tmp_file.touch() |  | ||||||
|     assert child_tmp_file.exists() |  | ||||||
|     path = str(child_tmp_file) |  | ||||||
| 
 |  | ||||||
|     try: |  | ||||||
|         with trio.move_on_after(0.5): |  | ||||||
|             async with tractor.open_nursery() as n: |  | ||||||
|                     await (  # inlined portal |  | ||||||
|                         await n.run_in_actor( |  | ||||||
|                             crash_and_clean_tmpdir, |  | ||||||
|                             tmp_file_path=path, |  | ||||||
|                             error=error_in_child, |  | ||||||
|                         ) |  | ||||||
|                     ).result() |  | ||||||
| 
 |  | ||||||
|     except ( |  | ||||||
|         tractor.RemoteActorError, |  | ||||||
|         tractor.BaseExceptionGroup, |  | ||||||
|     ): |  | ||||||
|         pass |  | ||||||
| 
 |  | ||||||
|     # tmp file should have been wiped by |  | ||||||
|     # teardown stack. |  | ||||||
|     assert not child_tmp_file.exists() |  | ||||||
|  | @ -1,8 +1,7 @@ | ||||||
| """ | """ | ||||||
| Spawning basics | Spawning basics | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| from typing import Optional | from functools import partial | ||||||
| 
 | 
 | ||||||
| import pytest | import pytest | ||||||
| import trio | import trio | ||||||
|  | @ -13,36 +12,23 @@ from conftest import tractor_test | ||||||
| data_to_pass_down = {'doggy': 10, 'kitty': 4} | data_to_pass_down = {'doggy': 10, 'kitty': 4} | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def spawn( | async def spawn(is_arbiter, data): | ||||||
|     is_arbiter: bool, |  | ||||||
|     data: dict, |  | ||||||
|     arb_addr: tuple[str, int], |  | ||||||
| ): |  | ||||||
|     namespaces = [__name__] |     namespaces = [__name__] | ||||||
| 
 | 
 | ||||||
|     await trio.sleep(0.1) |     await trio.sleep(0.1) | ||||||
| 
 |  | ||||||
|     async with tractor.open_root_actor( |  | ||||||
|         arbiter_addr=arb_addr, |  | ||||||
|     ): |  | ||||||
| 
 |  | ||||||
|     actor = tractor.current_actor() |     actor = tractor.current_actor() | ||||||
|     assert actor.is_arbiter == is_arbiter |     assert actor.is_arbiter == is_arbiter | ||||||
|         data = data_to_pass_down |     data == data_to_pass_down | ||||||
| 
 | 
 | ||||||
|     if actor.is_arbiter: |     if actor.is_arbiter: | ||||||
| 
 |         async with tractor.open_nursery() as nursery: | ||||||
|             async with tractor.open_nursery( |  | ||||||
|             ) as nursery: |  | ||||||
| 
 |  | ||||||
|             # forks here |             # forks here | ||||||
|             portal = await nursery.run_in_actor( |             portal = await nursery.run_in_actor( | ||||||
|                 spawn, |                 spawn, | ||||||
|                 is_arbiter=False, |                 is_arbiter=False, | ||||||
|                 name='sub-actor', |                 name='sub-actor', | ||||||
|                 data=data, |                 data=data, | ||||||
|                     arb_addr=arb_addr, |                 rpc_module_paths=namespaces, | ||||||
|                     enable_modules=namespaces, |  | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|             assert len(nursery._children) == 1 |             assert len(nursery._children) == 1 | ||||||
|  | @ -56,16 +42,16 @@ async def spawn( | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def test_local_arbiter_subactor_global_state(arb_addr): | def test_local_arbiter_subactor_global_state(arb_addr): | ||||||
|     result = trio.run( |     result = tractor.run( | ||||||
|         spawn, |         partial(spawn, data=data_to_pass_down), | ||||||
|         True, |         True, | ||||||
|         data_to_pass_down, |         name='arbiter', | ||||||
|         arb_addr, |         arbiter_addr=arb_addr, | ||||||
|     ) |     ) | ||||||
|     assert result == 10 |     assert result == 10 | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def movie_theatre_question(): | def movie_theatre_question(): | ||||||
|     """A question asked in a dark theatre, in a tangent |     """A question asked in a dark theatre, in a tangent | ||||||
|     (errr, I mean different) process. |     (errr, I mean different) process. | ||||||
|     """ |     """ | ||||||
|  | @ -81,7 +67,7 @@ async def test_movie_theatre_convo(start_method): | ||||||
|         portal = await n.start_actor( |         portal = await n.start_actor( | ||||||
|             'frank', |             'frank', | ||||||
|             # enable the actor to run funcs from this current module |             # enable the actor to run funcs from this current module | ||||||
|             enable_modules=[__name__], |             rpc_module_paths=[__name__], | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
|         print(await portal.run(movie_theatre_question)) |         print(await portal.run(movie_theatre_question)) | ||||||
|  | @ -94,38 +80,24 @@ async def test_movie_theatre_convo(start_method): | ||||||
|         await portal.cancel_actor() |         await portal.cancel_actor() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def cellar_door(return_value: Optional[str]): | def cellar_door(): | ||||||
|     return return_value |     return "Dang that's beautiful" | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'return_value', ["Dang that's beautiful", None], |  | ||||||
|     ids=['return_str', 'return_None'], |  | ||||||
| ) |  | ||||||
| @tractor_test | @tractor_test | ||||||
| async def test_most_beautiful_word( | async def test_most_beautiful_word(start_method): | ||||||
|     start_method, |     """The main ``tractor`` routine. | ||||||
|     return_value |     """ | ||||||
| ): |  | ||||||
|     ''' |  | ||||||
|     The main ``tractor`` routine. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     with trio.fail_after(1): |  | ||||||
|     async with tractor.open_nursery() as n: |     async with tractor.open_nursery() as n: | ||||||
| 
 | 
 | ||||||
|         portal = await n.run_in_actor( |         portal = await n.run_in_actor( | ||||||
|             cellar_door, |             cellar_door, | ||||||
|                 return_value=return_value, |  | ||||||
|             name='some_linguist', |             name='some_linguist', | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
|             print(await portal.result()) |  | ||||||
|     # The ``async with`` will unblock here since the 'some_linguist' |     # The ``async with`` will unblock here since the 'some_linguist' | ||||||
|     # actor has completed its main task ``cellar_door``. |     # actor has completed its main task ``cellar_door``. | ||||||
| 
 | 
 | ||||||
|     # this should pull the cached final result already captured during |  | ||||||
|     # the nursery block exit. |  | ||||||
|     print(await portal.result()) |     print(await portal.result()) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -142,27 +114,26 @@ def test_loglevel_propagated_to_subactor( | ||||||
|     capfd, |     capfd, | ||||||
|     arb_addr, |     arb_addr, | ||||||
| ): | ): | ||||||
|     if start_method == 'mp_forkserver': |     if start_method == 'forkserver': | ||||||
|         pytest.skip( |         pytest.skip( | ||||||
|             "a bug with `capfd` seems to make forkserver capture not work?") |             "a bug with `capfd` seems to make forkserver capture not work?") | ||||||
| 
 | 
 | ||||||
|     level = 'critical' |     level = 'critical' | ||||||
| 
 | 
 | ||||||
|     async def main(): |     async def main(): | ||||||
|         async with tractor.open_nursery( |         async with tractor.open_nursery() as tn: | ||||||
|             name='arbiter', |  | ||||||
|             start_method=start_method, |  | ||||||
|             arbiter_addr=arb_addr, |  | ||||||
| 
 |  | ||||||
|         ) as tn: |  | ||||||
|             await tn.run_in_actor( |             await tn.run_in_actor( | ||||||
|                 check_loglevel, |                 check_loglevel, | ||||||
|                 loglevel=level, |  | ||||||
|                 level=level, |                 level=level, | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|     trio.run(main) |     tractor.run( | ||||||
| 
 |         main, | ||||||
|  |         name='arbiter', | ||||||
|  |         loglevel=level, | ||||||
|  |         start_method=start_method, | ||||||
|  |         arbiter_addr=arb_addr, | ||||||
|  |     ) | ||||||
|     # ensure subactor spits log message on stderr |     # ensure subactor spits log message on stderr | ||||||
|     captured = capfd.readouterr() |     captured = capfd.readouterr() | ||||||
|     assert 'yoyoyo' in captured.err |     assert 'yoyoyo' in captured.err | ||||||
|  |  | ||||||
|  | @ -7,10 +7,9 @@ import platform | ||||||
| 
 | 
 | ||||||
| import trio | import trio | ||||||
| import tractor | import tractor | ||||||
|  | from tractor.testing import tractor_test | ||||||
| import pytest | import pytest | ||||||
| 
 | 
 | ||||||
| from conftest import tractor_test |  | ||||||
| 
 |  | ||||||
| 
 | 
 | ||||||
| def test_must_define_ctx(): | def test_must_define_ctx(): | ||||||
| 
 | 
 | ||||||
|  | @ -33,16 +32,13 @@ async def async_gen_stream(sequence): | ||||||
| 
 | 
 | ||||||
|     # block indefinitely waiting to be cancelled by ``aclose()`` call |     # block indefinitely waiting to be cancelled by ``aclose()`` call | ||||||
|     with trio.CancelScope() as cs: |     with trio.CancelScope() as cs: | ||||||
|         await trio.sleep_forever() |         await trio.sleep(float('inf')) | ||||||
|         assert 0 |         assert 0 | ||||||
|     assert cs.cancelled_caught |     assert cs.cancelled_caught | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @tractor.stream | @tractor.stream | ||||||
| async def context_stream( | async def context_stream(ctx, sequence): | ||||||
|     ctx: tractor.Context, |  | ||||||
|     sequence |  | ||||||
| ): |  | ||||||
|     for i in sequence: |     for i in sequence: | ||||||
|         await ctx.send_yield(i) |         await ctx.send_yield(i) | ||||||
|         await trio.sleep(0.1) |         await trio.sleep(0.1) | ||||||
|  | @ -54,38 +50,26 @@ async def context_stream( | ||||||
|     assert cs.cancelled_caught |     assert cs.cancelled_caught | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def stream_from_single_subactor( | async def stream_from_single_subactor(stream_func): | ||||||
|     arb_addr, |  | ||||||
|     start_method, |  | ||||||
|     stream_func, |  | ||||||
| ): |  | ||||||
|     """Verify we can spawn a daemon actor and retrieve streamed data. |     """Verify we can spawn a daemon actor and retrieve streamed data. | ||||||
|     """ |     """ | ||||||
|     # only one per host address, spawns an actor if None |  | ||||||
| 
 |  | ||||||
|     async with tractor.open_nursery( |  | ||||||
|         arbiter_addr=arb_addr, |  | ||||||
|         start_method=start_method, |  | ||||||
|     ) as nursery: |  | ||||||
| 
 |  | ||||||
|     async with tractor.find_actor('streamerd') as portals: |     async with tractor.find_actor('streamerd') as portals: | ||||||
| 
 | 
 | ||||||
|         if not portals: |         if not portals: | ||||||
| 
 |             # only one per host address, spawns an actor if None | ||||||
|  |             async with tractor.open_nursery() as nursery: | ||||||
|                 # no brokerd actor found |                 # no brokerd actor found | ||||||
|                 portal = await nursery.start_actor( |                 portal = await nursery.start_actor( | ||||||
|                     'streamerd', |                     'streamerd', | ||||||
|                     enable_modules=[__name__], |                     rpc_module_paths=[__name__], | ||||||
|                 ) |                 ) | ||||||
| 
 | 
 | ||||||
|                 seq = range(10) |                 seq = range(10) | ||||||
| 
 | 
 | ||||||
|                 with trio.fail_after(5): |                 stream = await portal.run( | ||||||
|                     async with portal.open_stream_from( |                     stream_func,  # one of the funcs above | ||||||
|                         stream_func, |  | ||||||
|                     sequence=list(seq),  # has to be msgpack serializable |                     sequence=list(seq),  # has to be msgpack serializable | ||||||
|                     ) as stream: |                 ) | ||||||
| 
 |  | ||||||
|                 # it'd sure be nice to have an asyncitertools here... |                 # it'd sure be nice to have an asyncitertools here... | ||||||
|                 iseq = iter(seq) |                 iseq = iter(seq) | ||||||
|                 ival = next(iseq) |                 ival = next(iseq) | ||||||
|  | @ -102,14 +86,12 @@ async def stream_from_single_subactor( | ||||||
| 
 | 
 | ||||||
|                 await trio.sleep(0.3) |                 await trio.sleep(0.3) | ||||||
| 
 | 
 | ||||||
|                         # ensure EOC signalled-state translates |  | ||||||
|                         # XXX: not really sure this is correct, |  | ||||||
|                         # shouldn't it be a `ClosedResourceError`? |  | ||||||
|                 try: |                 try: | ||||||
|                     await stream.__anext__() |                     await stream.__anext__() | ||||||
|                 except StopAsyncIteration: |                 except StopAsyncIteration: | ||||||
|                     # stop all spawned subactors |                     # stop all spawned subactors | ||||||
|                     await portal.cancel_actor() |                     await portal.cancel_actor() | ||||||
|  |                 # await nursery.cancel() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @pytest.mark.parametrize( | @pytest.mark.parametrize( | ||||||
|  | @ -118,13 +100,13 @@ async def stream_from_single_subactor( | ||||||
| def test_stream_from_single_subactor(arb_addr, start_method, stream_func): | def test_stream_from_single_subactor(arb_addr, start_method, stream_func): | ||||||
|     """Verify streaming from a spawned async generator. |     """Verify streaming from a spawned async generator. | ||||||
|     """ |     """ | ||||||
|     trio.run( |     tractor.run( | ||||||
|         partial( |         partial( | ||||||
|             stream_from_single_subactor, |             stream_from_single_subactor, | ||||||
|             arb_addr, |  | ||||||
|             start_method, |  | ||||||
|             stream_func=stream_func, |             stream_func=stream_func, | ||||||
|         ), |         ), | ||||||
|  |         arbiter_addr=arb_addr, | ||||||
|  |         start_method=start_method, | ||||||
|     ) |     ) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -136,7 +118,7 @@ async def stream_data(seed): | ||||||
|         yield i |         yield i | ||||||
| 
 | 
 | ||||||
|         # trigger scheduler to simulate practical usage |         # trigger scheduler to simulate practical usage | ||||||
|         await trio.sleep(0.0001) |         await trio.sleep(0) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| # this is the third actor; the aggregator | # this is the third actor; the aggregator | ||||||
|  | @ -150,7 +132,7 @@ async def aggregate(seed): | ||||||
|             # fork point |             # fork point | ||||||
|             portal = await nursery.start_actor( |             portal = await nursery.start_actor( | ||||||
|                 name=f'streamer_{i}', |                 name=f'streamer_{i}', | ||||||
|                 enable_modules=[__name__], |                 rpc_module_paths=[__name__], | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|             portals.append(portal) |             portals.append(portal) | ||||||
|  | @ -159,12 +141,9 @@ async def aggregate(seed): | ||||||
| 
 | 
 | ||||||
|         async def push_to_chan(portal, send_chan): |         async def push_to_chan(portal, send_chan): | ||||||
|             async with send_chan: |             async with send_chan: | ||||||
| 
 |                 async for value in await portal.run( | ||||||
|                 async with portal.open_stream_from( |                     __name__, 'stream_data', seed=seed | ||||||
|                     stream_data, seed=seed, |                 ): | ||||||
|                 ) as stream: |  | ||||||
| 
 |  | ||||||
|                     async for value in stream: |  | ||||||
|                     # leverage trio's built-in backpressure |                     # leverage trio's built-in backpressure | ||||||
|                     await send_chan.send(value) |                     await send_chan.send(value) | ||||||
| 
 | 
 | ||||||
|  | @ -204,29 +183,26 @@ async def a_quadruple_example(): | ||||||
|         seed = int(1e3) |         seed = int(1e3) | ||||||
|         pre_start = time.time() |         pre_start = time.time() | ||||||
| 
 | 
 | ||||||
|         portal = await nursery.start_actor( |         portal = await nursery.run_in_actor( | ||||||
|  |             aggregate, | ||||||
|  |             seed=seed, | ||||||
|             name='aggregator', |             name='aggregator', | ||||||
|             enable_modules=[__name__], |  | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
|         start = time.time() |         start = time.time() | ||||||
|         # the portal call returns exactly what you'd expect |         # the portal call returns exactly what you'd expect | ||||||
|         # as if the remote "aggregate" function was called locally |         # as if the remote "aggregate" function was called locally | ||||||
|         result_stream = [] |         result_stream = [] | ||||||
| 
 |         async for value in await portal.result(): | ||||||
|         async with portal.open_stream_from(aggregate, seed=seed) as stream: |  | ||||||
|             async for value in stream: |  | ||||||
|             result_stream.append(value) |             result_stream.append(value) | ||||||
| 
 | 
 | ||||||
|         print(f"STREAM TIME = {time.time() - start}") |         print(f"STREAM TIME = {time.time() - start}") | ||||||
|         print(f"STREAM + SPAWN TIME = {time.time() - pre_start}") |         print(f"STREAM + SPAWN TIME = {time.time() - pre_start}") | ||||||
|         assert result_stream == list(range(seed)) |         assert result_stream == list(range(seed)) | ||||||
|         await portal.cancel_actor() |  | ||||||
|         return result_stream |         return result_stream | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def cancel_after(wait, arb_addr): | async def cancel_after(wait): | ||||||
|     async with tractor.open_root_actor(arbiter_addr=arb_addr): |  | ||||||
|     with trio.move_on_after(wait): |     with trio.move_on_after(wait): | ||||||
|         return await a_quadruple_example() |         return await a_quadruple_example() | ||||||
| 
 | 
 | ||||||
|  | @ -240,7 +216,7 @@ def time_quad_ex(arb_addr, ci_env, spawn_backend): | ||||||
| 
 | 
 | ||||||
|     timeout = 7 if platform.system() in ('Windows', 'Darwin') else 4 |     timeout = 7 if platform.system() in ('Windows', 'Darwin') else 4 | ||||||
|     start = time.time() |     start = time.time() | ||||||
|     results = trio.run(cancel_after, timeout, arb_addr) |     results = tractor.run(cancel_after, timeout, arbiter_addr=arb_addr) | ||||||
|     diff = time.time() - start |     diff = time.time() - start | ||||||
|     assert results |     assert results | ||||||
|     return results, diff |     return results, diff | ||||||
|  | @ -251,7 +227,7 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend): | ||||||
| 
 | 
 | ||||||
|     results, diff = time_quad_ex |     results, diff = time_quad_ex | ||||||
|     assert results |     assert results | ||||||
|     this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 3 |     this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 2.5 | ||||||
|     assert diff < this_fast |     assert diff < this_fast | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -267,7 +243,7 @@ def test_not_fast_enough_quad( | ||||||
|     """ |     """ | ||||||
|     results, diff = time_quad_ex |     results, diff = time_quad_ex | ||||||
|     delay = max(diff - cancel_delay, 0) |     delay = max(diff - cancel_delay, 0) | ||||||
|     results = trio.run(cancel_after, delay, arb_addr) |     results = tractor.run(cancel_after, delay, arbiter_addr=arb_addr) | ||||||
|     system = platform.system() |     system = platform.system() | ||||||
|     if system in ('Windows', 'Darwin') and results is not None: |     if system in ('Windows', 'Darwin') and results is not None: | ||||||
|         # In CI envoirments it seems later runs are quicker then the first |         # In CI envoirments it seems later runs are quicker then the first | ||||||
|  | @ -296,14 +272,11 @@ async def test_respawn_consumer_task( | ||||||
| 
 | 
 | ||||||
|     async with tractor.open_nursery() as n: |     async with tractor.open_nursery() as n: | ||||||
| 
 | 
 | ||||||
|         portal = await n.start_actor( |         stream = await(await n.run_in_actor( | ||||||
|             name='streamer', |  | ||||||
|             enable_modules=[__name__] |  | ||||||
|         ) |  | ||||||
|         async with portal.open_stream_from( |  | ||||||
|             stream_data, |             stream_data, | ||||||
|             seed=11, |             seed=11, | ||||||
|         ) as stream: |             name='streamer', | ||||||
|  |         )).result() | ||||||
| 
 | 
 | ||||||
|         expect = set(range(11)) |         expect = set(range(11)) | ||||||
|         received = [] |         received = [] | ||||||
|  | @ -317,7 +290,7 @@ async def test_respawn_consumer_task( | ||||||
|                 task_status.started(cs) |                 task_status.started(cs) | ||||||
| 
 | 
 | ||||||
|                 # shield stream's underlying channel from cancellation |                 # shield stream's underlying channel from cancellation | ||||||
|                     # with stream.shield(): |                 with stream.shield(): | ||||||
| 
 | 
 | ||||||
|                     async for v in stream: |                     async for v in stream: | ||||||
|                         print(f'from stream: {v}') |                         print(f'from stream: {v}') | ||||||
|  | @ -344,9 +317,3 @@ async def test_respawn_consumer_task( | ||||||
|                 if not expect: |                 if not expect: | ||||||
|                     print("all values streamed, BREAKING") |                     print("all values streamed, BREAKING") | ||||||
|                     break |                     break | ||||||
| 
 |  | ||||||
|                 cs.cancel() |  | ||||||
| 
 |  | ||||||
|         # TODO: this is justification for a |  | ||||||
|         # ``ActorNursery.stream_from_actor()`` helper? |  | ||||||
|         await portal.cancel_actor() |  | ||||||
|  | @ -1,514 +0,0 @@ | ||||||
| """ |  | ||||||
| Broadcast channels for fan-out to local tasks. |  | ||||||
| 
 |  | ||||||
| """ |  | ||||||
| from contextlib import asynccontextmanager |  | ||||||
| from functools import partial |  | ||||||
| from itertools import cycle |  | ||||||
| import time |  | ||||||
| from typing import Optional |  | ||||||
| 
 |  | ||||||
| import pytest |  | ||||||
| import trio |  | ||||||
| from trio.lowlevel import current_task |  | ||||||
| import tractor |  | ||||||
| from tractor.trionics import ( |  | ||||||
|     broadcast_receiver, |  | ||||||
|     Lagged, |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @tractor.context |  | ||||||
| async def echo_sequences( |  | ||||||
| 
 |  | ||||||
|     ctx:  tractor.Context, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     '''Bidir streaming endpoint which will stream |  | ||||||
|     back any sequence it is sent item-wise. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     await ctx.started() |  | ||||||
| 
 |  | ||||||
|     async with ctx.open_stream() as stream: |  | ||||||
|         async for sequence in stream: |  | ||||||
|             seq = list(sequence) |  | ||||||
|             for value in seq: |  | ||||||
|                 await stream.send(value) |  | ||||||
|                 print(f'producer sent {value}') |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def ensure_sequence( |  | ||||||
| 
 |  | ||||||
|     stream: tractor.MsgStream, |  | ||||||
|     sequence: list, |  | ||||||
|     delay: Optional[float] = None, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     name = current_task().name |  | ||||||
|     async with stream.subscribe() as bcaster: |  | ||||||
|         assert not isinstance(bcaster, type(stream)) |  | ||||||
|         async for value in bcaster: |  | ||||||
|             print(f'{name} rx: {value}') |  | ||||||
|             assert value == sequence[0] |  | ||||||
|             sequence.remove(value) |  | ||||||
| 
 |  | ||||||
|             if delay: |  | ||||||
|                 await trio.sleep(delay) |  | ||||||
| 
 |  | ||||||
|             if not sequence: |  | ||||||
|                 # fully consumed |  | ||||||
|                 break |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @asynccontextmanager |  | ||||||
| async def open_sequence_streamer( |  | ||||||
| 
 |  | ||||||
|     sequence: list[int], |  | ||||||
|     arb_addr: tuple[str, int], |  | ||||||
|     start_method: str, |  | ||||||
| 
 |  | ||||||
| ) -> tractor.MsgStream: |  | ||||||
| 
 |  | ||||||
|     async with tractor.open_nursery( |  | ||||||
|         arbiter_addr=arb_addr, |  | ||||||
|         start_method=start_method, |  | ||||||
|     ) as tn: |  | ||||||
| 
 |  | ||||||
|         portal = await tn.start_actor( |  | ||||||
|             'sequence_echoer', |  | ||||||
|             enable_modules=[__name__], |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         async with portal.open_context( |  | ||||||
|             echo_sequences, |  | ||||||
|         ) as (ctx, first): |  | ||||||
| 
 |  | ||||||
|             assert first is None |  | ||||||
|             async with ctx.open_stream(backpressure=True) as stream: |  | ||||||
|                 yield stream |  | ||||||
| 
 |  | ||||||
|         await portal.cancel_actor() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_stream_fan_out_to_local_subscriptions( |  | ||||||
|     arb_addr, |  | ||||||
|     start_method, |  | ||||||
| ): |  | ||||||
| 
 |  | ||||||
|     sequence = list(range(1000)) |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         async with open_sequence_streamer( |  | ||||||
|             sequence, |  | ||||||
|             arb_addr, |  | ||||||
|             start_method, |  | ||||||
|         ) as stream: |  | ||||||
| 
 |  | ||||||
|             async with trio.open_nursery() as n: |  | ||||||
|                 for i in range(10): |  | ||||||
|                     n.start_soon( |  | ||||||
|                         ensure_sequence, |  | ||||||
|                         stream, |  | ||||||
|                         sequence.copy(), |  | ||||||
|                         name=f'consumer_{i}', |  | ||||||
|                     ) |  | ||||||
| 
 |  | ||||||
|                 await stream.send(tuple(sequence)) |  | ||||||
| 
 |  | ||||||
|                 async for value in stream: |  | ||||||
|                     print(f'source stream rx: {value}') |  | ||||||
|                     assert value == sequence[0] |  | ||||||
|                     sequence.remove(value) |  | ||||||
| 
 |  | ||||||
|                     if not sequence: |  | ||||||
|                         # fully consumed |  | ||||||
|                         break |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'task_delays', |  | ||||||
|     [ |  | ||||||
|         (0.01, 0.001), |  | ||||||
|         (0.001, 0.01), |  | ||||||
|     ] |  | ||||||
| ) |  | ||||||
| def test_consumer_and_parent_maybe_lag( |  | ||||||
|     arb_addr, |  | ||||||
|     start_method, |  | ||||||
|     task_delays, |  | ||||||
| ): |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         sequence = list(range(300)) |  | ||||||
|         parent_delay, sub_delay = task_delays |  | ||||||
| 
 |  | ||||||
|         async with open_sequence_streamer( |  | ||||||
|             sequence, |  | ||||||
|             arb_addr, |  | ||||||
|             start_method, |  | ||||||
|         ) as stream: |  | ||||||
| 
 |  | ||||||
|             try: |  | ||||||
|                 async with trio.open_nursery() as n: |  | ||||||
| 
 |  | ||||||
|                     n.start_soon( |  | ||||||
|                         ensure_sequence, |  | ||||||
|                         stream, |  | ||||||
|                         sequence.copy(), |  | ||||||
|                         sub_delay, |  | ||||||
|                         name='consumer_task', |  | ||||||
|                     ) |  | ||||||
| 
 |  | ||||||
|                     await stream.send(tuple(sequence)) |  | ||||||
| 
 |  | ||||||
|                     # async for value in stream: |  | ||||||
|                     lagged = False |  | ||||||
|                     lag_count = 0 |  | ||||||
| 
 |  | ||||||
|                     while True: |  | ||||||
|                         try: |  | ||||||
|                             value = await stream.receive() |  | ||||||
|                             print(f'source stream rx: {value}') |  | ||||||
| 
 |  | ||||||
|                             if lagged: |  | ||||||
|                                 # re set the sequence starting at our last |  | ||||||
|                                 # value |  | ||||||
|                                 sequence = sequence[sequence.index(value) + 1:] |  | ||||||
|                             else: |  | ||||||
|                                 assert value == sequence[0] |  | ||||||
|                                 sequence.remove(value) |  | ||||||
| 
 |  | ||||||
|                             lagged = False |  | ||||||
| 
 |  | ||||||
|                         except Lagged: |  | ||||||
|                             lagged = True |  | ||||||
|                             print(f'source stream lagged after {value}') |  | ||||||
|                             lag_count += 1 |  | ||||||
|                             continue |  | ||||||
| 
 |  | ||||||
|                         # lag the parent |  | ||||||
|                         await trio.sleep(parent_delay) |  | ||||||
| 
 |  | ||||||
|                         if not sequence: |  | ||||||
|                             # fully consumed |  | ||||||
|                             break |  | ||||||
|                     print(f'parent + source stream lagged: {lag_count}') |  | ||||||
| 
 |  | ||||||
|                     if parent_delay > sub_delay: |  | ||||||
|                         assert lag_count > 0 |  | ||||||
| 
 |  | ||||||
|             except Lagged: |  | ||||||
|                 # child was lagged |  | ||||||
|                 assert parent_delay < sub_delay |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_faster_task_to_recv_is_cancelled_by_slower( |  | ||||||
|     arb_addr, |  | ||||||
|     start_method, |  | ||||||
| ): |  | ||||||
|     ''' |  | ||||||
|     Ensure that if a faster task consuming from a stream is cancelled |  | ||||||
|     the slower task can continue to receive all expected values. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         sequence = list(range(1000)) |  | ||||||
| 
 |  | ||||||
|         async with open_sequence_streamer( |  | ||||||
|             sequence, |  | ||||||
|             arb_addr, |  | ||||||
|             start_method, |  | ||||||
| 
 |  | ||||||
|         ) as stream: |  | ||||||
| 
 |  | ||||||
|             async with trio.open_nursery() as n: |  | ||||||
|                 n.start_soon( |  | ||||||
|                     ensure_sequence, |  | ||||||
|                     stream, |  | ||||||
|                     sequence.copy(), |  | ||||||
|                     0, |  | ||||||
|                     name='consumer_task', |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|                 await stream.send(tuple(sequence)) |  | ||||||
| 
 |  | ||||||
|                 # pull 3 values, cancel the subtask, then |  | ||||||
|                 # expect to be able to pull all values still |  | ||||||
|                 for i in range(20): |  | ||||||
|                     try: |  | ||||||
|                         value = await stream.receive() |  | ||||||
|                         print(f'source stream rx: {value}') |  | ||||||
|                         await trio.sleep(0.01) |  | ||||||
|                     except Lagged: |  | ||||||
|                         print(f'parent overrun after {value}') |  | ||||||
|                         continue |  | ||||||
| 
 |  | ||||||
|                 print('cancelling faster subtask') |  | ||||||
|                 n.cancel_scope.cancel() |  | ||||||
| 
 |  | ||||||
|             try: |  | ||||||
|                 value = await stream.receive() |  | ||||||
|                 print(f'source stream after cancel: {value}') |  | ||||||
|             except Lagged: |  | ||||||
|                 print(f'parent overrun after {value}') |  | ||||||
| 
 |  | ||||||
|             # expect to see all remaining values |  | ||||||
|             with trio.fail_after(0.5): |  | ||||||
|                 async for value in stream: |  | ||||||
|                     assert stream._broadcaster._state.recv_ready is None |  | ||||||
|                     print(f'source stream rx: {value}') |  | ||||||
|                     if value == 999: |  | ||||||
|                         # fully consumed and we missed no values once |  | ||||||
|                         # the faster subtask was cancelled |  | ||||||
|                         break |  | ||||||
| 
 |  | ||||||
|                 # await tractor.breakpoint() |  | ||||||
|                 # await stream.receive() |  | ||||||
|                 print(f'final value: {value}') |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_subscribe_errors_after_close(): |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         size = 1 |  | ||||||
|         tx, rx = trio.open_memory_channel(size) |  | ||||||
|         async with broadcast_receiver(rx, size) as brx: |  | ||||||
|             pass |  | ||||||
| 
 |  | ||||||
|         try: |  | ||||||
|             # open and close |  | ||||||
|             async with brx.subscribe(): |  | ||||||
|                 pass |  | ||||||
| 
 |  | ||||||
|         except trio.ClosedResourceError: |  | ||||||
|             assert brx.key not in brx._state.subs |  | ||||||
| 
 |  | ||||||
|         else: |  | ||||||
|             assert 0 |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_ensure_slow_consumers_lag_out( |  | ||||||
|     arb_addr, |  | ||||||
|     start_method, |  | ||||||
| ): |  | ||||||
|     '''This is a pure local task test; no tractor |  | ||||||
|     machinery is really required. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         # make sure it all works within the runtime |  | ||||||
|         async with tractor.open_root_actor(): |  | ||||||
| 
 |  | ||||||
|             num_laggers = 4 |  | ||||||
|             laggers: dict[str, int] = {} |  | ||||||
|             retries = 3 |  | ||||||
|             size = 100 |  | ||||||
|             tx, rx = trio.open_memory_channel(size) |  | ||||||
|             brx = broadcast_receiver(rx, size) |  | ||||||
| 
 |  | ||||||
|             async def sub_and_print( |  | ||||||
|                 delay: float, |  | ||||||
|             ) -> None: |  | ||||||
| 
 |  | ||||||
|                 task = current_task() |  | ||||||
|                 start = time.time() |  | ||||||
| 
 |  | ||||||
|                 async with brx.subscribe() as lbrx: |  | ||||||
|                     while True: |  | ||||||
|                         print(f'{task.name}: starting consume loop') |  | ||||||
|                         try: |  | ||||||
|                             async for value in lbrx: |  | ||||||
|                                 print(f'{task.name}: {value}') |  | ||||||
|                                 await trio.sleep(delay) |  | ||||||
| 
 |  | ||||||
|                             if task.name == 'sub_1': |  | ||||||
|                                 # trigger checkpoint to clean out other subs |  | ||||||
|                                 await trio.sleep(0.01) |  | ||||||
| 
 |  | ||||||
|                                 # the non-lagger got |  | ||||||
|                                 # a ``trio.EndOfChannel`` |  | ||||||
|                                 # because the ``tx`` below was closed |  | ||||||
|                                 assert len(lbrx._state.subs) == 1 |  | ||||||
| 
 |  | ||||||
|                                 await lbrx.aclose() |  | ||||||
| 
 |  | ||||||
|                                 assert len(lbrx._state.subs) == 0 |  | ||||||
| 
 |  | ||||||
|                         except trio.ClosedResourceError: |  | ||||||
|                             # only the fast sub will try to re-enter |  | ||||||
|                             # iteration on the now closed bcaster |  | ||||||
|                             assert task.name == 'sub_1' |  | ||||||
|                             return |  | ||||||
| 
 |  | ||||||
|                         except Lagged: |  | ||||||
|                             lag_time = time.time() - start |  | ||||||
|                             lags = laggers[task.name] |  | ||||||
|                             print( |  | ||||||
|                                 f'restarting slow task {task.name} ' |  | ||||||
|                                 f'that bailed out on {lags}:{value} ' |  | ||||||
|                                 f'after {lag_time:.3f}') |  | ||||||
|                             if lags <= retries: |  | ||||||
|                                 laggers[task.name] += 1 |  | ||||||
|                                 continue |  | ||||||
|                             else: |  | ||||||
|                                 print( |  | ||||||
|                                     f'{task.name} was too slow and terminated ' |  | ||||||
|                                     f'on {lags}:{value}') |  | ||||||
|                                 return |  | ||||||
| 
 |  | ||||||
|             async with trio.open_nursery() as nursery: |  | ||||||
| 
 |  | ||||||
|                 for i in range(1, num_laggers): |  | ||||||
| 
 |  | ||||||
|                     task_name = f'sub_{i}' |  | ||||||
|                     laggers[task_name] = 0 |  | ||||||
|                     nursery.start_soon( |  | ||||||
|                         partial( |  | ||||||
|                             sub_and_print, |  | ||||||
|                             delay=i*0.001, |  | ||||||
|                         ), |  | ||||||
|                         name=task_name, |  | ||||||
|                     ) |  | ||||||
| 
 |  | ||||||
|                 # allow subs to sched |  | ||||||
|                 await trio.sleep(0.1) |  | ||||||
| 
 |  | ||||||
|                 async with tx: |  | ||||||
|                     for i in cycle(range(size)): |  | ||||||
|                         await tx.send(i) |  | ||||||
|                         if len(brx._state.subs) == 2: |  | ||||||
|                             # only one, the non lagger, sub is left |  | ||||||
|                             break |  | ||||||
| 
 |  | ||||||
|                 # the non-lagger |  | ||||||
|                 assert laggers.pop('sub_1') == 0 |  | ||||||
| 
 |  | ||||||
|                 for n, v in laggers.items(): |  | ||||||
|                     assert v == 4 |  | ||||||
| 
 |  | ||||||
|                 assert tx._closed |  | ||||||
|                 assert not tx._state.open_send_channels |  | ||||||
| 
 |  | ||||||
|                 # check that "first" bcaster that we created |  | ||||||
|                 # above, never was iterated and is thus overrun |  | ||||||
|                 try: |  | ||||||
|                     await brx.receive() |  | ||||||
|                 except Lagged: |  | ||||||
|                     # expect tokio style index truncation |  | ||||||
|                     seq = brx._state.subs[brx.key] |  | ||||||
|                     assert seq == len(brx._state.queue) - 1 |  | ||||||
| 
 |  | ||||||
|                 # all backpressured entries in the underlying |  | ||||||
|                 # channel should have been copied into the caster |  | ||||||
|                 # queue trailing-window |  | ||||||
|                 async for i in rx: |  | ||||||
|                     print(f'bped: {i}') |  | ||||||
|                     assert i in brx._state.queue |  | ||||||
| 
 |  | ||||||
|                 # should be noop |  | ||||||
|                 await brx.aclose() |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_first_recver_is_cancelled(): |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         # make sure it all works within the runtime |  | ||||||
|         async with tractor.open_root_actor(): |  | ||||||
| 
 |  | ||||||
|             tx, rx = trio.open_memory_channel(1) |  | ||||||
|             brx = broadcast_receiver(rx, 1) |  | ||||||
|             cs = trio.CancelScope() |  | ||||||
| 
 |  | ||||||
|             async def sub_and_recv(): |  | ||||||
|                 with cs: |  | ||||||
|                     async with brx.subscribe() as bc: |  | ||||||
|                         async for value in bc: |  | ||||||
|                             print(value) |  | ||||||
| 
 |  | ||||||
|             async def cancel_and_send(): |  | ||||||
|                 await trio.sleep(0.2) |  | ||||||
|                 cs.cancel() |  | ||||||
|                 await tx.send(1) |  | ||||||
| 
 |  | ||||||
|             async with trio.open_nursery() as n: |  | ||||||
| 
 |  | ||||||
|                 n.start_soon(sub_and_recv) |  | ||||||
|                 await trio.sleep(0.1) |  | ||||||
|                 assert brx._state.recv_ready |  | ||||||
| 
 |  | ||||||
|                 n.start_soon(cancel_and_send) |  | ||||||
| 
 |  | ||||||
|                 # ensure that we don't hang because no-task is now |  | ||||||
|                 # waiting on the underlying receive.. |  | ||||||
|                 with trio.fail_after(0.5): |  | ||||||
|                     value = await brx.receive() |  | ||||||
|                     print(f'parent: {value}') |  | ||||||
|                     assert value == 1 |  | ||||||
| 
 |  | ||||||
|     trio.run(main) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def test_no_raise_on_lag(): |  | ||||||
|     ''' |  | ||||||
|     Run a simple 2-task broadcast where one task is slow but configured |  | ||||||
|     so that it does not raise `Lagged` on overruns using |  | ||||||
|     `raise_on_lasg=False` and verify that the task does not raise. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     size = 100 |  | ||||||
|     tx, rx = trio.open_memory_channel(size) |  | ||||||
|     brx = broadcast_receiver(rx, size) |  | ||||||
| 
 |  | ||||||
|     async def slow(): |  | ||||||
|         async with brx.subscribe( |  | ||||||
|             raise_on_lag=False, |  | ||||||
|         ) as br: |  | ||||||
|             async for msg in br: |  | ||||||
|                 print(f'slow task got: {msg}') |  | ||||||
|                 await trio.sleep(0.1) |  | ||||||
| 
 |  | ||||||
|     async def fast(): |  | ||||||
|         async with brx.subscribe() as br: |  | ||||||
|             async for msg in br: |  | ||||||
|                 print(f'fast task got: {msg}') |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
|         async with ( |  | ||||||
|             tractor.open_root_actor( |  | ||||||
|                 # NOTE: so we see the warning msg emitted by the bcaster |  | ||||||
|                 # internals when the no raise flag is set. |  | ||||||
|                 loglevel='warning', |  | ||||||
|             ), |  | ||||||
|             trio.open_nursery() as n, |  | ||||||
|         ): |  | ||||||
|             n.start_soon(slow) |  | ||||||
|             n.start_soon(fast) |  | ||||||
| 
 |  | ||||||
|             for i in range(1000): |  | ||||||
|                 await tx.send(i) |  | ||||||
| 
 |  | ||||||
|             # simulate user nailing ctl-c after realizing |  | ||||||
|             # there's a lag in the slow task. |  | ||||||
|             await trio.sleep(1) |  | ||||||
|             raise KeyboardInterrupt |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(KeyboardInterrupt): |  | ||||||
|         trio.run(main) |  | ||||||
|  | @ -1,82 +0,0 @@ | ||||||
| ''' |  | ||||||
| Reminders for oddities in `trio` that we need to stay aware of and/or |  | ||||||
| want to see changed. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| import pytest |  | ||||||
| import trio |  | ||||||
| from trio_typing import TaskStatus |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @pytest.mark.parametrize( |  | ||||||
|     'use_start_soon', [ |  | ||||||
|         pytest.param( |  | ||||||
|             True, |  | ||||||
|             marks=pytest.mark.xfail(reason="see python-trio/trio#2258") |  | ||||||
|         ), |  | ||||||
|         False, |  | ||||||
|     ] |  | ||||||
| ) |  | ||||||
| def test_stashed_child_nursery(use_start_soon): |  | ||||||
| 
 |  | ||||||
|     _child_nursery = None |  | ||||||
| 
 |  | ||||||
|     async def waits_on_signal( |  | ||||||
|         ev: trio.Event(), |  | ||||||
|         task_status: TaskStatus[trio.Nursery] = trio.TASK_STATUS_IGNORED, |  | ||||||
|     ): |  | ||||||
|         ''' |  | ||||||
|         Do some stuf, then signal other tasks, then yield back to "starter". |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         await ev.wait() |  | ||||||
|         task_status.started() |  | ||||||
| 
 |  | ||||||
|     async def mk_child_nursery( |  | ||||||
|         task_status: TaskStatus = trio.TASK_STATUS_IGNORED, |  | ||||||
|     ): |  | ||||||
|         ''' |  | ||||||
|         Allocate a child sub-nursery and stash it as a global. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         nonlocal _child_nursery |  | ||||||
| 
 |  | ||||||
|         async with trio.open_nursery() as cn: |  | ||||||
|             _child_nursery = cn |  | ||||||
|             task_status.started(cn) |  | ||||||
| 
 |  | ||||||
|             # block until cancelled by parent. |  | ||||||
|             await trio.sleep_forever() |  | ||||||
| 
 |  | ||||||
|     async def sleep_and_err( |  | ||||||
|         ev: trio.Event, |  | ||||||
|         task_status: TaskStatus = trio.TASK_STATUS_IGNORED, |  | ||||||
|     ): |  | ||||||
|         await trio.sleep(0.5) |  | ||||||
|         doggy()  # noqa |  | ||||||
|         ev.set() |  | ||||||
|         task_status.started() |  | ||||||
| 
 |  | ||||||
|     async def main(): |  | ||||||
| 
 |  | ||||||
|         async with ( |  | ||||||
|             trio.open_nursery() as pn, |  | ||||||
|         ): |  | ||||||
|             cn = await pn.start(mk_child_nursery) |  | ||||||
|             assert cn |  | ||||||
| 
 |  | ||||||
|             ev = trio.Event() |  | ||||||
| 
 |  | ||||||
|             if use_start_soon: |  | ||||||
|                 # this causes inf hang |  | ||||||
|                 cn.start_soon(sleep_and_err, ev) |  | ||||||
| 
 |  | ||||||
|             else: |  | ||||||
|                 # this does not. |  | ||||||
|                 await cn.start(sleep_and_err, ev) |  | ||||||
| 
 |  | ||||||
|             with trio.fail_after(1): |  | ||||||
|                 await cn.start(waits_on_signal, ev) |  | ||||||
| 
 |  | ||||||
|     with pytest.raises(NameError): |  | ||||||
|         trio.run(main) |  | ||||||
|  | @ -1,86 +1,39 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| tractor: structured concurrent "actors". | tractor: An actor model micro-framework built on | ||||||
| 
 |          ``trio`` and ``multiprocessing``. | ||||||
| """ | """ | ||||||
| from exceptiongroup import BaseExceptionGroup | from trio import MultiError | ||||||
| 
 | 
 | ||||||
| from ._clustering import open_actor_cluster |  | ||||||
| from ._ipc import Channel | from ._ipc import Channel | ||||||
| from ._streaming import ( | from ._streaming import Context, stream | ||||||
|     Context, | from ._discovery import get_arbiter, find_actor, wait_for_actor | ||||||
|     MsgStream, | from ._trionics import open_nursery | ||||||
|     stream, | from ._state import current_actor, is_root_process | ||||||
|     context, | from ._exceptions import RemoteActorError, ModuleNotExposed | ||||||
| ) | from ._debug import breakpoint, post_mortem | ||||||
| from ._discovery import ( |  | ||||||
|     get_arbiter, |  | ||||||
|     find_actor, |  | ||||||
|     wait_for_actor, |  | ||||||
|     query_actor, |  | ||||||
| ) |  | ||||||
| from ._supervise import open_nursery |  | ||||||
| from ._state import ( |  | ||||||
|     current_actor, |  | ||||||
|     is_root_process, |  | ||||||
| ) |  | ||||||
| from ._exceptions import ( |  | ||||||
|     RemoteActorError, |  | ||||||
|     ModuleNotExposed, |  | ||||||
|     ContextCancelled, |  | ||||||
| ) |  | ||||||
| from ._debug import ( |  | ||||||
|     breakpoint, |  | ||||||
|     post_mortem, |  | ||||||
| ) |  | ||||||
| from . import msg | from . import msg | ||||||
| from ._root import ( | from ._root import run, run_daemon, open_root_actor | ||||||
|     run_daemon, |  | ||||||
|     open_root_actor, |  | ||||||
| ) |  | ||||||
| from ._portal import Portal |  | ||||||
| from ._runtime import Actor |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| __all__ = [ | __all__ = [ | ||||||
|     'Actor', |  | ||||||
|     'Channel', |     'Channel', | ||||||
|     'Context', |     'Context', | ||||||
|     'ContextCancelled', |  | ||||||
|     'ModuleNotExposed', |     'ModuleNotExposed', | ||||||
|     'MsgStream', |     'MultiError', | ||||||
|     'BaseExceptionGroup', |  | ||||||
|     'Portal', |  | ||||||
|     'RemoteActorError', |     'RemoteActorError', | ||||||
|     'breakpoint', |     'breakpoint', | ||||||
|     'context', |  | ||||||
|     'current_actor', |     'current_actor', | ||||||
|     'find_actor', |     'find_actor', | ||||||
|     'get_arbiter', |     'get_arbiter', | ||||||
|     'is_root_process', |     'is_root_process', | ||||||
|     'msg', |     'msg', | ||||||
|     'open_actor_cluster', |  | ||||||
|     'open_nursery', |     'open_nursery', | ||||||
|     'open_root_actor', |     'open_root_actor', | ||||||
|     'post_mortem', |     'post_mortem', | ||||||
|     'query_actor', |     'run', | ||||||
|     'run_daemon', |     'run_daemon', | ||||||
|     'stream', |     'stream', | ||||||
|  |     'wait_for_actor', | ||||||
|     'to_asyncio', |     'to_asyncio', | ||||||
|     'wait_for_actor', |     'wait_for_actor', | ||||||
| ] | ] | ||||||
|  |  | ||||||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -1,22 +1,4 @@ | ||||||
| # tractor: structured concurrent "actors". | """This is the "bootloader" for actors started using the native trio backend. | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ |  | ||||||
| This is the "bootloader" for actors started using the native trio backend. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| import sys | import sys | ||||||
| import trio | import trio | ||||||
|  | @ -24,7 +6,7 @@ import argparse | ||||||
| 
 | 
 | ||||||
| from ast import literal_eval | from ast import literal_eval | ||||||
| 
 | 
 | ||||||
| from ._runtime import Actor | from ._actor import Actor | ||||||
| from ._entry import _trio_main | from ._entry import _trio_main | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -37,15 +19,12 @@ def parse_ipaddr(arg): | ||||||
|     return (str(host), int(port)) |     return (str(host), int(port)) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| from ._entry import _trio_main |  | ||||||
| 
 |  | ||||||
| if __name__ == "__main__": | if __name__ == "__main__": | ||||||
| 
 | 
 | ||||||
|     parser = argparse.ArgumentParser() |     parser = argparse.ArgumentParser() | ||||||
|     parser.add_argument("--uid", type=parse_uid) |     parser.add_argument("--uid", type=parse_uid) | ||||||
|     parser.add_argument("--loglevel", type=str) |     parser.add_argument("--loglevel", type=str) | ||||||
|     parser.add_argument("--parent_addr", type=parse_ipaddr) |     parser.add_argument("--parent_addr", type=parse_ipaddr) | ||||||
|     parser.add_argument("--asyncio", action='store_true') |  | ||||||
|     args = parser.parse_args() |     args = parser.parse_args() | ||||||
| 
 | 
 | ||||||
|     subactor = Actor( |     subactor = Actor( | ||||||
|  | @ -57,6 +36,5 @@ if __name__ == "__main__": | ||||||
| 
 | 
 | ||||||
|     _trio_main( |     _trio_main( | ||||||
|         subactor, |         subactor, | ||||||
|         parent_addr=args.parent_addr, |         parent_addr=args.parent_addr | ||||||
|         infect_asyncio=args.asyncio, |  | ||||||
|     ) |     ) | ||||||
|  | @ -1,74 +0,0 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| Actor cluster helpers. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| from __future__ import annotations |  | ||||||
| 
 |  | ||||||
| from contextlib import asynccontextmanager as acm |  | ||||||
| from multiprocessing import cpu_count |  | ||||||
| from typing import AsyncGenerator, Optional |  | ||||||
| 
 |  | ||||||
| import trio |  | ||||||
| import tractor |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @acm |  | ||||||
| async def open_actor_cluster( |  | ||||||
|     modules: list[str], |  | ||||||
|     count: int = cpu_count(), |  | ||||||
|     names: list[str] | None = None, |  | ||||||
|     hard_kill: bool = False, |  | ||||||
| 
 |  | ||||||
|     # passed through verbatim to ``open_root_actor()`` |  | ||||||
|     **runtime_kwargs, |  | ||||||
| 
 |  | ||||||
| ) -> AsyncGenerator[ |  | ||||||
|     dict[str, tractor.Portal], |  | ||||||
|     None, |  | ||||||
| ]: |  | ||||||
| 
 |  | ||||||
|     portals: dict[str, tractor.Portal] = {} |  | ||||||
| 
 |  | ||||||
|     if not names: |  | ||||||
|         names = [f'worker_{i}' for i in range(count)] |  | ||||||
| 
 |  | ||||||
|     if not len(names) == count: |  | ||||||
|         raise ValueError( |  | ||||||
|             'Number of names is {len(names)} but count it {count}') |  | ||||||
| 
 |  | ||||||
|     async with tractor.open_nursery( |  | ||||||
|         **runtime_kwargs, |  | ||||||
|     ) as an: |  | ||||||
|         async with trio.open_nursery() as n: |  | ||||||
|             uid = tractor.current_actor().uid |  | ||||||
| 
 |  | ||||||
|             async def _start(name: str) -> None: |  | ||||||
|                 name = f'{uid[0]}.{name}' |  | ||||||
|                 portals[name] = await an.start_actor( |  | ||||||
|                     enable_modules=modules, |  | ||||||
|                     name=name, |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|             for name in names: |  | ||||||
|                 n.start_soon(_start, name) |  | ||||||
| 
 |  | ||||||
|         assert len(portals) == count |  | ||||||
|         yield portals |  | ||||||
| 
 |  | ||||||
|         await an.cancel(hard_kill=hard_kill) |  | ||||||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -1,29 +1,9 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| Actor discovery API. | Actor discovery API. | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| from typing import ( | import typing | ||||||
|     Optional, | from typing import Tuple, Optional, Union | ||||||
|     Union, | from async_generator import asynccontextmanager | ||||||
|     AsyncGenerator, |  | ||||||
| ) |  | ||||||
| from contextlib import asynccontextmanager as acm |  | ||||||
| 
 | 
 | ||||||
| from ._ipc import _connect_chan, Channel | from ._ipc import _connect_chan, Channel | ||||||
| from ._portal import ( | from ._portal import ( | ||||||
|  | @ -34,16 +14,14 @@ from ._portal import ( | ||||||
| from ._state import current_actor, _runtime_vars | from ._state import current_actor, _runtime_vars | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @acm | @asynccontextmanager | ||||||
| async def get_arbiter( | async def get_arbiter( | ||||||
| 
 |  | ||||||
|     host: str, |     host: str, | ||||||
|     port: int, |     port: int, | ||||||
| 
 | ) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]: | ||||||
| ) -> AsyncGenerator[Union[Portal, LocalPortal], None]: |     """Return a portal instance connected to a local or remote | ||||||
|     '''Return a portal instance connected to a local or remote |  | ||||||
|     arbiter. |     arbiter. | ||||||
|     ''' |     """ | ||||||
|     actor = current_actor() |     actor = current_actor() | ||||||
| 
 | 
 | ||||||
|     if not actor: |     if not actor: | ||||||
|  | @ -55,75 +33,39 @@ async def get_arbiter( | ||||||
|         yield LocalPortal(actor, Channel((host, port))) |         yield LocalPortal(actor, Channel((host, port))) | ||||||
|     else: |     else: | ||||||
|         async with _connect_chan(host, port) as chan: |         async with _connect_chan(host, port) as chan: | ||||||
| 
 |  | ||||||
|             async with open_portal(chan) as arb_portal: |             async with open_portal(chan) as arb_portal: | ||||||
| 
 |  | ||||||
|                 yield arb_portal |                 yield arb_portal | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @acm | @asynccontextmanager | ||||||
| async def get_root( | async def get_root( | ||||||
|     **kwargs, | **kwargs, | ||||||
| ) -> AsyncGenerator[Portal, None]: | ) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]: | ||||||
| 
 |  | ||||||
|     host, port = _runtime_vars['_root_mailbox'] |     host, port = _runtime_vars['_root_mailbox'] | ||||||
|     assert host is not None |     assert host is not None | ||||||
| 
 |  | ||||||
|     async with _connect_chan(host, port) as chan: |     async with _connect_chan(host, port) as chan: | ||||||
|         async with open_portal(chan, **kwargs) as portal: |         async with open_portal(chan, **kwargs) as portal: | ||||||
|             yield portal |             yield portal | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @acm | @asynccontextmanager | ||||||
| async def query_actor( | async def find_actor( | ||||||
|     name: str, |     name: str, | ||||||
|     arbiter_sockaddr: Optional[tuple[str, int]] = None, |     arbiter_sockaddr: Tuple[str, int] = None | ||||||
|  | ) -> typing.AsyncGenerator[Optional[Portal], None]: | ||||||
|  |     """Ask the arbiter to find actor(s) by name. | ||||||
| 
 | 
 | ||||||
| ) -> AsyncGenerator[tuple[str, int], None]: |     Returns a connected portal to the last registered matching actor | ||||||
|     ''' |     known to the arbiter. | ||||||
|     Simple address lookup for a given actor name. |     """ | ||||||
| 
 |  | ||||||
|     Returns the (socket) address or ``None``. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     actor = current_actor() |     actor = current_actor() | ||||||
|     async with get_arbiter( |     async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal: | ||||||
|         *arbiter_sockaddr or actor._arb_addr |         sockaddr = await arb_portal.run_from_ns('self', 'find_actor', name=name) | ||||||
|     ) as arb_portal: |  | ||||||
| 
 |  | ||||||
|         sockaddr = await arb_portal.run_from_ns( |  | ||||||
|             'self', |  | ||||||
|             'find_actor', |  | ||||||
|             name=name, |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         # TODO: return portals to all available actors - for now just |         # TODO: return portals to all available actors - for now just | ||||||
|         # the last one that registered |         # the last one that registered | ||||||
|         if name == 'arbiter' and actor.is_arbiter: |         if name == 'arbiter' and actor.is_arbiter: | ||||||
|             raise RuntimeError("The current actor is the arbiter") |             raise RuntimeError("The current actor is the arbiter") | ||||||
| 
 |         elif sockaddr: | ||||||
|         yield sockaddr if sockaddr else None |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @acm |  | ||||||
| async def find_actor( |  | ||||||
|     name: str, |  | ||||||
|     arbiter_sockaddr: tuple[str, int] | None = None |  | ||||||
| 
 |  | ||||||
| ) -> AsyncGenerator[Optional[Portal], None]: |  | ||||||
|     ''' |  | ||||||
|     Ask the arbiter to find actor(s) by name. |  | ||||||
| 
 |  | ||||||
|     Returns a connected portal to the last registered matching actor |  | ||||||
|     known to the arbiter. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     async with query_actor( |  | ||||||
|         name=name, |  | ||||||
|         arbiter_sockaddr=arbiter_sockaddr, |  | ||||||
|     ) as sockaddr: |  | ||||||
| 
 |  | ||||||
|         if sockaddr: |  | ||||||
|             async with _connect_chan(*sockaddr) as chan: |             async with _connect_chan(*sockaddr) as chan: | ||||||
|                 async with open_portal(chan) as portal: |                 async with open_portal(chan) as portal: | ||||||
|                     yield portal |                     yield portal | ||||||
|  | @ -131,27 +73,19 @@ async def find_actor( | ||||||
|             yield None |             yield None | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @acm | @asynccontextmanager | ||||||
| async def wait_for_actor( | async def wait_for_actor( | ||||||
|     name: str, |     name: str, | ||||||
|     arbiter_sockaddr: tuple[str, int] | None = None |     arbiter_sockaddr: Tuple[str, int] = None | ||||||
| ) -> AsyncGenerator[Portal, None]: | ) -> typing.AsyncGenerator[Portal, None]: | ||||||
|     """Wait on an actor to register with the arbiter. |     """Wait on an actor to register with the arbiter. | ||||||
| 
 | 
 | ||||||
|     A portal to the first registered actor is returned. |     A portal to the first registered actor is returned. | ||||||
|     """ |     """ | ||||||
|     actor = current_actor() |     actor = current_actor() | ||||||
| 
 |     async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal: | ||||||
|     async with get_arbiter( |         sockaddrs = await arb_portal.run_from_ns('self', 'wait_for_actor', name=name) | ||||||
|         *arbiter_sockaddr or actor._arb_addr, |  | ||||||
|     ) as arb_portal: |  | ||||||
|         sockaddrs = await arb_portal.run_from_ns( |  | ||||||
|             'self', |  | ||||||
|             'wait_for_actor', |  | ||||||
|             name=name, |  | ||||||
|         ) |  | ||||||
|         sockaddr = sockaddrs[-1] |         sockaddr = sockaddrs[-1] | ||||||
| 
 |  | ||||||
|         async with _connect_chan(*sockaddr) as chan: |         async with _connect_chan(*sockaddr) as chan: | ||||||
|             async with open_portal(chan) as portal: |             async with open_portal(chan) as portal: | ||||||
|                 yield portal |                 yield portal | ||||||
|  |  | ||||||
|  | @ -1,64 +1,28 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| Sub-process entry points. | Sub-process entry points. | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| from __future__ import annotations |  | ||||||
| from functools import partial | from functools import partial | ||||||
| from typing import ( | from typing import Tuple, Any | ||||||
|     Any, | import signal | ||||||
|     TYPE_CHECKING, |  | ||||||
| ) |  | ||||||
| 
 | 
 | ||||||
| import trio  # type: ignore | import trio  # type: ignore | ||||||
| 
 | 
 | ||||||
| from .log import ( | from .log import get_console_log, get_logger | ||||||
|     get_console_log, |  | ||||||
|     get_logger, |  | ||||||
| ) |  | ||||||
| from . import _state | from . import _state | ||||||
| from .to_asyncio import run_as_asyncio_guest |  | ||||||
| from ._runtime import ( |  | ||||||
|     async_main, |  | ||||||
|     Actor, |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| if TYPE_CHECKING: |  | ||||||
|     from ._spawn import SpawnMethodKey |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| log = get_logger(__name__) | log = get_logger(__name__) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def _mp_main( | def _mp_main( | ||||||
| 
 |     actor: 'Actor',  # type: ignore | ||||||
|     actor: Actor,  # type: ignore |     accept_addr: Tuple[str, int], | ||||||
|     accept_addr: tuple[str, int], |     forkserver_info: Tuple[Any, Any, Any, Any, Any], | ||||||
|     forkserver_info: tuple[Any, Any, Any, Any, Any], |     start_method: str, | ||||||
|     start_method: SpawnMethodKey, |     parent_addr: Tuple[str, int] = None, | ||||||
|     parent_addr: tuple[str, int] | None = None, |  | ||||||
|     infect_asyncio: bool = False, |  | ||||||
| 
 |  | ||||||
| ) -> None: | ) -> None: | ||||||
|     ''' |     """The routine called *after fork* which invokes a fresh ``trio.run`` | ||||||
|     The routine called *after fork* which invokes a fresh ``trio.run`` |     """ | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     actor._forkserver_info = forkserver_info |     actor._forkserver_info = forkserver_info | ||||||
|     from ._spawn import try_set_start_method |     from ._spawn import try_set_start_method | ||||||
|     spawn_ctx = try_set_start_method(start_method) |     spawn_ctx = try_set_start_method(start_method) | ||||||
|  | @ -76,16 +40,11 @@ def _mp_main( | ||||||
| 
 | 
 | ||||||
|     log.debug(f"parent_addr is {parent_addr}") |     log.debug(f"parent_addr is {parent_addr}") | ||||||
|     trio_main = partial( |     trio_main = partial( | ||||||
|         async_main, |         actor._async_main, | ||||||
|         actor, |  | ||||||
|         accept_addr, |         accept_addr, | ||||||
|         parent_addr=parent_addr |         parent_addr=parent_addr | ||||||
|     ) |     ) | ||||||
|     try: |     try: | ||||||
|         if infect_asyncio: |  | ||||||
|             actor._infected_aio = True |  | ||||||
|             run_as_asyncio_guest(trio_main) |  | ||||||
|         else: |  | ||||||
|         trio.run(trio_main) |         trio.run(trio_main) | ||||||
|     except KeyboardInterrupt: |     except KeyboardInterrupt: | ||||||
|         pass  # handle it the same way trio does? |         pass  # handle it the same way trio does? | ||||||
|  | @ -95,17 +54,16 @@ def _mp_main( | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def _trio_main( | def _trio_main( | ||||||
| 
 |     actor: 'Actor',  # type: ignore | ||||||
|     actor: Actor,  # type: ignore |  | ||||||
|     *, |     *, | ||||||
|     parent_addr: tuple[str, int] | None = None, |     parent_addr: Tuple[str, int] = None, | ||||||
|     infect_asyncio: bool = False, |  | ||||||
| 
 |  | ||||||
| ) -> None: | ) -> None: | ||||||
|     ''' |     """Entry point for a `trio_run_in_process` subactor. | ||||||
|     Entry point for a `trio_run_in_process` subactor. |     """ | ||||||
|  |     # Disable sigint handling in children; | ||||||
|  |     # we don't need it thanks to our cancellation machinery. | ||||||
|  |     signal.signal(signal.SIGINT, signal.SIG_IGN) | ||||||
| 
 | 
 | ||||||
|     ''' |  | ||||||
|     log.info(f"Started new trio process for {actor.uid}") |     log.info(f"Started new trio process for {actor.uid}") | ||||||
| 
 | 
 | ||||||
|     if actor.loglevel is not None: |     if actor.loglevel is not None: | ||||||
|  | @ -120,16 +78,11 @@ def _trio_main( | ||||||
| 
 | 
 | ||||||
|     log.debug(f"parent_addr is {parent_addr}") |     log.debug(f"parent_addr is {parent_addr}") | ||||||
|     trio_main = partial( |     trio_main = partial( | ||||||
|         async_main, |         actor._async_main, | ||||||
|         actor, |  | ||||||
|         parent_addr=parent_addr |         parent_addr=parent_addr | ||||||
|     ) |     ) | ||||||
| 
 | 
 | ||||||
|     try: |     try: | ||||||
|         if infect_asyncio: |  | ||||||
|             actor._infected_aio = True |  | ||||||
|             run_as_asyncio_guest(trio_main) |  | ||||||
|         else: |  | ||||||
|         trio.run(trio_main) |         trio.run(trio_main) | ||||||
|     except KeyboardInterrupt: |     except KeyboardInterrupt: | ||||||
|         log.warning(f"Actor {actor.uid} received KBI") |         log.warning(f"Actor {actor.uid} received KBI") | ||||||
|  |  | ||||||
|  | @ -1,58 +1,36 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| Our classy exception set. | Our classy exception set. | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| from typing import ( | from typing import Dict, Any | ||||||
|     Any, |  | ||||||
|     Optional, |  | ||||||
|     Type, |  | ||||||
| ) |  | ||||||
| import importlib | import importlib | ||||||
| import builtins | import builtins | ||||||
| import traceback | import traceback | ||||||
| 
 | 
 | ||||||
| import exceptiongroup as eg |  | ||||||
| import trio | import trio | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| _this_mod = importlib.import_module(__name__) | _this_mod = importlib.import_module(__name__) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class ActorFailure(Exception): |  | ||||||
|     "General actor failure" |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| class RemoteActorError(Exception): | class RemoteActorError(Exception): | ||||||
|     # TODO: local recontruction of remote exception deats |     # TODO: local recontruction of remote exception deats | ||||||
|     "Remote actor exception bundled locally" |     "Remote actor exception bundled locally" | ||||||
|     def __init__( |     def __init__(self, message, type_str, **msgdata) -> None: | ||||||
|         self, |  | ||||||
|         message: str, |  | ||||||
|         suberror_type: Optional[Type[BaseException]] = None, |  | ||||||
|         **msgdata |  | ||||||
| 
 |  | ||||||
|     ) -> None: |  | ||||||
|         super().__init__(message) |         super().__init__(message) | ||||||
|  |         for ns in [builtins, _this_mod, trio]: | ||||||
|  |             try: | ||||||
|  |                 self.type = getattr(ns, type_str) | ||||||
|  |                 break | ||||||
|  |             except AttributeError: | ||||||
|  |                 continue | ||||||
|  |         else: | ||||||
|  |             self.type = Exception | ||||||
| 
 | 
 | ||||||
|         self.type = suberror_type |  | ||||||
|         self.msgdata = msgdata |         self.msgdata = msgdata | ||||||
| 
 | 
 | ||||||
|  |     # TODO: a trio.MultiError.catch like context manager | ||||||
|  |     # for catching underlying remote errors of a particular type | ||||||
|  | 
 | ||||||
| 
 | 
 | ||||||
| class InternalActorError(RemoteActorError): | class InternalActorError(RemoteActorError): | ||||||
|     """Remote internal ``tractor`` error indicating |     """Remote internal ``tractor`` error indicating | ||||||
|  | @ -60,14 +38,6 @@ class InternalActorError(RemoteActorError): | ||||||
|     """ |     """ | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class TransportClosed(trio.ClosedResourceError): |  | ||||||
|     "Underlying channel transport was closed prior to use" |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| class ContextCancelled(RemoteActorError): |  | ||||||
|     "Inter-actor task context cancelled itself on the callee side." |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| class NoResult(RuntimeError): | class NoResult(RuntimeError): | ||||||
|     "No final result is expected for this actor" |     "No final result is expected for this actor" | ||||||
| 
 | 
 | ||||||
|  | @ -76,102 +46,40 @@ class ModuleNotExposed(ModuleNotFoundError): | ||||||
|     "The requested module is not exposed for RPC" |     "The requested module is not exposed for RPC" | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class NoRuntime(RuntimeError): | def pack_error(exc: BaseException) -> Dict[str, Any]: | ||||||
|     "The root actor has not been initialized yet" |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| class StreamOverrun(trio.TooSlowError): |  | ||||||
|     "This stream was overrun by sender" |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| class AsyncioCancelled(Exception): |  | ||||||
|     ''' |  | ||||||
|     Asyncio cancelled translation (non-base) error |  | ||||||
|     for use with the ``to_asyncio`` module |  | ||||||
|     to be raised in the ``trio`` side task |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def pack_error( |  | ||||||
|     exc: BaseException, |  | ||||||
|     tb=None, |  | ||||||
| 
 |  | ||||||
| ) -> dict[str, Any]: |  | ||||||
|     """Create an "error message" for tranmission over |     """Create an "error message" for tranmission over | ||||||
|     a channel (aka the wire). |     a channel (aka the wire). | ||||||
|     """ |     """ | ||||||
|     if tb: |  | ||||||
|         tb_str = ''.join(traceback.format_tb(tb)) |  | ||||||
|     else: |  | ||||||
|         tb_str = traceback.format_exc() |  | ||||||
| 
 |  | ||||||
|     return { |     return { | ||||||
|         'error': { |         'error': { | ||||||
|             'tb_str': tb_str, |             'tb_str': traceback.format_exc(), | ||||||
|             'type_str': type(exc).__name__, |             'type_str': type(exc).__name__, | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def unpack_error( | def unpack_error( | ||||||
| 
 |     msg: Dict[str, Any], | ||||||
|     msg: dict[str, Any], |  | ||||||
|     chan=None, |     chan=None, | ||||||
|     err_type=RemoteActorError |     err_type=RemoteActorError | ||||||
| 
 |  | ||||||
| ) -> Exception: | ) -> Exception: | ||||||
|     ''' |     """Unpack an 'error' message from the wire | ||||||
|     Unpack an 'error' message from the wire |  | ||||||
|     into a local ``RemoteActorError``. |     into a local ``RemoteActorError``. | ||||||
| 
 |     """ | ||||||
|     ''' |     tb_str = msg['error'].get('tb_str', '') | ||||||
|     __tracebackhide__ = True |     return err_type( | ||||||
|     error = msg['error'] |         f"{chan.uid}\n" + tb_str, | ||||||
| 
 |  | ||||||
|     tb_str = error.get('tb_str', '') |  | ||||||
|     message = f"{chan.uid}\n" + tb_str |  | ||||||
|     type_name = error['type_str'] |  | ||||||
|     suberror_type: Type[BaseException] = Exception |  | ||||||
| 
 |  | ||||||
|     if type_name == 'ContextCancelled': |  | ||||||
|         err_type = ContextCancelled |  | ||||||
|         suberror_type = trio.Cancelled |  | ||||||
| 
 |  | ||||||
|     else:  # try to lookup a suitable local error type |  | ||||||
|         for ns in [ |  | ||||||
|             builtins, |  | ||||||
|             _this_mod, |  | ||||||
|             eg, |  | ||||||
|             trio, |  | ||||||
|         ]: |  | ||||||
|             try: |  | ||||||
|                 suberror_type = getattr(ns, type_name) |  | ||||||
|                 break |  | ||||||
|             except AttributeError: |  | ||||||
|                 continue |  | ||||||
| 
 |  | ||||||
|     exc = err_type( |  | ||||||
|         message, |  | ||||||
|         suberror_type=suberror_type, |  | ||||||
| 
 |  | ||||||
|         # unpack other fields into error type init |  | ||||||
|         **msg['error'], |         **msg['error'], | ||||||
|     ) |     ) | ||||||
| 
 | 
 | ||||||
|     return exc |  | ||||||
| 
 |  | ||||||
| 
 | 
 | ||||||
| def is_multi_cancelled(exc: BaseException) -> bool: | def is_multi_cancelled(exc: BaseException) -> bool: | ||||||
|     ''' |     """Predicate to determine if a ``trio.MultiError`` contains only | ||||||
|     Predicate to determine if a possible ``eg.BaseExceptionGroup`` contains |     ``trio.Cancelled`` sub-exceptions (and is likely the result of | ||||||
|     only ``trio.Cancelled`` sub-exceptions (and is likely the result of |  | ||||||
|     cancelling a collection of subtasks. |     cancelling a collection of subtasks. | ||||||
| 
 | 
 | ||||||
|     ''' |     """ | ||||||
|     if isinstance(exc, eg.BaseExceptionGroup): |     return not trio.MultiError.filter( | ||||||
|         return exc.subgroup( |         lambda exc: exc if not isinstance(exc, trio.Cancelled) else None, | ||||||
|             lambda exc: isinstance(exc, trio.Cancelled) |         exc, | ||||||
|         ) is not None |     ) | ||||||
| 
 |  | ||||||
|     return False |  | ||||||
|  |  | ||||||
|  | @ -1,19 +1,3 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| This is near-copy of the 3.8 stdlib's ``multiprocessing.forkserver.py`` | This is near-copy of the 3.8 stdlib's ``multiprocessing.forkserver.py`` | ||||||
| with some hackery to prevent any more then a single forkserver and | with some hackery to prevent any more then a single forkserver and | ||||||
|  |  | ||||||
							
								
								
									
										477
									
								
								tractor/_ipc.py
								
								
								
								
							
							
						
						
									
										477
									
								
								tractor/_ipc.py
								
								
								
								
							|  | @ -1,240 +1,84 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| Inter-process comms abstractions | Inter-process comms abstractions | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| from __future__ import annotations |  | ||||||
| import platform |  | ||||||
| import struct |  | ||||||
| import typing | import typing | ||||||
| from collections.abc import ( | from typing import Any, Tuple, Optional | ||||||
|     AsyncGenerator, | from functools import partial | ||||||
|     AsyncIterator, | import inspect | ||||||
| ) |  | ||||||
| from typing import ( |  | ||||||
|     Any, |  | ||||||
|     runtime_checkable, |  | ||||||
|     Optional, |  | ||||||
|     Protocol, |  | ||||||
|     Type, |  | ||||||
|     TypeVar, |  | ||||||
| ) |  | ||||||
| 
 | 
 | ||||||
| from tricycle import BufferedReceiveStream | import msgpack | ||||||
| import msgspec |  | ||||||
| import trio | import trio | ||||||
| from async_generator import asynccontextmanager | from async_generator import asynccontextmanager | ||||||
| 
 | 
 | ||||||
| from .log import get_logger | from .log import get_logger | ||||||
| from ._exceptions import TransportClosed | log = get_logger('ipc') | ||||||
| log = get_logger(__name__) | 
 | ||||||
|  | # :eyeroll: | ||||||
|  | try: | ||||||
|  |     import msgpack_numpy | ||||||
|  |     Unpacker = msgpack_numpy.Unpacker | ||||||
|  | except ImportError: | ||||||
|  |     # just plain ``msgpack`` requires tweaking key settings | ||||||
|  |     Unpacker = partial(msgpack.Unpacker, strict_map_key=False) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| _is_windows = platform.system() == 'Windows' | class MsgpackStream: | ||||||
| log = get_logger(__name__) |     """A ``trio.SocketStream`` delivering ``msgpack`` formatted data. | ||||||
| 
 |     """ | ||||||
| 
 |  | ||||||
| def get_stream_addrs(stream: trio.SocketStream) -> tuple: |  | ||||||
|     # should both be IP sockets |  | ||||||
|     lsockname = stream.socket.getsockname() |  | ||||||
|     rsockname = stream.socket.getpeername() |  | ||||||
|     return ( |  | ||||||
|         tuple(lsockname[:2]), |  | ||||||
|         tuple(rsockname[:2]), |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| MsgType = TypeVar("MsgType") |  | ||||||
| 
 |  | ||||||
| # TODO: consider using a generic def and indexing with our eventual |  | ||||||
| # msg definition/types? |  | ||||||
| # - https://docs.python.org/3/library/typing.html#typing.Protocol |  | ||||||
| # - https://jcristharif.com/msgspec/usage.html#structs |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| @runtime_checkable |  | ||||||
| class MsgTransport(Protocol[MsgType]): |  | ||||||
| 
 |  | ||||||
|     stream: trio.SocketStream |  | ||||||
|     drained: list[MsgType] |  | ||||||
| 
 |  | ||||||
|     def __init__(self, stream: trio.SocketStream) -> None: |     def __init__(self, stream: trio.SocketStream) -> None: | ||||||
|         ... |  | ||||||
| 
 |  | ||||||
|     # XXX: should this instead be called `.sendall()`? |  | ||||||
|     async def send(self, msg: MsgType) -> None: |  | ||||||
|         ... |  | ||||||
| 
 |  | ||||||
|     async def recv(self) -> MsgType: |  | ||||||
|         ... |  | ||||||
| 
 |  | ||||||
|     def __aiter__(self) -> MsgType: |  | ||||||
|         ... |  | ||||||
| 
 |  | ||||||
|     def connected(self) -> bool: |  | ||||||
|         ... |  | ||||||
| 
 |  | ||||||
|     # defining this sync otherwise it causes a mypy error because it |  | ||||||
|     # can't figure out it's a generator i guess?..? |  | ||||||
|     def drain(self) -> AsyncIterator[dict]: |  | ||||||
|         ... |  | ||||||
| 
 |  | ||||||
|     @property |  | ||||||
|     def laddr(self) -> tuple[str, int]: |  | ||||||
|         ... |  | ||||||
| 
 |  | ||||||
|     @property |  | ||||||
|     def raddr(self) -> tuple[str, int]: |  | ||||||
|         ... |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| # TODO: not sure why we have to inherit here, but it seems to be an |  | ||||||
| # issue with ``get_msg_transport()`` returning a ``Type[Protocol]``; |  | ||||||
| # probably should make a `mypy` issue? |  | ||||||
| class MsgpackTCPStream(MsgTransport): |  | ||||||
|     ''' |  | ||||||
|     A ``trio.SocketStream`` delivering ``msgpack`` formatted data |  | ||||||
|     using the ``msgspec`` codec lib. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     def __init__( |  | ||||||
|         self, |  | ||||||
|         stream: trio.SocketStream, |  | ||||||
|         prefix_size: int = 4, |  | ||||||
| 
 |  | ||||||
|     ) -> None: |  | ||||||
| 
 |  | ||||||
|         self.stream = stream |         self.stream = stream | ||||||
|         assert self.stream.socket |         assert self.stream.socket | ||||||
| 
 |  | ||||||
|         # should both be IP sockets |         # should both be IP sockets | ||||||
|         self._laddr, self._raddr = get_stream_addrs(stream) |         lsockname = stream.socket.getsockname() | ||||||
|  |         assert isinstance(lsockname, tuple) | ||||||
|  |         self._laddr = lsockname[:2] | ||||||
|  |         rsockname = stream.socket.getpeername() | ||||||
|  |         assert isinstance(rsockname, tuple) | ||||||
|  |         self._raddr = rsockname[:2] | ||||||
| 
 | 
 | ||||||
|         # create read loop instance |  | ||||||
|         self._agen = self._iter_packets() |         self._agen = self._iter_packets() | ||||||
|         self._send_lock = trio.StrictFIFOLock() |         self._send_lock = trio.StrictFIFOLock() | ||||||
| 
 | 
 | ||||||
|         # public i guess? |     async def _iter_packets(self) -> typing.AsyncGenerator[dict, None]: | ||||||
|         self.drained: list[dict] = [] |         """Yield packets from the underlying stream. | ||||||
| 
 |         """ | ||||||
|         self.recv_stream = BufferedReceiveStream(transport_stream=stream) |         unpacker = Unpacker( | ||||||
|         self.prefix_size = prefix_size |             raw=False, | ||||||
| 
 |             use_list=False, | ||||||
|         # TODO: struct aware messaging coders |         ) | ||||||
|         self.encode = msgspec.msgpack.Encoder().encode |  | ||||||
|         self.decode = msgspec.msgpack.Decoder().decode  # dict[str, Any]) |  | ||||||
| 
 |  | ||||||
|     async def _iter_packets(self) -> AsyncGenerator[dict, None]: |  | ||||||
|         '''Yield packets from the underlying stream. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         import msgspec  # noqa |  | ||||||
|         decodes_failed: int = 0 |  | ||||||
| 
 |  | ||||||
|         while True: |         while True: | ||||||
|             try: |             try: | ||||||
|                 header = await self.recv_stream.receive_exactly(4) |                 data = await self.stream.receive_some(2**10) | ||||||
|  |                 log.trace(f"received {data}")  # type: ignore | ||||||
|  |             except trio.BrokenResourceError: | ||||||
|  |                 log.warning(f"Stream connection {self.raddr} broke") | ||||||
|  |                 return | ||||||
| 
 | 
 | ||||||
|             except ( |             if data == b'': | ||||||
|                 ValueError, |                 log.debug(f"Stream connection {self.raddr} was closed") | ||||||
|                 ConnectionResetError, |                 return | ||||||
| 
 | 
 | ||||||
|                 # not sure entirely why we need this but without it we |             unpacker.feed(data) | ||||||
|                 # seem to be getting racy failures here on |             for packet in unpacker: | ||||||
|                 # arbiter/registry name subs.. |                 yield packet | ||||||
|                 trio.BrokenResourceError, |  | ||||||
|             ): |  | ||||||
|                 raise TransportClosed( |  | ||||||
|                     f'transport {self} was already closed prior ro read' |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|             if header == b'': |  | ||||||
|                 raise TransportClosed( |  | ||||||
|                     f'transport {self} was already closed prior ro read' |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|             size, = struct.unpack("<I", header) |  | ||||||
| 
 |  | ||||||
|             log.transport(f'received header {size}')  # type: ignore |  | ||||||
| 
 |  | ||||||
|             msg_bytes = await self.recv_stream.receive_exactly(size) |  | ||||||
| 
 |  | ||||||
|             log.transport(f"received {msg_bytes}")  # type: ignore |  | ||||||
|             try: |  | ||||||
|                 yield self.decode(msg_bytes) |  | ||||||
|             except ( |  | ||||||
|                 msgspec.DecodeError, |  | ||||||
|                 UnicodeDecodeError, |  | ||||||
|             ): |  | ||||||
|                 if decodes_failed < 4: |  | ||||||
|                     # ignore decoding errors for now and assume they have to |  | ||||||
|                     # do with a channel drop - hope that receiving from the |  | ||||||
|                     # channel will raise an expected error and bubble up. |  | ||||||
|                     try: |  | ||||||
|                         msg_str: str | bytes = msg_bytes.decode() |  | ||||||
|                     except UnicodeDecodeError: |  | ||||||
|                         msg_str = msg_bytes |  | ||||||
| 
 |  | ||||||
|                     log.error( |  | ||||||
|                         '`msgspec` failed to decode!?\n' |  | ||||||
|                         'dumping bytes:\n' |  | ||||||
|                         f'{msg_str!r}' |  | ||||||
|                     ) |  | ||||||
|                     decodes_failed += 1 |  | ||||||
|                 else: |  | ||||||
|                     raise |  | ||||||
| 
 |  | ||||||
|     async def send(self, msg: Any) -> None: |  | ||||||
|         async with self._send_lock: |  | ||||||
| 
 |  | ||||||
|             bytes_data: bytes = self.encode(msg) |  | ||||||
| 
 |  | ||||||
|             # supposedly the fastest says, |  | ||||||
|             # https://stackoverflow.com/a/54027962 |  | ||||||
|             size: bytes = struct.pack("<I", len(bytes_data)) |  | ||||||
| 
 |  | ||||||
|             return await self.stream.send_all(size + bytes_data) |  | ||||||
| 
 | 
 | ||||||
|     @property |     @property | ||||||
|     def laddr(self) -> tuple[str, int]: |     def laddr(self) -> Tuple[Any, ...]: | ||||||
|         return self._laddr |         return self._laddr | ||||||
| 
 | 
 | ||||||
|     @property |     @property | ||||||
|     def raddr(self) -> tuple[str, int]: |     def raddr(self) -> Tuple[Any, ...]: | ||||||
|         return self._raddr |         return self._raddr | ||||||
| 
 | 
 | ||||||
|  |     # XXX: should this instead be called `.sendall()`? | ||||||
|  |     async def send(self, data: Any) -> None: | ||||||
|  |         async with self._send_lock: | ||||||
|  |             return await self.stream.send_all( | ||||||
|  |                 msgpack.dumps(data, use_bin_type=True)) | ||||||
|  | 
 | ||||||
|     async def recv(self) -> Any: |     async def recv(self) -> Any: | ||||||
|         return await self._agen.asend(None) |         return await self._agen.asend(None) | ||||||
| 
 | 
 | ||||||
|     async def drain(self) -> AsyncIterator[dict]: |  | ||||||
|         ''' |  | ||||||
|         Drain the stream's remaining messages sent from |  | ||||||
|         the far end until the connection is closed by |  | ||||||
|         the peer. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         try: |  | ||||||
|             async for msg in self._iter_packets(): |  | ||||||
|                 self.drained.append(msg) |  | ||||||
|         except TransportClosed: |  | ||||||
|             for msg in self.drained: |  | ||||||
|                 yield msg |  | ||||||
| 
 |  | ||||||
|     def __aiter__(self): |     def __aiter__(self): | ||||||
|         return self._agen |         return self._agen | ||||||
| 
 | 
 | ||||||
|  | @ -242,87 +86,32 @@ class MsgpackTCPStream(MsgTransport): | ||||||
|         return self.stream.socket.fileno() != -1 |         return self.stream.socket.fileno() != -1 | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def get_msg_transport( |  | ||||||
| 
 |  | ||||||
|     key: tuple[str, str], |  | ||||||
| 
 |  | ||||||
| ) -> Type[MsgTransport]: |  | ||||||
| 
 |  | ||||||
|     return { |  | ||||||
|         ('msgpack', 'tcp'): MsgpackTCPStream, |  | ||||||
|     }[key] |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| class Channel: | class Channel: | ||||||
|     ''' |     """An inter-process channel for communication between (remote) actors. | ||||||
|     An inter-process channel for communication between (remote) actors. |  | ||||||
| 
 | 
 | ||||||
|     Wraps a ``MsgStream``: transport + encoding IPC connection. |     Currently the only supported transport is a ``trio.SocketStream``. | ||||||
| 
 |     """ | ||||||
|     Currently we only support ``trio.SocketStream`` for transport |  | ||||||
|     (aka TCP) and the ``msgpack`` interchange format via the ``msgspec`` |  | ||||||
|     codec libary. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     def __init__( |     def __init__( | ||||||
| 
 |  | ||||||
|         self, |         self, | ||||||
|         destaddr: Optional[tuple[str, int]], |         destaddr: Optional[Tuple[str, int]] = None, | ||||||
| 
 |         on_reconnect: typing.Callable[..., typing.Awaitable] = None, | ||||||
|         msg_transport_type_key: tuple[str, str] = ('msgpack', 'tcp'), |         auto_reconnect: bool = False, | ||||||
| 
 |         stream: trio.SocketStream = None,  # expected to be active | ||||||
|         # TODO: optional reconnection support? |  | ||||||
|         # auto_reconnect: bool = False, |  | ||||||
|         # on_reconnect: typing.Callable[..., typing.Awaitable] = None, |  | ||||||
| 
 |  | ||||||
|     ) -> None: |     ) -> None: | ||||||
| 
 |         self._recon_seq = on_reconnect | ||||||
|         # self._recon_seq = on_reconnect |         self._autorecon = auto_reconnect | ||||||
|         # self._autorecon = auto_reconnect |         self.msgstream: Optional[MsgpackStream] = MsgpackStream( | ||||||
| 
 |             stream) if stream else None | ||||||
|         self._destaddr = destaddr |         if self.msgstream and destaddr: | ||||||
|         self._transport_key = msg_transport_type_key |             raise ValueError( | ||||||
| 
 |                 f"A stream was provided with local addr {self.laddr}" | ||||||
|         # Either created in ``.connect()`` or passed in by |             ) | ||||||
|         # user in ``.from_stream()``. |         self._destaddr = self.msgstream.raddr if self.msgstream else destaddr | ||||||
|         self._stream: Optional[trio.SocketStream] = None |  | ||||||
|         self.msgstream: Optional[MsgTransport] = None |  | ||||||
| 
 |  | ||||||
|         # set after handshake - always uid of far end |         # set after handshake - always uid of far end | ||||||
|         self.uid: Optional[tuple[str, str]] = None |         self.uid: Optional[Tuple[str, str]] = None | ||||||
| 
 |         # set if far end actor errors internally | ||||||
|  |         self._exc: Optional[Exception] = None | ||||||
|         self._agen = self._aiter_recv() |         self._agen = self._aiter_recv() | ||||||
|         self._exc: Optional[Exception] = None  # set if far end actor errors |  | ||||||
|         self._closed: bool = False |  | ||||||
|         # flag set on ``Portal.cancel_actor()`` indicating |  | ||||||
|         # remote (peer) cancellation of the far end actor runtime. |  | ||||||
|         self._cancel_called: bool = False  # set on ``Portal.cancel_actor()`` |  | ||||||
| 
 |  | ||||||
|     @classmethod |  | ||||||
|     def from_stream( |  | ||||||
|         cls, |  | ||||||
|         stream: trio.SocketStream, |  | ||||||
|         **kwargs, |  | ||||||
| 
 |  | ||||||
|     ) -> Channel: |  | ||||||
| 
 |  | ||||||
|         src, dst = get_stream_addrs(stream) |  | ||||||
|         chan = Channel(destaddr=dst, **kwargs) |  | ||||||
| 
 |  | ||||||
|         # set immediately here from provided instance |  | ||||||
|         chan._stream = stream |  | ||||||
|         chan.set_msg_transport(stream) |  | ||||||
|         return chan |  | ||||||
| 
 |  | ||||||
|     def set_msg_transport( |  | ||||||
|         self, |  | ||||||
|         stream: trio.SocketStream, |  | ||||||
|         type_key: Optional[tuple[str, str]] = None, |  | ||||||
| 
 |  | ||||||
|     ) -> MsgTransport: |  | ||||||
|         type_key = type_key or self._transport_key |  | ||||||
|         self.msgstream = get_msg_transport(type_key)(stream) |  | ||||||
|         return self.msgstream |  | ||||||
| 
 | 
 | ||||||
|     def __repr__(self) -> str: |     def __repr__(self) -> str: | ||||||
|         if self.msgstream: |         if self.msgstream: | ||||||
|  | @ -332,65 +121,43 @@ class Channel: | ||||||
|         return object.__repr__(self) |         return object.__repr__(self) | ||||||
| 
 | 
 | ||||||
|     @property |     @property | ||||||
|     def laddr(self) -> Optional[tuple[str, int]]: |     def laddr(self) -> Optional[Tuple[Any, ...]]: | ||||||
|         return self.msgstream.laddr if self.msgstream else None |         return self.msgstream.laddr if self.msgstream else None | ||||||
| 
 | 
 | ||||||
|     @property |     @property | ||||||
|     def raddr(self) -> Optional[tuple[str, int]]: |     def raddr(self) -> Optional[Tuple[Any, ...]]: | ||||||
|         return self.msgstream.raddr if self.msgstream else None |         return self.msgstream.raddr if self.msgstream else None | ||||||
| 
 | 
 | ||||||
|     async def connect( |     async def connect( | ||||||
|         self, |         self, destaddr: Tuple[Any, ...] = None, | ||||||
|         destaddr: tuple[Any, ...] | None = None, |  | ||||||
|         **kwargs |         **kwargs | ||||||
| 
 |     ) -> trio.SocketStream: | ||||||
|     ) -> MsgTransport: |  | ||||||
| 
 |  | ||||||
|         if self.connected(): |         if self.connected(): | ||||||
|             raise RuntimeError("channel is already connected?") |             raise RuntimeError("channel is already connected?") | ||||||
| 
 |  | ||||||
|         destaddr = destaddr or self._destaddr |         destaddr = destaddr or self._destaddr | ||||||
|         assert isinstance(destaddr, tuple) |         assert isinstance(destaddr, tuple) | ||||||
| 
 |         stream = await trio.open_tcp_stream(*destaddr, **kwargs) | ||||||
|         stream = await trio.open_tcp_stream( |         self.msgstream = MsgpackStream(stream) | ||||||
|             *destaddr, |         return stream | ||||||
|             **kwargs |  | ||||||
|         ) |  | ||||||
|         msgstream = self.set_msg_transport(stream) |  | ||||||
| 
 |  | ||||||
|         log.transport( |  | ||||||
|             f'Opened channel[{type(msgstream)}]: {self.laddr} -> {self.raddr}' |  | ||||||
|         ) |  | ||||||
|         return msgstream |  | ||||||
| 
 | 
 | ||||||
|     async def send(self, item: Any) -> None: |     async def send(self, item: Any) -> None: | ||||||
| 
 |         log.trace(f"send `{item}`")  # type: ignore | ||||||
|         log.transport(f"send `{item}`")  # type: ignore |  | ||||||
|         assert self.msgstream |         assert self.msgstream | ||||||
| 
 |  | ||||||
|         await self.msgstream.send(item) |         await self.msgstream.send(item) | ||||||
| 
 | 
 | ||||||
|     async def recv(self) -> Any: |     async def recv(self) -> Any: | ||||||
|         assert self.msgstream |         assert self.msgstream | ||||||
|  |         try: | ||||||
|             return await self.msgstream.recv() |             return await self.msgstream.recv() | ||||||
| 
 |         except trio.BrokenResourceError: | ||||||
|         # try: |             if self._autorecon: | ||||||
|         #     return await self.msgstream.recv() |                 await self._reconnect() | ||||||
|         # except trio.BrokenResourceError: |                 return await self.recv() | ||||||
|         #     if self._autorecon: |  | ||||||
|         #         await self._reconnect() |  | ||||||
|         #         return await self.recv() |  | ||||||
|         #     raise |  | ||||||
| 
 | 
 | ||||||
|     async def aclose(self) -> None: |     async def aclose(self) -> None: | ||||||
| 
 |         log.debug(f"Closing {self}") | ||||||
|         log.transport( |  | ||||||
|             f'Closing channel to {self.uid} ' |  | ||||||
|             f'{self.laddr} -> {self.raddr}' |  | ||||||
|         ) |  | ||||||
|         assert self.msgstream |         assert self.msgstream | ||||||
|         await self.msgstream.stream.aclose() |         await self.msgstream.stream.aclose() | ||||||
|         self._closed = True |  | ||||||
| 
 | 
 | ||||||
|     async def __aenter__(self): |     async def __aenter__(self): | ||||||
|         await self.connect() |         await self.connect() | ||||||
|  | @ -402,44 +169,40 @@ class Channel: | ||||||
|     def __aiter__(self): |     def __aiter__(self): | ||||||
|         return self._agen |         return self._agen | ||||||
| 
 | 
 | ||||||
|     # async def _reconnect(self) -> None: |     async def _reconnect(self) -> None: | ||||||
|     #     """Handle connection failures by polling until a reconnect can be |         """Handle connection failures by polling until a reconnect can be | ||||||
|     #     established. |         established. | ||||||
|     #     """ |         """ | ||||||
|     #     down = False |         down = False | ||||||
|     #     while True: |         while True: | ||||||
|     #         try: |             try: | ||||||
|     #             with trio.move_on_after(3) as cancel_scope: |                 with trio.move_on_after(3) as cancel_scope: | ||||||
|     #                 await self.connect() |                     await self.connect() | ||||||
|     #             cancelled = cancel_scope.cancelled_caught |                 cancelled = cancel_scope.cancelled_caught | ||||||
|     #             if cancelled: |                 if cancelled: | ||||||
|     #                 log.transport( |                     log.warning( | ||||||
|     #                     "Reconnect timed out after 3 seconds, retrying...") |                         "Reconnect timed out after 3 seconds, retrying...") | ||||||
|     #                 continue |                     continue | ||||||
|     #             else: |                 else: | ||||||
|     #                 log.transport("Stream connection re-established!") |                     log.warning("Stream connection re-established!") | ||||||
| 
 |                     # run any reconnection sequence | ||||||
|     #                 # TODO: run any reconnection sequence |                     on_recon = self._recon_seq | ||||||
|     #                 # on_recon = self._recon_seq |                     if on_recon: | ||||||
|     #                 # if on_recon: |                         await on_recon(self) | ||||||
|     #                 #     await on_recon(self) |                     break | ||||||
| 
 |             except (OSError, ConnectionRefusedError): | ||||||
|     #                 break |                 if not down: | ||||||
|     #         except (OSError, ConnectionRefusedError): |                     down = True | ||||||
|     #             if not down: |                     log.warning( | ||||||
|     #                 down = True |                         f"Connection to {self.raddr} went down, waiting" | ||||||
|     #                 log.transport( |                         " for re-establishment") | ||||||
|     #                     f"Connection to {self.raddr} went down, waiting" |                 await trio.sleep(1) | ||||||
|     #                     " for re-establishment") |  | ||||||
|     #             await trio.sleep(1) |  | ||||||
| 
 | 
 | ||||||
|     async def _aiter_recv( |     async def _aiter_recv( | ||||||
|         self |         self | ||||||
|     ) -> AsyncGenerator[Any, None]: |     ) -> typing.AsyncGenerator[Any, None]: | ||||||
|         ''' |         """Async iterate items from underlying stream. | ||||||
|         Async iterate items from underlying stream. |         """ | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         assert self.msgstream |         assert self.msgstream | ||||||
|         while True: |         while True: | ||||||
|             try: |             try: | ||||||
|  | @ -452,14 +215,16 @@ class Channel: | ||||||
|                     #     await self.msgstream.send(sent) |                     #     await self.msgstream.send(sent) | ||||||
|             except trio.BrokenResourceError: |             except trio.BrokenResourceError: | ||||||
| 
 | 
 | ||||||
|                 # if not self._autorecon: |                 if not self._autorecon: | ||||||
|                     raise |                     raise | ||||||
| 
 | 
 | ||||||
|             await self.aclose() |             await self.aclose() | ||||||
| 
 | 
 | ||||||
|             # if self._autorecon:  # attempt reconnect |             if self._autorecon:  # attempt reconnect | ||||||
|             #     await self._reconnect() |                 await self._reconnect() | ||||||
|             #     continue |                 continue | ||||||
|  |             else: | ||||||
|  |                 return | ||||||
| 
 | 
 | ||||||
|     def connected(self) -> bool: |     def connected(self) -> bool: | ||||||
|         return self.msgstream.connected() if self.msgstream else False |         return self.msgstream.connected() if self.msgstream else False | ||||||
|  | @ -469,11 +234,9 @@ class Channel: | ||||||
| async def _connect_chan( | async def _connect_chan( | ||||||
|     host: str, port: int |     host: str, port: int | ||||||
| ) -> typing.AsyncGenerator[Channel, None]: | ) -> typing.AsyncGenerator[Channel, None]: | ||||||
|     ''' |     """Create and connect a channel with disconnect on context manager | ||||||
|     Create and connect a channel with disconnect on context manager |  | ||||||
|     teardown. |     teardown. | ||||||
| 
 |     """ | ||||||
|     ''' |  | ||||||
|     chan = Channel((host, port)) |     chan = Channel((host, port)) | ||||||
|     await chan.connect() |     await chan.connect() | ||||||
|     yield chan |     yield chan | ||||||
|  |  | ||||||
|  | @ -1,39 +1,23 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| Helpers pulled mostly verbatim from ``multiprocessing.spawn`` | Helpers pulled mostly verbatim from ``multiprocessing.spawn`` | ||||||
| to aid with "fixing up" the ``__main__`` module in subprocesses. | to aid with "fixing up" the ``__main__`` module in subprocesses. | ||||||
| 
 | 
 | ||||||
| These helpers are needed for any spawing backend that doesn't already | These helpers are needed for any spawing backend that doesn't already handle this. | ||||||
| handle this. For example when using ``trio_run_in_process`` it is needed | For example when using ``trio_run_in_process`` it is needed but obviously not when | ||||||
| but obviously not when we're already using ``multiprocessing``. | we're already using ``multiprocessing``. | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| import os | import os | ||||||
| import sys | import sys | ||||||
| import platform | import platform | ||||||
| import types | import types | ||||||
| import runpy | import runpy | ||||||
|  | from typing import Dict | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| ORIGINAL_DIR = os.path.abspath(os.getcwd()) | ORIGINAL_DIR = os.path.abspath(os.getcwd()) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def _mp_figure_out_main() -> dict[str, str]: | def _mp_figure_out_main() -> Dict[str, str]: | ||||||
|     """Taken from ``multiprocessing.spawn.get_preparation_data()``. |     """Taken from ``multiprocessing.spawn.get_preparation_data()``. | ||||||
| 
 | 
 | ||||||
|     Retrieve parent actor `__main__` module data. |     Retrieve parent actor `__main__` module data. | ||||||
|  |  | ||||||
|  | @ -1,145 +1,306 @@ | ||||||
| # tractor: structured concurrent "actors". | """ | ||||||
| # Copyright 2018-eternity Tyler Goodlet. | Portal api | ||||||
| 
 | """ | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| Memory boundary "Portals": an API for structured |  | ||||||
| concurrency linked tasks running in disparate memory domains. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| from __future__ import annotations |  | ||||||
| import importlib | import importlib | ||||||
| import inspect | import inspect | ||||||
| from typing import ( | import typing | ||||||
|     Any, Optional, | from typing import Tuple, Any, Dict, Optional, Set, Iterator | ||||||
|     Callable, AsyncGenerator, |  | ||||||
|     Type, |  | ||||||
| ) |  | ||||||
| from functools import partial | from functools import partial | ||||||
| from dataclasses import dataclass | from dataclasses import dataclass | ||||||
| from pprint import pformat | from contextlib import contextmanager | ||||||
| import warnings | import warnings | ||||||
| 
 | 
 | ||||||
| import trio | import trio | ||||||
| from async_generator import asynccontextmanager | from async_generator import asynccontextmanager | ||||||
| 
 | 
 | ||||||
| from .trionics import maybe_open_nursery |  | ||||||
| from ._state import current_actor | from ._state import current_actor | ||||||
| from ._ipc import Channel | from ._ipc import Channel | ||||||
| from .log import get_logger | from .log import get_logger | ||||||
| from .msg import NamespacePath | from ._exceptions import unpack_error, NoResult, RemoteActorError | ||||||
| from ._exceptions import ( |  | ||||||
|     unpack_error, |  | ||||||
|     NoResult, |  | ||||||
|     ContextCancelled, |  | ||||||
| ) |  | ||||||
| from ._streaming import ( |  | ||||||
|     Context, |  | ||||||
|     MsgStream, |  | ||||||
| ) |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| log = get_logger(__name__) | log = get_logger('tractor') | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def _unwrap_msg( | @asynccontextmanager | ||||||
|     msg: dict[str, Any], | async def maybe_open_nursery( | ||||||
|     channel: Channel |     nursery: trio.Nursery = None, | ||||||
|  |     shield: bool = False, | ||||||
|  | ) -> typing.AsyncGenerator[trio.Nursery, Any]: | ||||||
|  |     """Create a new nursery if None provided. | ||||||
| 
 | 
 | ||||||
| ) -> Any: |     Blocks on exit as expected if no input nursery is provided. | ||||||
|     __tracebackhide__ = True |     """ | ||||||
|  |     if nursery is not None: | ||||||
|  |         yield nursery | ||||||
|  |     else: | ||||||
|  |         async with trio.open_nursery() as nursery: | ||||||
|  |             nursery.cancel_scope.shield = shield | ||||||
|  |             yield nursery | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class ReceiveStream(trio.abc.ReceiveChannel): | ||||||
|  |     """A wrapper around a ``trio._channel.MemoryReceiveChannel`` with | ||||||
|  |     special behaviour for signalling stream termination across an | ||||||
|  |     inter-actor ``Channel``. This is the type returned to a local task | ||||||
|  |     which invoked a remote streaming function using `Portal.run()`. | ||||||
|  | 
 | ||||||
|  |     Termination rules: | ||||||
|  |     - if the local task signals stop iteration a cancel signal is | ||||||
|  |       relayed to the remote task indicating to stop streaming | ||||||
|  |     - if the remote task signals the end of a stream, raise a | ||||||
|  |       ``StopAsyncIteration`` to terminate the local ``async for`` | ||||||
|  | 
 | ||||||
|  |     """ | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         cid: str, | ||||||
|  |         rx_chan: trio.abc.ReceiveChannel, | ||||||
|  |         portal: 'Portal', | ||||||
|  |     ) -> None: | ||||||
|  |         self._cid = cid | ||||||
|  |         self._rx_chan = rx_chan | ||||||
|  |         self._portal = portal | ||||||
|  |         self._shielded = False | ||||||
|  | 
 | ||||||
|  |     # delegate directly to underlying mem channel | ||||||
|  |     def receive_nowait(self): | ||||||
|  |         return self._rx_chan.receive_nowait() | ||||||
|  | 
 | ||||||
|  |     async def receive(self): | ||||||
|  |         try: | ||||||
|  |             msg = await self._rx_chan.receive() | ||||||
|  |             return msg['yield'] | ||||||
|  | 
 | ||||||
|  |         except trio.ClosedResourceError: | ||||||
|  |             # when the send is closed we assume the stream has | ||||||
|  |             # terminated and signal this local iterator to stop | ||||||
|  |             await self.aclose() | ||||||
|  |             raise StopAsyncIteration | ||||||
|  | 
 | ||||||
|  |         except trio.Cancelled: | ||||||
|  |             # relay cancels to the remote task | ||||||
|  |             await self.aclose() | ||||||
|  |             raise | ||||||
|  | 
 | ||||||
|  |         except KeyError: | ||||||
|  |             # internal error should never get here | ||||||
|  |             assert msg.get('cid'), ( | ||||||
|  |                 "Received internal error at portal?") | ||||||
|  |             raise unpack_error(msg, self._portal.channel) | ||||||
|  | 
 | ||||||
|  |     @contextmanager | ||||||
|  |     def shield( | ||||||
|  |         self | ||||||
|  |     ) -> Iterator['ReceiveStream']:  # noqa | ||||||
|  |         """Shield this stream's underlying channel such that a local consumer task | ||||||
|  |         can be cancelled (and possibly restarted) using ``trio.Cancelled``. | ||||||
|  | 
 | ||||||
|  |         """ | ||||||
|  |         self._shielded = True | ||||||
|  |         yield self | ||||||
|  |         self._shielded = False | ||||||
|  | 
 | ||||||
|  |     async def aclose(self): | ||||||
|  |         """Cancel associated remote actor task and local memory channel | ||||||
|  |         on close. | ||||||
|  |         """ | ||||||
|  |         rx_chan = self._rx_chan | ||||||
|  |         stats = rx_chan.statistics() | ||||||
|  | 
 | ||||||
|  |         if rx_chan._closed: | ||||||
|  |             log.warning(f"{self} is already closed") | ||||||
|  |             return | ||||||
|  | 
 | ||||||
|  |         if stats.open_receive_channels > 1: | ||||||
|  |             # if we've been cloned don't kill the stream | ||||||
|  |             log.debug("there are still consumers running keeping stream alive") | ||||||
|  |             return | ||||||
|  | 
 | ||||||
|  |         if self._shielded: | ||||||
|  |             log.warning(f"{self} is shielded, portal channel being kept alive") | ||||||
|  |             return | ||||||
|  | 
 | ||||||
|  |         # close the local mem chan | ||||||
|  |         rx_chan.close() | ||||||
|  | 
 | ||||||
|  |         cid = self._cid | ||||||
|  |         with trio.move_on_after(0.5) as cs: | ||||||
|  |             cs.shield = True | ||||||
|  |             log.warning( | ||||||
|  |                 f"Cancelling stream {cid} to " | ||||||
|  |                 f"{self._portal.channel.uid}") | ||||||
|  | 
 | ||||||
|  |             # NOTE: we're telling the far end actor to cancel a task | ||||||
|  |             # corresponding to *this actor*. The far end local channel | ||||||
|  |             # instance is passed to `Actor._cancel_task()` implicitly. | ||||||
|  |             await self._portal.run_from_ns('self', '_cancel_task', cid=cid) | ||||||
|  | 
 | ||||||
|  |         if cs.cancelled_caught: | ||||||
|  |             # XXX: there's no way to know if the remote task was indeed | ||||||
|  |             # cancelled in the case where the connection is broken or | ||||||
|  |             # some other network error occurred. | ||||||
|  |             if not self._portal.channel.connected(): | ||||||
|  |                 log.warning( | ||||||
|  |                     "May have failed to cancel remote task " | ||||||
|  |                     f"{cid} for {self._portal.channel.uid}") | ||||||
|  | 
 | ||||||
|  |     def clone(self): | ||||||
|  |         """Clone this receive channel allowing for multi-task | ||||||
|  |         consumption from the same channel. | ||||||
|  | 
 | ||||||
|  |         """ | ||||||
|  |         return ReceiveStream( | ||||||
|  |             self._cid, | ||||||
|  |             self._rx_chan.clone(), | ||||||
|  |             self._portal, | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class Portal: | ||||||
|  |     """A 'portal' to a(n) (remote) ``Actor``. | ||||||
|  | 
 | ||||||
|  |     Allows for invoking remote routines and receiving results through an | ||||||
|  |     underlying ``tractor.Channel`` as though the remote (async) | ||||||
|  |     function / generator was invoked locally. | ||||||
|  | 
 | ||||||
|  |     Think of this like a native async IPC API. | ||||||
|  |     """ | ||||||
|  |     def __init__(self, channel: Channel) -> None: | ||||||
|  |         self.channel = channel | ||||||
|  |         # when this is set to a tuple returned from ``_submit()`` then | ||||||
|  |         # it is expected that ``result()`` will be awaited at some point | ||||||
|  |         # during the portal's lifetime | ||||||
|  |         self._result: Optional[Any] = None | ||||||
|  |         # set when _submit_for_result is called | ||||||
|  |         self._expect_result: Optional[ | ||||||
|  |             Tuple[str, Any, str, Dict[str, Any]] | ||||||
|  |         ] = None | ||||||
|  |         self._streams: Set[ReceiveStream] = set() | ||||||
|  |         self.actor = current_actor() | ||||||
|  | 
 | ||||||
|  |     async def _submit( | ||||||
|  |         self, | ||||||
|  |         ns: str, | ||||||
|  |         func: str, | ||||||
|  |         kwargs, | ||||||
|  |     ) -> Tuple[str, trio.abc.ReceiveChannel, str, Dict[str, Any]]: | ||||||
|  |         """Submit a function to be scheduled and run by actor, return the | ||||||
|  |         associated caller id, response queue, response type str, | ||||||
|  |         first message packet as a tuple. | ||||||
|  | 
 | ||||||
|  |         This is an async call. | ||||||
|  |         """ | ||||||
|  |         # ship a function call request to the remote actor | ||||||
|  |         cid, recv_chan = await self.actor.send_cmd( | ||||||
|  |             self.channel, ns, func, kwargs) | ||||||
|  | 
 | ||||||
|  |         # wait on first response msg and handle (this should be | ||||||
|  |         # in an immediate response) | ||||||
|  | 
 | ||||||
|  |         first_msg = await recv_chan.receive() | ||||||
|  |         functype = first_msg.get('functype') | ||||||
|  | 
 | ||||||
|  |         if functype == 'function' or functype == 'asyncfunction': | ||||||
|  |             resp_type = 'return' | ||||||
|  |         elif functype == 'asyncgen': | ||||||
|  |             resp_type = 'yield' | ||||||
|  |         elif 'error' in first_msg: | ||||||
|  |             raise unpack_error(first_msg, self.channel) | ||||||
|  |         else: | ||||||
|  |             raise ValueError(f"{first_msg} is an invalid response packet?") | ||||||
|  | 
 | ||||||
|  |         return cid, recv_chan, resp_type, first_msg | ||||||
|  | 
 | ||||||
|  |     async def _submit_for_result(self, ns: str, func: str, **kwargs) -> None: | ||||||
|  |         assert self._expect_result is None, \ | ||||||
|  |                 "A pending main result has already been submitted" | ||||||
|  |         self._expect_result = await self._submit(ns, func, kwargs) | ||||||
|  | 
 | ||||||
|  |     async def run( | ||||||
|  |         self, | ||||||
|  |         func_or_ns: str, | ||||||
|  |         fn_name: Optional[str] = None, | ||||||
|  |         **kwargs | ||||||
|  |     ) -> Any: | ||||||
|  |         """Submit a remote function to be scheduled and run by actor, in | ||||||
|  |         a new task, wrap and return its (stream of) result(s). | ||||||
|  | 
 | ||||||
|  |         This is a blocking call and returns either a value from the | ||||||
|  |         remote rpc task or a local async generator instance. | ||||||
|  |         """ | ||||||
|  |         if isinstance(func_or_ns, str): | ||||||
|  |             warnings.warn( | ||||||
|  |                 "`Portal.run(namespace: str, funcname: str)` is now" | ||||||
|  |                 "deprecated, pass a function reference directly instead\n" | ||||||
|  |                 "If you still want to run a remote function by name use" | ||||||
|  |                 "`Portal.run_from_ns()`", | ||||||
|  |                 DeprecationWarning, | ||||||
|  |                 stacklevel=2, | ||||||
|  |             ) | ||||||
|  |             fn_mod_path = func_or_ns | ||||||
|  |             assert isinstance(fn_name, str) | ||||||
|  | 
 | ||||||
|  |         else:  # function reference was passed directly | ||||||
|  |             fn = func_or_ns | ||||||
|  |             fn_mod_path = fn.__module__ | ||||||
|  |             fn_name = fn.__name__ | ||||||
|  | 
 | ||||||
|  |         return await self._return_from_resptype( | ||||||
|  |             *(await self._submit(fn_mod_path, fn_name, kwargs)) | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |     async def run_from_ns( | ||||||
|  |         self, | ||||||
|  |         namespace_path: str, | ||||||
|  |         function_name: str, | ||||||
|  |         **kwargs, | ||||||
|  |     ) -> Any: | ||||||
|  |         """Run a function from a (remote) namespace in a new task on the far-end actor. | ||||||
|  | 
 | ||||||
|  |         This is a more explitcit way to run tasks in a remote-process | ||||||
|  |         actor using explicit object-path syntax. Hint: this is how | ||||||
|  |         `.run()` works underneath. | ||||||
|  | 
 | ||||||
|  |         Note:: | ||||||
|  | 
 | ||||||
|  |             A special namespace `self` can be used to invoke `Actor` | ||||||
|  |             instance methods in the remote runtime. Currently this should only | ||||||
|  |             be used for `tractor` internals. | ||||||
|  |         """ | ||||||
|  |         return await self._return_from_resptype( | ||||||
|  |             *(await self._submit(namespace_path, function_name, kwargs)) | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |     async def _return_from_resptype( | ||||||
|  |         self, | ||||||
|  |         cid: str, | ||||||
|  |         recv_chan: trio.abc.ReceiveChannel, | ||||||
|  |         resptype: str, | ||||||
|  |         first_msg: dict | ||||||
|  |     ) -> Any: | ||||||
|  |         # TODO: not this needs some serious work and thinking about how | ||||||
|  |         # to make async-generators the fundamental IPC API over channels! | ||||||
|  |         # (think `yield from`, `gen.send()`, and functional reactive stuff) | ||||||
|  |         if resptype == 'yield':  # stream response | ||||||
|  |             rchan = ReceiveStream(cid, recv_chan, self) | ||||||
|  |             self._streams.add(rchan) | ||||||
|  |             return rchan | ||||||
|  | 
 | ||||||
|  |         elif resptype == 'return':  # single response | ||||||
|  |             msg = await recv_chan.receive() | ||||||
|             try: |             try: | ||||||
|                 return msg['return'] |                 return msg['return'] | ||||||
|             except KeyError: |             except KeyError: | ||||||
|                 # internal error should never get here |                 # internal error should never get here | ||||||
|                 assert msg.get('cid'), "Received internal error at portal?" |                 assert msg.get('cid'), "Received internal error at portal?" | ||||||
|         raise unpack_error(msg, channel) from None |                 raise unpack_error(msg, self.channel) | ||||||
| 
 |         else: | ||||||
| 
 |             raise ValueError(f"Unknown msg response type: {first_msg}") | ||||||
| class MessagingError(Exception): |  | ||||||
|     'Some kind of unexpected SC messaging dialog issue' |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| class Portal: |  | ||||||
|     ''' |  | ||||||
|     A 'portal' to a(n) (remote) ``Actor``. |  | ||||||
| 
 |  | ||||||
|     A portal is "opened" (and eventually closed) by one side of an |  | ||||||
|     inter-actor communication context. The side which opens the portal |  | ||||||
|     is equivalent to a "caller" in function parlance and usually is |  | ||||||
|     either the called actor's parent (in process tree hierarchy terms) |  | ||||||
|     or a client interested in scheduling work to be done remotely in a |  | ||||||
|     far process. |  | ||||||
| 
 |  | ||||||
|     The portal api allows the "caller" actor to invoke remote routines |  | ||||||
|     and receive results through an underlying ``tractor.Channel`` as |  | ||||||
|     though the remote (async) function / generator was called locally. |  | ||||||
|     It may be thought of loosely as an RPC api where native Python |  | ||||||
|     function calling semantics are supported transparently; hence it is |  | ||||||
|     like having a "portal" between the seperate actor memory spaces. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     # the timeout for a remote cancel request sent to |  | ||||||
|     # a(n) (peer) actor. |  | ||||||
|     cancel_timeout = 0.5 |  | ||||||
| 
 |  | ||||||
|     def __init__(self, channel: Channel) -> None: |  | ||||||
|         self.channel = channel |  | ||||||
|         # during the portal's lifetime |  | ||||||
|         self._result_msg: Optional[dict] = None |  | ||||||
| 
 |  | ||||||
|         # When set to a ``Context`` (when _submit_for_result is called) |  | ||||||
|         # it is expected that ``result()`` will be awaited at some |  | ||||||
|         # point. |  | ||||||
|         self._expect_result: Optional[Context] = None |  | ||||||
|         self._streams: set[MsgStream] = set() |  | ||||||
|         self.actor = current_actor() |  | ||||||
| 
 |  | ||||||
|     async def _submit_for_result( |  | ||||||
|         self, |  | ||||||
|         ns: str, |  | ||||||
|         func: str, |  | ||||||
|         **kwargs |  | ||||||
|     ) -> None: |  | ||||||
| 
 |  | ||||||
|         assert self._expect_result is None, \ |  | ||||||
|                 "A pending main result has already been submitted" |  | ||||||
| 
 |  | ||||||
|         self._expect_result = await self.actor.start_remote_task( |  | ||||||
|             self.channel, |  | ||||||
|             ns, |  | ||||||
|             func, |  | ||||||
|             kwargs |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|     async def _return_once( |  | ||||||
|         self, |  | ||||||
|         ctx: Context, |  | ||||||
| 
 |  | ||||||
|     ) -> dict[str, Any]: |  | ||||||
| 
 |  | ||||||
|         assert ctx._remote_func_type == 'asyncfunc'  # single response |  | ||||||
|         msg = await ctx._recv_chan.receive() |  | ||||||
|         return msg |  | ||||||
| 
 | 
 | ||||||
|     async def result(self) -> Any: |     async def result(self) -> Any: | ||||||
|         ''' |         """Return the result(s) from the remote actor's "main" task. | ||||||
|         Return the result(s) from the remote actor's "main" task. |         """ | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         # __tracebackhide__ = True |  | ||||||
|         # Check for non-rpc errors slapped on the |         # Check for non-rpc errors slapped on the | ||||||
|         # channel for which we always raise |         # channel for which we always raise | ||||||
|         exc = self.channel._exc |         exc = self.channel._exc | ||||||
|  | @ -156,19 +317,25 @@ class Portal: | ||||||
| 
 | 
 | ||||||
|         # expecting a "main" result |         # expecting a "main" result | ||||||
|         assert self._expect_result |         assert self._expect_result | ||||||
| 
 |         if self._result is None: | ||||||
|         if self._result_msg is None: |             try: | ||||||
|             self._result_msg = await self._return_once( |                 self._result = await self._return_from_resptype( | ||||||
|                 self._expect_result |                     *self._expect_result | ||||||
|                 ) |                 ) | ||||||
|  |             except RemoteActorError as err: | ||||||
|  |                 self._result = err | ||||||
| 
 | 
 | ||||||
|         return _unwrap_msg(self._result_msg, self.channel) |         # re-raise error on every call | ||||||
|  |         if isinstance(self._result, RemoteActorError): | ||||||
|  |             raise self._result | ||||||
|  | 
 | ||||||
|  |         return self._result | ||||||
| 
 | 
 | ||||||
|     async def _cancel_streams(self): |     async def _cancel_streams(self): | ||||||
|         # terminate all locally running async generator |         # terminate all locally running async generator | ||||||
|         # IPC calls |         # IPC calls | ||||||
|         if self._streams: |         if self._streams: | ||||||
|             log.cancel( |             log.warning( | ||||||
|                 f"Cancelling all streams with {self.channel.uid}") |                 f"Cancelling all streams with {self.channel.uid}") | ||||||
|             for stream in self._streams.copy(): |             for stream in self._streams.copy(): | ||||||
|                 try: |                 try: | ||||||
|  | @ -187,407 +354,80 @@ class Portal: | ||||||
|         # we'll need to .aclose all those channels here |         # we'll need to .aclose all those channels here | ||||||
|         await self._cancel_streams() |         await self._cancel_streams() | ||||||
| 
 | 
 | ||||||
|     async def cancel_actor( |     async def cancel_actor(self): | ||||||
|         self, |         """Cancel the actor on the other end of this portal. | ||||||
|         timeout: float | None = None, |         """ | ||||||
| 
 |  | ||||||
|     ) -> bool: |  | ||||||
|         ''' |  | ||||||
|         Cancel the actor on the other end of this portal. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         if not self.channel.connected(): |         if not self.channel.connected(): | ||||||
|             log.cancel("This channel is already closed can't cancel") |             log.warning("This portal is already closed can't cancel") | ||||||
|             return False |             return False | ||||||
| 
 | 
 | ||||||
|         log.cancel( |         await self._cancel_streams() | ||||||
|  | 
 | ||||||
|  |         log.warning( | ||||||
|             f"Sending actor cancel request to {self.channel.uid} on " |             f"Sending actor cancel request to {self.channel.uid} on " | ||||||
|             f"{self.channel}") |             f"{self.channel}") | ||||||
| 
 |  | ||||||
|         self.channel._cancel_called = True |  | ||||||
| 
 |  | ||||||
|         try: |         try: | ||||||
|             # send cancel cmd - might not get response |             # send cancel cmd - might not get response | ||||||
|             # XXX: sure would be nice to make this work with a proper shield |             # XXX: sure would be nice to make this work with a proper shield | ||||||
|             with trio.move_on_after(timeout or self.cancel_timeout) as cs: |             # with trio.CancelScope() as cancel_scope: | ||||||
|                 cs.shield = True |             # with trio.CancelScope(shield=True) as cancel_scope: | ||||||
|  |             with trio.move_on_after(0.5) as cancel_scope: | ||||||
|  |                 cancel_scope.shield = True | ||||||
| 
 | 
 | ||||||
|                 await self.run_from_ns('self', 'cancel') |                 await self.run_from_ns('self', 'cancel') | ||||||
|                 return True |                 return True | ||||||
| 
 | 
 | ||||||
|             if cs.cancelled_caught: |             if cancel_scope.cancelled_caught: | ||||||
|                 log.cancel(f"May have failed to cancel {self.channel.uid}") |                 log.warning(f"May have failed to cancel {self.channel.uid}") | ||||||
| 
 | 
 | ||||||
|             # if we get here some weird cancellation case happened |             # if we get here some weird cancellation case happened | ||||||
|             return False |             return False | ||||||
| 
 | 
 | ||||||
|         except ( |  | ||||||
|             trio.ClosedResourceError, |  | ||||||
|             trio.BrokenResourceError, |  | ||||||
|         ): |  | ||||||
|             log.cancel( |  | ||||||
|                 f"{self.channel} for {self.channel.uid} was already " |  | ||||||
|                 "closed or broken?") |  | ||||||
|             return False |  | ||||||
| 
 |  | ||||||
|     async def run_from_ns( |  | ||||||
|         self, |  | ||||||
|         namespace_path: str, |  | ||||||
|         function_name: str, |  | ||||||
|         **kwargs, |  | ||||||
|     ) -> Any: |  | ||||||
|         ''' |  | ||||||
|         Run a function from a (remote) namespace in a new task on the |  | ||||||
|         far-end actor. |  | ||||||
| 
 |  | ||||||
|         This is a more explitcit way to run tasks in a remote-process |  | ||||||
|         actor using explicit object-path syntax. Hint: this is how |  | ||||||
|         `.run()` works underneath. |  | ||||||
| 
 |  | ||||||
|         Note:: |  | ||||||
| 
 |  | ||||||
|             A special namespace `self` can be used to invoke `Actor` |  | ||||||
|             instance methods in the remote runtime. Currently this |  | ||||||
|             should only be used solely for ``tractor`` runtime |  | ||||||
|             internals. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         ctx = await self.actor.start_remote_task( |  | ||||||
|             self.channel, |  | ||||||
|             namespace_path, |  | ||||||
|             function_name, |  | ||||||
|             kwargs, |  | ||||||
|         ) |  | ||||||
|         ctx._portal = self |  | ||||||
|         msg = await self._return_once(ctx) |  | ||||||
|         return _unwrap_msg(msg, self.channel) |  | ||||||
| 
 |  | ||||||
|     async def run( |  | ||||||
|         self, |  | ||||||
|         func: str, |  | ||||||
|         fn_name: Optional[str] = None, |  | ||||||
|         **kwargs |  | ||||||
|     ) -> Any: |  | ||||||
|         ''' |  | ||||||
|         Submit a remote function to be scheduled and run by actor, in |  | ||||||
|         a new task, wrap and return its (stream of) result(s). |  | ||||||
| 
 |  | ||||||
|         This is a blocking call and returns either a value from the |  | ||||||
|         remote rpc task or a local async generator instance. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         if isinstance(func, str): |  | ||||||
|             warnings.warn( |  | ||||||
|                 "`Portal.run(namespace: str, funcname: str)` is now" |  | ||||||
|                 "deprecated, pass a function reference directly instead\n" |  | ||||||
|                 "If you still want to run a remote function by name use" |  | ||||||
|                 "`Portal.run_from_ns()`", |  | ||||||
|                 DeprecationWarning, |  | ||||||
|                 stacklevel=2, |  | ||||||
|             ) |  | ||||||
|             fn_mod_path = func |  | ||||||
|             assert isinstance(fn_name, str) |  | ||||||
| 
 |  | ||||||
|         else:  # function reference was passed directly |  | ||||||
|             if ( |  | ||||||
|                 not inspect.iscoroutinefunction(func) or |  | ||||||
|                 ( |  | ||||||
|                     inspect.iscoroutinefunction(func) and |  | ||||||
|                     getattr(func, '_tractor_stream_function', False) |  | ||||||
|                 ) |  | ||||||
|             ): |  | ||||||
|                 raise TypeError( |  | ||||||
|                     f'{func} must be a non-streaming async function!') |  | ||||||
| 
 |  | ||||||
|             fn_mod_path, fn_name = NamespacePath.from_ref(func).to_tuple() |  | ||||||
| 
 |  | ||||||
|         ctx = await self.actor.start_remote_task( |  | ||||||
|             self.channel, |  | ||||||
|             fn_mod_path, |  | ||||||
|             fn_name, |  | ||||||
|             kwargs, |  | ||||||
|         ) |  | ||||||
|         ctx._portal = self |  | ||||||
|         return _unwrap_msg( |  | ||||||
|             await self._return_once(ctx), |  | ||||||
|             self.channel, |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|     @asynccontextmanager |  | ||||||
|     async def open_stream_from( |  | ||||||
|         self, |  | ||||||
|         async_gen_func: Callable,  # typing: ignore |  | ||||||
|         **kwargs, |  | ||||||
| 
 |  | ||||||
|     ) -> AsyncGenerator[MsgStream, None]: |  | ||||||
| 
 |  | ||||||
|         if not inspect.isasyncgenfunction(async_gen_func): |  | ||||||
|             if not ( |  | ||||||
|                 inspect.iscoroutinefunction(async_gen_func) and |  | ||||||
|                 getattr(async_gen_func, '_tractor_stream_function', False) |  | ||||||
|             ): |  | ||||||
|                 raise TypeError( |  | ||||||
|                     f'{async_gen_func} must be an async generator function!') |  | ||||||
| 
 |  | ||||||
|         fn_mod_path, fn_name = NamespacePath.from_ref( |  | ||||||
|             async_gen_func).to_tuple() |  | ||||||
|         ctx = await self.actor.start_remote_task( |  | ||||||
|             self.channel, |  | ||||||
|             fn_mod_path, |  | ||||||
|             fn_name, |  | ||||||
|             kwargs |  | ||||||
|         ) |  | ||||||
|         ctx._portal = self |  | ||||||
| 
 |  | ||||||
|         # ensure receive-only stream entrypoint |  | ||||||
|         assert ctx._remote_func_type == 'asyncgen' |  | ||||||
| 
 |  | ||||||
|         try: |  | ||||||
|             # deliver receive only stream |  | ||||||
|             async with MsgStream( |  | ||||||
|                 ctx, ctx._recv_chan, |  | ||||||
|             ) as rchan: |  | ||||||
|                 self._streams.add(rchan) |  | ||||||
|                 yield rchan |  | ||||||
| 
 |  | ||||||
|         finally: |  | ||||||
| 
 |  | ||||||
|             # cancel the far end task on consumer close |  | ||||||
|             # NOTE: this is a special case since we assume that if using |  | ||||||
|             # this ``.open_fream_from()`` api, the stream is one a one |  | ||||||
|             # time use and we couple the far end tasks's lifetime to |  | ||||||
|             # the consumer's scope; we don't ever send a `'stop'` |  | ||||||
|             # message right now since there shouldn't be a reason to |  | ||||||
|             # stop and restart the stream, right? |  | ||||||
|             try: |  | ||||||
|                 with trio.CancelScope(shield=True): |  | ||||||
|                     await ctx.cancel() |  | ||||||
| 
 |  | ||||||
|         except trio.ClosedResourceError: |         except trio.ClosedResourceError: | ||||||
|                 # if the far end terminates before we send a cancel the |  | ||||||
|                 # underlying transport-channel may already be closed. |  | ||||||
|                 log.cancel(f'Context {ctx} was already closed?') |  | ||||||
| 
 |  | ||||||
|             # XXX: should this always be done? |  | ||||||
|             # await recv_chan.aclose() |  | ||||||
|             self._streams.remove(rchan) |  | ||||||
| 
 |  | ||||||
|     @asynccontextmanager |  | ||||||
|     async def open_context( |  | ||||||
| 
 |  | ||||||
|         self, |  | ||||||
|         func: Callable, |  | ||||||
|         **kwargs, |  | ||||||
| 
 |  | ||||||
|     ) -> AsyncGenerator[tuple[Context, Any], None]: |  | ||||||
|         ''' |  | ||||||
|         Open an inter-actor task context. |  | ||||||
| 
 |  | ||||||
|         This is a synchronous API which allows for deterministic |  | ||||||
|         setup/teardown of a remote task. The yielded ``Context`` further |  | ||||||
|         allows for opening bidirectional streams, explicit cancellation |  | ||||||
|         and synchronized final result collection. See ``tractor.Context``. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         # conduct target func method structural checks |  | ||||||
|         if not inspect.iscoroutinefunction(func) and ( |  | ||||||
|             getattr(func, '_tractor_contex_function', False) |  | ||||||
|         ): |  | ||||||
|             raise TypeError( |  | ||||||
|                 f'{func} must be an async generator function!') |  | ||||||
| 
 |  | ||||||
|         fn_mod_path, fn_name = NamespacePath.from_ref(func).to_tuple() |  | ||||||
| 
 |  | ||||||
|         ctx = await self.actor.start_remote_task( |  | ||||||
|             self.channel, |  | ||||||
|             fn_mod_path, |  | ||||||
|             fn_name, |  | ||||||
|             kwargs |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|         assert ctx._remote_func_type == 'context' |  | ||||||
|         msg = await ctx._recv_chan.receive() |  | ||||||
| 
 |  | ||||||
|         try: |  | ||||||
|             # the "first" value here is delivered by the callee's |  | ||||||
|             # ``Context.started()`` call. |  | ||||||
|             first = msg['started'] |  | ||||||
|             ctx._started_called = True |  | ||||||
| 
 |  | ||||||
|         except KeyError: |  | ||||||
|             assert msg.get('cid'), ("Received internal error at context?") |  | ||||||
| 
 |  | ||||||
|             if msg.get('error'): |  | ||||||
|                 # raise kerr from unpack_error(msg, self.channel) |  | ||||||
|                 raise unpack_error(msg, self.channel) from None |  | ||||||
|             else: |  | ||||||
|                 raise MessagingError( |  | ||||||
|                     f'Context for {ctx.cid} was expecting a `started` message' |  | ||||||
|                     f' but received a non-error msg:\n{pformat(msg)}' |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|         _err: Optional[BaseException] = None |  | ||||||
|         ctx._portal = self |  | ||||||
| 
 |  | ||||||
|         uid = self.channel.uid |  | ||||||
|         cid = ctx.cid |  | ||||||
|         etype: Optional[Type[BaseException]] = None |  | ||||||
| 
 |  | ||||||
|         # deliver context instance and .started() msg value in open tuple. |  | ||||||
|         try: |  | ||||||
|             async with trio.open_nursery() as scope_nursery: |  | ||||||
|                 ctx._scope_nursery = scope_nursery |  | ||||||
| 
 |  | ||||||
|                 # do we need this? |  | ||||||
|                 # await trio.lowlevel.checkpoint() |  | ||||||
| 
 |  | ||||||
|                 yield ctx, first |  | ||||||
| 
 |  | ||||||
|         except ContextCancelled as err: |  | ||||||
|             _err = err |  | ||||||
|             if not ctx._cancel_called: |  | ||||||
|                 # context was cancelled at the far end but was |  | ||||||
|                 # not part of this end requesting that cancel |  | ||||||
|                 # so raise for the local task to respond and handle. |  | ||||||
|                 raise |  | ||||||
| 
 |  | ||||||
|             # if the context was cancelled by client code |  | ||||||
|             # then we don't need to raise since user code |  | ||||||
|             # is expecting this and the block should exit. |  | ||||||
|             else: |  | ||||||
|                 log.debug(f'Context {ctx} cancelled gracefully') |  | ||||||
| 
 |  | ||||||
|         except ( |  | ||||||
|             BaseException, |  | ||||||
| 
 |  | ||||||
|             # more specifically, we need to handle these but not |  | ||||||
|             # sure it's worth being pedantic: |  | ||||||
|             # Exception, |  | ||||||
|             # trio.Cancelled, |  | ||||||
|             # KeyboardInterrupt, |  | ||||||
| 
 |  | ||||||
|         ) as err: |  | ||||||
|             etype = type(err) |  | ||||||
|             # the context cancels itself on any cancel |  | ||||||
|             # causing error. |  | ||||||
| 
 |  | ||||||
|             if ctx.chan.connected(): |  | ||||||
|                 log.cancel( |  | ||||||
|                     'Context cancelled for task, sending cancel request..\n' |  | ||||||
|                     f'task:{cid}\n' |  | ||||||
|                     f'actor:{uid}' |  | ||||||
|                 ) |  | ||||||
|                 await ctx.cancel() |  | ||||||
|             else: |  | ||||||
|             log.warning( |             log.warning( | ||||||
|                     'IPC connection for context is broken?\n' |                 f"{self.channel} for {self.channel.uid} was already closed?") | ||||||
|                     f'task:{cid}\n' |             return False | ||||||
|                     f'actor:{uid}' |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|             raise |  | ||||||
| 
 |  | ||||||
|         finally: |  | ||||||
|             # in the case where a runtime nursery (due to internal bug) |  | ||||||
|             # or a remote actor transmits an error we want to be |  | ||||||
|             # sure we get the error the underlying feeder mem chan. |  | ||||||
|             # if it's not raised here it *should* be raised from the |  | ||||||
|             # msg loop nursery right? |  | ||||||
|             if ctx.chan.connected(): |  | ||||||
|                 log.info( |  | ||||||
|                     'Waiting on final context-task result for\n' |  | ||||||
|                     f'task: {cid}\n' |  | ||||||
|                     f'actor: {uid}' |  | ||||||
|                 ) |  | ||||||
|                 result = await ctx.result() |  | ||||||
|                 log.runtime( |  | ||||||
|                     f'Context {fn_name} returned ' |  | ||||||
|                     f'value from callee `{result}`' |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|             # though it should be impossible for any tasks |  | ||||||
|             # operating *in* this scope to have survived |  | ||||||
|             # we tear down the runtime feeder chan last |  | ||||||
|             # to avoid premature stream clobbers. |  | ||||||
|             if ctx._recv_chan is not None: |  | ||||||
|                 # should we encapsulate this in the context api? |  | ||||||
|                 await ctx._recv_chan.aclose() |  | ||||||
| 
 |  | ||||||
|             if etype: |  | ||||||
|                 if ctx._cancel_called: |  | ||||||
|                     log.cancel( |  | ||||||
|                         f'Context {fn_name} cancelled by caller with\n{etype}' |  | ||||||
|                     ) |  | ||||||
|                 elif _err is not None: |  | ||||||
|                     log.cancel( |  | ||||||
|                         f'Context for task cancelled by callee with {etype}\n' |  | ||||||
|                         f'target: `{fn_name}`\n' |  | ||||||
|                         f'task:{cid}\n' |  | ||||||
|                         f'actor:{uid}' |  | ||||||
|                     ) |  | ||||||
|             # XXX: (MEGA IMPORTANT) if this is a root opened process we |  | ||||||
|             # wait for any immediate child in debug before popping the |  | ||||||
|             # context from the runtime msg loop otherwise inside |  | ||||||
|             # ``Actor._push_result()`` the msg will be discarded and in |  | ||||||
|             # the case where that msg is global debugger unlock (via |  | ||||||
|             # a "stop" msg for a stream), this can result in a deadlock |  | ||||||
|             # where the root is waiting on the lock to clear but the |  | ||||||
|             # child has already cleared it and clobbered IPC. |  | ||||||
|             from ._debug import maybe_wait_for_debugger |  | ||||||
|             await maybe_wait_for_debugger() |  | ||||||
| 
 |  | ||||||
|             # remove the context from runtime tracking |  | ||||||
|             self.actor._contexts.pop( |  | ||||||
|                 (self.channel.uid, ctx.cid), |  | ||||||
|                 None, |  | ||||||
|             ) |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @dataclass | @dataclass | ||||||
| class LocalPortal: | class LocalPortal: | ||||||
|     ''' |     """A 'portal' to a local ``Actor``. | ||||||
|     A 'portal' to a local ``Actor``. |  | ||||||
| 
 | 
 | ||||||
|     A compatibility shim for normal portals but for invoking functions |     A compatibility shim for normal portals but for invoking functions | ||||||
|     using an in process actor instance. |     using an in process actor instance. | ||||||
| 
 |     """ | ||||||
|     ''' |  | ||||||
|     actor: 'Actor'  # type: ignore # noqa |     actor: 'Actor'  # type: ignore # noqa | ||||||
|     channel: Channel |     channel: Channel | ||||||
| 
 | 
 | ||||||
|     async def run_from_ns(self, ns: str, func_name: str, **kwargs) -> Any: |     async def run_from_ns(self, ns: str, func_name: str, **kwargs) -> Any: | ||||||
|         ''' |         """Run a requested local function from a namespace path and | ||||||
|         Run a requested local function from a namespace path and |  | ||||||
|         return it's result. |         return it's result. | ||||||
| 
 | 
 | ||||||
|         ''' |         """ | ||||||
|         obj = self.actor if ns == 'self' else importlib.import_module(ns) |         obj = self.actor if ns == 'self' else importlib.import_module(ns) | ||||||
|         func = getattr(obj, func_name) |         func = getattr(obj, func_name) | ||||||
|  |         if inspect.iscoroutinefunction(func): | ||||||
|             return await func(**kwargs) |             return await func(**kwargs) | ||||||
|  |         else: | ||||||
|  |             return func(**kwargs) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @asynccontextmanager | @asynccontextmanager | ||||||
| async def open_portal( | async def open_portal( | ||||||
| 
 |  | ||||||
|     channel: Channel, |     channel: Channel, | ||||||
|     nursery: Optional[trio.Nursery] = None, |     nursery: Optional[trio.Nursery] = None, | ||||||
|     start_msg_loop: bool = True, |     start_msg_loop: bool = True, | ||||||
|     shield: bool = False, |     shield: bool = False, | ||||||
|  | ) -> typing.AsyncGenerator[Portal, None]: | ||||||
|  |     """Open a ``Portal`` through the provided ``channel``. | ||||||
| 
 | 
 | ||||||
| ) -> AsyncGenerator[Portal, None]: |     Spawns a background task to handle message processing. | ||||||
|     ''' |     """ | ||||||
|     Open a ``Portal`` through the provided ``channel``. |  | ||||||
| 
 |  | ||||||
|     Spawns a background task to handle message processing (normally |  | ||||||
|     done by the actor-runtime implicitly). |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     actor = current_actor() |     actor = current_actor() | ||||||
|     assert actor |     assert actor | ||||||
|     was_connected = False |     was_connected = False | ||||||
| 
 | 
 | ||||||
|     async with maybe_open_nursery(nursery, shield=shield) as nursery: |     async with maybe_open_nursery(nursery, shield=shield) as nursery: | ||||||
| 
 |  | ||||||
|         if not channel.connected(): |         if not channel.connected(): | ||||||
|             await channel.connect() |             await channel.connect() | ||||||
|             was_connected = True |             was_connected = True | ||||||
|  | @ -597,11 +437,9 @@ async def open_portal( | ||||||
| 
 | 
 | ||||||
|         msg_loop_cs: Optional[trio.CancelScope] = None |         msg_loop_cs: Optional[trio.CancelScope] = None | ||||||
|         if start_msg_loop: |         if start_msg_loop: | ||||||
|             from ._runtime import process_messages |  | ||||||
|             msg_loop_cs = await nursery.start( |             msg_loop_cs = await nursery.start( | ||||||
|                 partial( |                 partial( | ||||||
|                     process_messages, |                     actor._process_messages, | ||||||
|                     actor, |  | ||||||
|                     channel, |                     channel, | ||||||
|                     # if the local task is cancelled we want to keep |                     # if the local task is cancelled we want to keep | ||||||
|                     # the msg loop running until our block ends |                     # the msg loop running until our block ends | ||||||
|  | @ -615,9 +453,8 @@ async def open_portal( | ||||||
|             await portal.aclose() |             await portal.aclose() | ||||||
| 
 | 
 | ||||||
|             if was_connected: |             if was_connected: | ||||||
|                 # gracefully signal remote channel-msg loop |                 # cancel remote channel-msg loop | ||||||
|                 await channel.send(None) |                 await channel.send(None) | ||||||
|                 # await channel.aclose() |  | ||||||
| 
 | 
 | ||||||
|             # cancel background msg loop task |             # cancel background msg loop task | ||||||
|             if msg_loop_cs: |             if msg_loop_cs: | ||||||
|  |  | ||||||
							
								
								
									
										231
									
								
								tractor/_root.py
								
								
								
								
							
							
						
						
									
										231
									
								
								tractor/_root.py
								
								
								
								
							|  | @ -1,53 +1,27 @@ | ||||||
| # tractor: structured concurrent "actors". | """ | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| ''' |  | ||||||
| Root actor runtime ignition(s). | Root actor runtime ignition(s). | ||||||
| 
 | """ | ||||||
| ''' |  | ||||||
| from contextlib import asynccontextmanager | from contextlib import asynccontextmanager | ||||||
| from functools import partial | from functools import partial | ||||||
| import importlib | import importlib | ||||||
| import logging |  | ||||||
| import signal |  | ||||||
| import sys |  | ||||||
| import os | import os | ||||||
|  | from typing import Tuple, Optional, List, Any | ||||||
| import typing | import typing | ||||||
| import warnings | import warnings | ||||||
| 
 | 
 | ||||||
| 
 |  | ||||||
| from exceptiongroup import BaseExceptionGroup |  | ||||||
| import trio | import trio | ||||||
| 
 | 
 | ||||||
| from ._runtime import ( | from ._actor import Actor, Arbiter | ||||||
|     Actor, |  | ||||||
|     Arbiter, |  | ||||||
|     async_main, |  | ||||||
| ) |  | ||||||
| from . import _debug | from . import _debug | ||||||
| from . import _spawn | from . import _spawn | ||||||
| from . import _state | from . import _state | ||||||
| from . import log | from . import log | ||||||
| from ._ipc import _connect_chan | from ._ipc import _connect_chan | ||||||
| from ._exceptions import is_multi_cancelled |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| # set at startup and after forks | # set at startup and after forks | ||||||
| _default_arbiter_host: str = '127.0.0.1' | _default_arbiter_host = '127.0.0.1' | ||||||
| _default_arbiter_port: int = 1616 | _default_arbiter_port = 1616 | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| logger = log.get_logger('tractor') | logger = log.get_logger('tractor') | ||||||
|  | @ -56,45 +30,37 @@ logger = log.get_logger('tractor') | ||||||
| @asynccontextmanager | @asynccontextmanager | ||||||
| async def open_root_actor( | async def open_root_actor( | ||||||
| 
 | 
 | ||||||
|     *, |  | ||||||
|     # defaults are above |     # defaults are above | ||||||
|     arbiter_addr: tuple[str, int] | None = None, |     arbiter_addr: Tuple[str, int] = ( | ||||||
|  |         _default_arbiter_host, | ||||||
|  |         _default_arbiter_port, | ||||||
|  |     ), | ||||||
| 
 | 
 | ||||||
|     # defaults are above |     name: Optional[str] = 'root', | ||||||
|     registry_addr: tuple[str, int] | None = None, |  | ||||||
| 
 |  | ||||||
|     name: str | None = 'root', |  | ||||||
| 
 | 
 | ||||||
|     # either the `multiprocessing` start method: |     # either the `multiprocessing` start method: | ||||||
|     # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods |     # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods | ||||||
|     # OR `trio` (the new default). |     # OR `trio` (the new default). | ||||||
|     start_method: _spawn.SpawnMethodKey | None = None, |     start_method: Optional[str] = None, | ||||||
| 
 | 
 | ||||||
|     # enables the multi-process debugger support |     # enables the multi-process debugger support | ||||||
|     debug_mode: bool = False, |     debug_mode: bool = False, | ||||||
| 
 | 
 | ||||||
|     # internal logging |     # internal logging | ||||||
|     loglevel: str | None = None, |     loglevel: Optional[str] = None, | ||||||
| 
 | 
 | ||||||
|     enable_modules: list | None = None, |     enable_modules: Optional[List] = None, | ||||||
|     rpc_module_paths: list | None = None, |     rpc_module_paths: Optional[List] = None, | ||||||
| 
 | 
 | ||||||
| ) -> typing.Any: | ) -> typing.Any: | ||||||
|     ''' |     """Async entry point for ``tractor``. | ||||||
|     Runtime init entry point for ``tractor``. |  | ||||||
| 
 | 
 | ||||||
|     ''' |     """ | ||||||
|     # Override the global debugger hook to make it play nice with |     # Override the global debugger hook to make it play nice with | ||||||
|     # ``trio``, see much discussion in: |     # ``trio``, see: | ||||||
|     # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018 |     # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018 | ||||||
|     builtin_bp_handler = sys.breakpointhook |  | ||||||
|     orig_bp_path: str | None = os.environ.get('PYTHONBREAKPOINT', None) |  | ||||||
|     os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace' |     os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace' | ||||||
| 
 | 
 | ||||||
|     # attempt to retreive ``trio``'s sigint handler and stash it |  | ||||||
|     # on our debugger lock state. |  | ||||||
|     _debug.Lock._trio_handler = signal.getsignal(signal.SIGINT) |  | ||||||
| 
 |  | ||||||
|     # mark top most level process as root actor |     # mark top most level process as root actor | ||||||
|     _state._runtime_vars['_is_root'] = True |     _state._runtime_vars['_is_root'] = True | ||||||
| 
 | 
 | ||||||
|  | @ -113,25 +79,6 @@ async def open_root_actor( | ||||||
|     if start_method is not None: |     if start_method is not None: | ||||||
|         _spawn.try_set_start_method(start_method) |         _spawn.try_set_start_method(start_method) | ||||||
| 
 | 
 | ||||||
|     if arbiter_addr is not None: |  | ||||||
|         warnings.warn( |  | ||||||
|             '`arbiter_addr` is now deprecated and has been renamed to' |  | ||||||
|             '`registry_addr`.\nUse that instead..', |  | ||||||
|             DeprecationWarning, |  | ||||||
|             stacklevel=2, |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|     registry_addr = (host, port) = ( |  | ||||||
|         registry_addr |  | ||||||
|         or arbiter_addr |  | ||||||
|         or ( |  | ||||||
|             _default_arbiter_host, |  | ||||||
|             _default_arbiter_port, |  | ||||||
|         ) |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
|     loglevel = (loglevel or log._default_loglevel).upper() |  | ||||||
| 
 |  | ||||||
|     if debug_mode and _spawn._spawn_method == 'trio': |     if debug_mode and _spawn._spawn_method == 'trio': | ||||||
|         _state._runtime_vars['_debug_mode'] = True |         _state._runtime_vars['_debug_mode'] = True | ||||||
| 
 | 
 | ||||||
|  | @ -139,41 +86,30 @@ async def open_root_actor( | ||||||
|         # for use of ``await tractor.breakpoint()`` |         # for use of ``await tractor.breakpoint()`` | ||||||
|         enable_modules.append('tractor._debug') |         enable_modules.append('tractor._debug') | ||||||
| 
 | 
 | ||||||
|         # if debug mode get's enabled *at least* use that level of |  | ||||||
|         # logging for some informative console prompts. |  | ||||||
|         if ( |  | ||||||
|             logging.getLevelName( |  | ||||||
|                 # lul, need the upper case for the -> int map? |  | ||||||
|                 # sweet "dynamic function behaviour" stdlib... |  | ||||||
|                 loglevel, |  | ||||||
|             ) > logging.getLevelName('PDB') |  | ||||||
|         ): |  | ||||||
|             loglevel = 'PDB' |  | ||||||
| 
 |  | ||||||
|     elif debug_mode: |     elif debug_mode: | ||||||
|         raise RuntimeError( |         raise RuntimeError( | ||||||
|             "Debug mode is only supported for the `trio` backend!" |             "Debug mode is only supported for the `trio` backend!" | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
|  |     arbiter_addr = (host, port) = arbiter_addr or ( | ||||||
|  |         _default_arbiter_host, | ||||||
|  |         _default_arbiter_port | ||||||
|  |     ) | ||||||
|  | 
 | ||||||
|  |     loglevel = loglevel or log.get_loglevel() | ||||||
|  |     if loglevel is not None: | ||||||
|  |         log._default_loglevel = loglevel | ||||||
|         log.get_console_log(loglevel) |         log.get_console_log(loglevel) | ||||||
| 
 | 
 | ||||||
|     try: |     # make a temporary connection to see if an arbiter exists | ||||||
|         # make a temporary connection to see if an arbiter exists, |  | ||||||
|         # if one can't be made quickly we assume none exists. |  | ||||||
|     arbiter_found = False |     arbiter_found = False | ||||||
| 
 | 
 | ||||||
|         # TODO: this connect-and-bail forces us to have to carefully |     try: | ||||||
|         # rewrap TCP 104-connection-reset errors as EOF so as to avoid |  | ||||||
|         # propagating cancel-causing errors to the channel-msg loop |  | ||||||
|         # machinery.  Likely it would be better to eventually have |  | ||||||
|         # a "discovery" protocol with basic handshake instead. |  | ||||||
|         with trio.move_on_after(1): |  | ||||||
|         async with _connect_chan(host, port): |         async with _connect_chan(host, port): | ||||||
|             arbiter_found = True |             arbiter_found = True | ||||||
| 
 | 
 | ||||||
|     except OSError: |     except OSError: | ||||||
|         # TODO: make this a "discovery" log level? |         logger.warning(f"No actor could be found @ {host}:{port}") | ||||||
|         logger.warning(f"No actor registry found @ {host}:{port}") |  | ||||||
| 
 | 
 | ||||||
|     # create a local actor and start up its main routine/task |     # create a local actor and start up its main routine/task | ||||||
|     if arbiter_found: |     if arbiter_found: | ||||||
|  | @ -183,7 +119,7 @@ async def open_root_actor( | ||||||
| 
 | 
 | ||||||
|         actor = Actor( |         actor = Actor( | ||||||
|             name or 'anonymous', |             name or 'anonymous', | ||||||
|             arbiter_addr=registry_addr, |             arbiter_addr=arbiter_addr, | ||||||
|             loglevel=loglevel, |             loglevel=loglevel, | ||||||
|             enable_modules=enable_modules, |             enable_modules=enable_modules, | ||||||
|         ) |         ) | ||||||
|  | @ -199,7 +135,7 @@ async def open_root_actor( | ||||||
| 
 | 
 | ||||||
|         actor = Arbiter( |         actor = Arbiter( | ||||||
|             name or 'arbiter', |             name or 'arbiter', | ||||||
|             arbiter_addr=registry_addr, |             arbiter_addr=arbiter_addr, | ||||||
|             loglevel=loglevel, |             loglevel=loglevel, | ||||||
|             enable_modules=enable_modules, |             enable_modules=enable_modules, | ||||||
|         ) |         ) | ||||||
|  | @ -215,14 +151,13 @@ async def open_root_actor( | ||||||
|         # start the actor runtime in a new task |         # start the actor runtime in a new task | ||||||
|         async with trio.open_nursery() as nursery: |         async with trio.open_nursery() as nursery: | ||||||
| 
 | 
 | ||||||
|             # ``_runtime.async_main()`` creates an internal nursery and |             # ``Actor._async_main()`` creates an internal nursery and | ||||||
|             # thus blocks here until the entire underlying actor tree has |             # thus blocks here until the entire underlying actor tree has | ||||||
|             # terminated thereby conducting structured concurrency. |             # terminated thereby conducting structured concurrency. | ||||||
| 
 | 
 | ||||||
|             await nursery.start( |             await nursery.start( | ||||||
|                 partial( |                 partial( | ||||||
|                     async_main, |                     actor._async_main, | ||||||
|                     actor, |  | ||||||
|                     accept_addr=(host, port), |                     accept_addr=(host, port), | ||||||
|                     parent_addr=None |                     parent_addr=None | ||||||
|                 ) |                 ) | ||||||
|  | @ -230,83 +165,77 @@ async def open_root_actor( | ||||||
|             try: |             try: | ||||||
|                 yield actor |                 yield actor | ||||||
| 
 | 
 | ||||||
|             except ( |             except (Exception, trio.MultiError) as err: | ||||||
|                 Exception, |                 logger.exception("Actor crashed:") | ||||||
|                 BaseExceptionGroup, |                 await _debug._maybe_enter_pm(err) | ||||||
|             ) as err: |  | ||||||
| 
 | 
 | ||||||
|                 entered = await _debug._maybe_enter_pm(err) |  | ||||||
| 
 |  | ||||||
|                 if not entered and not is_multi_cancelled(err): |  | ||||||
|                     logger.exception("Root actor crashed:") |  | ||||||
| 
 |  | ||||||
|                 # always re-raise |  | ||||||
|                 raise |                 raise | ||||||
| 
 |  | ||||||
|             finally: |             finally: | ||||||
|                 # NOTE: not sure if we'll ever need this but it's |                 logger.info("Shutting down root actor") | ||||||
|                 # possibly better for even more determinism? |  | ||||||
|                 # logger.cancel( |  | ||||||
|                 #     f'Waiting on {len(nurseries)} nurseries in root..') |  | ||||||
|                 # nurseries = actor._actoruid2nursery.values() |  | ||||||
|                 # async with trio.open_nursery() as tempn: |  | ||||||
|                 #     for an in nurseries: |  | ||||||
|                 #         tempn.start_soon(an.exited.wait) |  | ||||||
| 
 |  | ||||||
|                 logger.cancel("Shutting down root actor") |  | ||||||
|                 await actor.cancel() |                 await actor.cancel() | ||||||
|     finally: |     finally: | ||||||
|         _state._current_actor = None |         _state._current_actor = None | ||||||
| 
 |         logger.info("Root actor terminated") | ||||||
|         # restore breakpoint hook state |  | ||||||
|         sys.breakpointhook = builtin_bp_handler |  | ||||||
|         if orig_bp_path is not None: |  | ||||||
|             os.environ['PYTHONBREAKPOINT'] = orig_bp_path |  | ||||||
|         else: |  | ||||||
|             # clear env back to having no entry |  | ||||||
|             os.environ.pop('PYTHONBREAKPOINT') |  | ||||||
| 
 |  | ||||||
|         logger.runtime("Root actor terminated") |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def run_daemon( | def run( | ||||||
|     enable_modules: list[str], | 
 | ||||||
|  |     # target | ||||||
|  |     async_fn: typing.Callable[..., typing.Awaitable], | ||||||
|  |     *args, | ||||||
| 
 | 
 | ||||||
|     # runtime kwargs |     # runtime kwargs | ||||||
|     name: str | None = 'root', |     name: Optional[str] = 'root', | ||||||
|     registry_addr: tuple[str, int] = ( |     arbiter_addr: Tuple[str, int] = ( | ||||||
|         _default_arbiter_host, |         _default_arbiter_host, | ||||||
|         _default_arbiter_port, |         _default_arbiter_port, | ||||||
|     ), |     ), | ||||||
| 
 | 
 | ||||||
|     start_method: str | None = None, |     start_method: Optional[str] = None, | ||||||
|     debug_mode: bool = False, |     debug_mode: bool = False, | ||||||
|     **kwargs |     **kwargs, | ||||||
| 
 | 
 | ||||||
| ) -> None: | ) -> Any: | ||||||
|     ''' |     """Run a trio-actor async function in process. | ||||||
|     Spawn daemon actor which will respond to RPC; the main task simply |  | ||||||
|     starts the runtime and then sleeps forever. |  | ||||||
| 
 |  | ||||||
|     This is a very minimal convenience wrapper around starting |  | ||||||
|     a "run-until-cancelled" root actor which can be started with a set |  | ||||||
|     of enabled modules for RPC request handling. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     kwargs['enable_modules'] = list(enable_modules) |  | ||||||
| 
 |  | ||||||
|     for path in enable_modules: |  | ||||||
|         importlib.import_module(path) |  | ||||||
| 
 | 
 | ||||||
|  |     This is tractor's main entry and the start point for any async actor. | ||||||
|  |     """ | ||||||
|     async def _main(): |     async def _main(): | ||||||
| 
 | 
 | ||||||
|         async with open_root_actor( |         async with open_root_actor( | ||||||
|             registry_addr=registry_addr, |             arbiter_addr=arbiter_addr, | ||||||
|             name=name, |             name=name, | ||||||
|             start_method=start_method, |             start_method=start_method, | ||||||
|             debug_mode=debug_mode, |             debug_mode=debug_mode, | ||||||
|             **kwargs, |             **kwargs, | ||||||
|         ): |         ): | ||||||
|             return await trio.sleep_forever() |  | ||||||
| 
 | 
 | ||||||
|  |             return await async_fn(*args) | ||||||
|  | 
 | ||||||
|  |     warnings.warn( | ||||||
|  |         "`tractor.run()` is now deprecated. `tractor` now" | ||||||
|  |         " implicitly starts the root actor on first actor nursery" | ||||||
|  |         " use. If you want to start the root actor manually, use" | ||||||
|  |         " `tractor.open_root_actor()`.", | ||||||
|  |         DeprecationWarning, | ||||||
|  |         stacklevel=2, | ||||||
|  |     ) | ||||||
|     return trio.run(_main) |     return trio.run(_main) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def run_daemon( | ||||||
|  |     rpc_module_paths: List[str], | ||||||
|  |     **kwargs | ||||||
|  | ) -> None: | ||||||
|  |     """Spawn daemon actor which will respond to RPC. | ||||||
|  | 
 | ||||||
|  |     This is a convenience wrapper around | ||||||
|  |     ``tractor.run(trio.sleep(float('inf')))`` such that the first actor spawned | ||||||
|  |     is meant to run forever responding to RPC requests. | ||||||
|  |     """ | ||||||
|  |     kwargs['rpc_module_paths'] = list(rpc_module_paths) | ||||||
|  | 
 | ||||||
|  |     for path in rpc_module_paths: | ||||||
|  |         importlib.import_module(path) | ||||||
|  | 
 | ||||||
|  |     return run(partial(trio.sleep, float('inf')), **kwargs) | ||||||
|  |  | ||||||
							
								
								
									
										1760
									
								
								tractor/_runtime.py
								
								
								
								
							
							
						
						
									
										1760
									
								
								tractor/_runtime.py
								
								
								
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							|  | @ -1,157 +1,116 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| Machinery for actor process spawning using multiple backends. | Machinery for actor process spawning using multiple backends. | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| from __future__ import annotations |  | ||||||
| import sys | import sys | ||||||
|  | import inspect | ||||||
|  | import multiprocessing as mp | ||||||
| import platform | import platform | ||||||
| from typing import ( | from typing import Any, Dict, Optional | ||||||
|     Any, |  | ||||||
|     Awaitable, |  | ||||||
|     Literal, |  | ||||||
|     Callable, |  | ||||||
|     TypeVar, |  | ||||||
|     TYPE_CHECKING, |  | ||||||
| ) |  | ||||||
| 
 | 
 | ||||||
| from exceptiongroup import BaseExceptionGroup |  | ||||||
| import trio | import trio | ||||||
| from trio_typing import TaskStatus | from trio_typing import TaskStatus | ||||||
|  | from async_generator import aclosing, asynccontextmanager | ||||||
| 
 | 
 | ||||||
| from ._debug import ( | try: | ||||||
|     maybe_wait_for_debugger, |     from multiprocessing import semaphore_tracker  # type: ignore | ||||||
|     acquire_debug_lock, |     resource_tracker = semaphore_tracker | ||||||
| ) |     resource_tracker._resource_tracker = resource_tracker._semaphore_tracker | ||||||
| from ._state import ( | except ImportError: | ||||||
|     current_actor, |     # 3.8 introduces a more general version that also tracks shared mems | ||||||
|     is_main_process, |     from multiprocessing import resource_tracker  # type: ignore | ||||||
|     is_root_process, | 
 | ||||||
|     debug_mode, | from multiprocessing import forkserver  # type: ignore | ||||||
| ) | from typing import Tuple | ||||||
|  | 
 | ||||||
|  | from . import _forkserver_override | ||||||
|  | from ._state import current_actor, is_main_process | ||||||
| from .log import get_logger | from .log import get_logger | ||||||
| from ._portal import Portal | from ._portal import Portal | ||||||
| from ._runtime import Actor | from ._actor import Actor, ActorFailure | ||||||
| from ._entry import _mp_main | from ._entry import _mp_main | ||||||
| from ._exceptions import ActorFailure |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if TYPE_CHECKING: |  | ||||||
|     from ._supervise import ActorNursery |  | ||||||
|     import multiprocessing as mp |  | ||||||
|     ProcessType = TypeVar('ProcessType', mp.Process, trio.Process) |  | ||||||
| 
 |  | ||||||
| log = get_logger('tractor') | log = get_logger('tractor') | ||||||
| 
 | 
 | ||||||
| # placeholder for an mp start context if so using that backend | # placeholder for an mp start context if so using that backend | ||||||
| _ctx: mp.context.BaseContext | None = None | _ctx: Optional[mp.context.BaseContext] = None | ||||||
| SpawnMethodKey = Literal[ | _spawn_method: str = "spawn" | ||||||
|     'trio',  # supported on all platforms |  | ||||||
|     'mp_spawn', |  | ||||||
|     'mp_forkserver',  # posix only |  | ||||||
| ] |  | ||||||
| _spawn_method: SpawnMethodKey = 'trio' |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if platform.system() == 'Windows': | if platform.system() == 'Windows': | ||||||
| 
 |     _spawn_method = "spawn" | ||||||
|     import multiprocessing as mp |  | ||||||
|     _ctx = mp.get_context("spawn") |     _ctx = mp.get_context("spawn") | ||||||
| 
 | 
 | ||||||
|     async def proc_waiter(proc: mp.Process) -> None: |     async def proc_waiter(proc: mp.Process) -> None: | ||||||
|         await trio.lowlevel.WaitForSingleObject(proc.sentinel) |         await trio.lowlevel.WaitForSingleObject(proc.sentinel) | ||||||
| else: | else: | ||||||
|     # *NIX systems use ``trio`` primitives as our default as well |     # *NIX systems use ``trio`` primitives as our default | ||||||
|  |     _spawn_method = "trio" | ||||||
| 
 | 
 | ||||||
|     async def proc_waiter(proc: mp.Process) -> None: |     async def proc_waiter(proc: mp.Process) -> None: | ||||||
|         await trio.lowlevel.wait_readable(proc.sentinel) |         await trio.lowlevel.wait_readable(proc.sentinel) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def try_set_start_method( | def try_set_start_method(name: str) -> Optional[mp.context.BaseContext]: | ||||||
|     key: SpawnMethodKey |     """Attempt to set the method for process starting, aka the "actor | ||||||
| 
 |  | ||||||
| ) -> mp.context.BaseContext | None: |  | ||||||
|     ''' |  | ||||||
|     Attempt to set the method for process starting, aka the "actor |  | ||||||
|     spawning backend". |     spawning backend". | ||||||
| 
 | 
 | ||||||
|     If the desired method is not supported this function will error. |     If the desired method is not supported this function will error. | ||||||
|     On Windows only the ``multiprocessing`` "spawn" method is offered |     On Windows only the ``multiprocessing`` "spawn" method is offered | ||||||
|     besides the default ``trio`` which uses async wrapping around |     besides the default ``trio`` which uses async wrapping around | ||||||
|     ``subprocess.Popen``. |     ``subprocess.Popen``. | ||||||
| 
 |     """ | ||||||
|     ''' |  | ||||||
|     import multiprocessing as mp |  | ||||||
|     global _ctx |     global _ctx | ||||||
|     global _spawn_method |     global _spawn_method | ||||||
| 
 | 
 | ||||||
|     mp_methods = mp.get_all_start_methods() |     methods = mp.get_all_start_methods() | ||||||
|     if 'fork' in mp_methods: |     if 'fork' in methods: | ||||||
|         # forking is incompatible with ``trio``s global task tree |         # forking is incompatible with ``trio``s global task tree | ||||||
|         mp_methods.remove('fork') |         methods.remove('fork') | ||||||
| 
 | 
 | ||||||
|     match key: |     # supported on all platforms | ||||||
|         case 'mp_forkserver': |     methods += ['trio'] | ||||||
|             from . import _forkserver_override |  | ||||||
|             _forkserver_override.override_stdlib() |  | ||||||
|             _ctx = mp.get_context('forkserver') |  | ||||||
| 
 | 
 | ||||||
|         case 'mp_spawn': |     if name not in methods: | ||||||
|             _ctx = mp.get_context('spawn') |  | ||||||
| 
 |  | ||||||
|         case 'trio': |  | ||||||
|             _ctx = None |  | ||||||
| 
 |  | ||||||
|         case _: |  | ||||||
|         raise ValueError( |         raise ValueError( | ||||||
|                 f'Spawn method `{key}` is invalid!\n' |             f"Spawn method `{name}` is invalid please choose one of {methods}" | ||||||
|                 f'Please choose one of {SpawnMethodKey}' |  | ||||||
|         ) |         ) | ||||||
|  |     elif name == 'forkserver': | ||||||
|  |         _forkserver_override.override_stdlib() | ||||||
|  |         _ctx = mp.get_context(name) | ||||||
|  |     elif name == 'trio': | ||||||
|  |         _ctx = None | ||||||
|  |     else: | ||||||
|  |         _ctx = mp.get_context(name) | ||||||
| 
 | 
 | ||||||
|     _spawn_method = key |     _spawn_method = name | ||||||
|     return _ctx |     return _ctx | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def exhaust_portal( | async def exhaust_portal( | ||||||
| 
 |  | ||||||
|     portal: Portal, |     portal: Portal, | ||||||
|     actor: Actor |     actor: Actor | ||||||
| 
 |  | ||||||
| ) -> Any: | ) -> Any: | ||||||
|     ''' |     """Pull final result from portal (assuming it has one). | ||||||
|     Pull final result from portal (assuming it has one). |  | ||||||
| 
 | 
 | ||||||
|     If the main task is an async generator do our best to consume |     If the main task is an async generator do our best to consume | ||||||
|     what's left of it. |     what's left of it. | ||||||
|     ''' |     """ | ||||||
|     __tracebackhide__ = True |  | ||||||
|     try: |     try: | ||||||
|         log.debug(f"Waiting on final result from {actor.uid}") |         log.debug(f"Waiting on final result from {actor.uid}") | ||||||
| 
 |         final = res = await portal.result() | ||||||
|         # XXX: streams should never be reaped here since they should |         # if it's an async-gen then alert that we're cancelling it | ||||||
|         # always be established and shutdown using a context manager api |         if inspect.isasyncgen(res): | ||||||
|         final = await portal.result() |             final = [] | ||||||
| 
 |             log.warning( | ||||||
|     except ( |                 f"Blindly consuming asyncgen for {actor.uid}") | ||||||
|         Exception, |             with trio.fail_after(1): | ||||||
|         BaseExceptionGroup, |                 async with aclosing(res) as agen: | ||||||
|     ) as err: |                     async for item in agen: | ||||||
|         # we reraise in the parent task via a ``BaseExceptionGroup`` |                         log.debug(f"Consuming item {item}") | ||||||
|  |                         final.append(item) | ||||||
|  |     except (Exception, trio.MultiError) as err: | ||||||
|  |         # we reraise in the parent task via a ``trio.MultiError`` | ||||||
|         return err |         return err | ||||||
|     except trio.Cancelled as err: |     except trio.Cancelled as err: | ||||||
|         # lol, of course we need this too ;P |         # lol, of course we need this too ;P | ||||||
|  | @ -164,31 +123,29 @@ async def exhaust_portal( | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def cancel_on_completion( | async def cancel_on_completion( | ||||||
| 
 |  | ||||||
|     portal: Portal, |     portal: Portal, | ||||||
|     actor: Actor, |     actor: Actor, | ||||||
|     errors: dict[tuple[str, str], Exception], |     errors: Dict[Tuple[str, str], Exception], | ||||||
| 
 |     task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED, | ||||||
| ) -> None: | ) -> None: | ||||||
|     ''' |     """Cancel actor gracefully once it's "main" portal's | ||||||
|     Cancel actor gracefully once it's "main" portal's |  | ||||||
|     result arrives. |     result arrives. | ||||||
| 
 | 
 | ||||||
|     Should only be called for actors spawned with `run_in_actor()`. |     Should only be called for actors spawned with `run_in_actor()`. | ||||||
| 
 |     """ | ||||||
|     ''' |     with trio.CancelScope() as cs: | ||||||
|  |         task_status.started(cs) | ||||||
|         # if this call errors we store the exception for later |         # if this call errors we store the exception for later | ||||||
|         # in ``errors`` which will be reraised inside |         # in ``errors`` which will be reraised inside | ||||||
|     # an exception group and we still send out a cancel request |         # a MultiError and we still send out a cancel request | ||||||
|         result = await exhaust_portal(portal, actor) |         result = await exhaust_portal(portal, actor) | ||||||
|         if isinstance(result, Exception): |         if isinstance(result, Exception): | ||||||
|             errors[actor.uid] = result |             errors[actor.uid] = result | ||||||
|             log.warning( |             log.warning( | ||||||
|                 f"Cancelling {portal.channel.uid} after error {result}" |                 f"Cancelling {portal.channel.uid} after error {result}" | ||||||
|             ) |             ) | ||||||
| 
 |  | ||||||
|         else: |         else: | ||||||
|         log.runtime( |             log.info( | ||||||
|                 f"Cancelling {portal.channel.uid} gracefully " |                 f"Cancelling {portal.channel.uid} gracefully " | ||||||
|                 f"after result {result}") |                 f"after result {result}") | ||||||
| 
 | 
 | ||||||
|  | @ -196,158 +153,11 @@ async def cancel_on_completion( | ||||||
|         await portal.cancel_actor() |         await portal.cancel_actor() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def do_hard_kill( | @asynccontextmanager | ||||||
|     proc: trio.Process, | async def spawn_subactor( | ||||||
|     terminate_after: int = 3, |     subactor: 'Actor', | ||||||
| 
 |     parent_addr: Tuple[str, int], | ||||||
| ) -> None: | ): | ||||||
|     # NOTE: this timeout used to do nothing since we were shielding |  | ||||||
|     # the ``.wait()`` inside ``new_proc()`` which will pretty much |  | ||||||
|     # never release until the process exits, now it acts as |  | ||||||
|     # a hard-kill time ultimatum. |  | ||||||
|     log.debug(f"Terminating {proc}") |  | ||||||
|     with trio.move_on_after(terminate_after) as cs: |  | ||||||
| 
 |  | ||||||
|         # NOTE: code below was copied verbatim from the now deprecated |  | ||||||
|         # (in 0.20.0) ``trio._subrocess.Process.aclose()``, orig doc |  | ||||||
|         # string: |  | ||||||
|         # |  | ||||||
|         # Close any pipes we have to the process (both input and output) |  | ||||||
|         # and wait for it to exit. If cancelled, kills the process and |  | ||||||
|         # waits for it to finish exiting before propagating the |  | ||||||
|         # cancellation. |  | ||||||
|         with trio.CancelScope(shield=True): |  | ||||||
|             if proc.stdin is not None: |  | ||||||
|                 await proc.stdin.aclose() |  | ||||||
|             if proc.stdout is not None: |  | ||||||
|                 await proc.stdout.aclose() |  | ||||||
|             if proc.stderr is not None: |  | ||||||
|                 await proc.stderr.aclose() |  | ||||||
|         try: |  | ||||||
|             await proc.wait() |  | ||||||
|         finally: |  | ||||||
|             if proc.returncode is None: |  | ||||||
|                 proc.kill() |  | ||||||
|                 with trio.CancelScope(shield=True): |  | ||||||
|                     await proc.wait() |  | ||||||
| 
 |  | ||||||
|     if cs.cancelled_caught: |  | ||||||
|         # XXX: should pretty much never get here unless we have |  | ||||||
|         # to move the bits from ``proc.__aexit__()`` out and |  | ||||||
|         # into here. |  | ||||||
|         log.critical(f"#ZOMBIE_LORD_IS_HERE: {proc}") |  | ||||||
|         proc.kill() |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def soft_wait( |  | ||||||
| 
 |  | ||||||
|     proc: ProcessType, |  | ||||||
|     wait_func: Callable[ |  | ||||||
|         [ProcessType], |  | ||||||
|         Awaitable, |  | ||||||
|     ], |  | ||||||
|     portal: Portal, |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     # Wait for proc termination but **dont' yet** call |  | ||||||
|     # ``trio.Process.__aexit__()`` (it tears down stdio |  | ||||||
|     # which will kill any waiting remote pdb trace). |  | ||||||
|     # This is a "soft" (cancellable) join/reap. |  | ||||||
|     uid = portal.channel.uid |  | ||||||
|     try: |  | ||||||
|         log.cancel(f'Soft waiting on actor:\n{uid}') |  | ||||||
|         await wait_func(proc) |  | ||||||
|     except trio.Cancelled: |  | ||||||
|         # if cancelled during a soft wait, cancel the child |  | ||||||
|         # actor before entering the hard reap sequence |  | ||||||
|         # below. This means we try to do a graceful teardown |  | ||||||
|         # via sending a cancel message before getting out |  | ||||||
|         # zombie killing tools. |  | ||||||
|         async with trio.open_nursery() as n: |  | ||||||
|             n.cancel_scope.shield = True |  | ||||||
| 
 |  | ||||||
|             async def cancel_on_proc_deth(): |  | ||||||
|                 ''' |  | ||||||
|                 Cancel the actor cancel request if we detect that |  | ||||||
|                 that the process terminated. |  | ||||||
| 
 |  | ||||||
|                 ''' |  | ||||||
|                 await wait_func(proc) |  | ||||||
|                 n.cancel_scope.cancel() |  | ||||||
| 
 |  | ||||||
|             n.start_soon(cancel_on_proc_deth) |  | ||||||
|             await portal.cancel_actor() |  | ||||||
| 
 |  | ||||||
|             if proc.poll() is None:  # type: ignore |  | ||||||
|                 log.warning( |  | ||||||
|                     'Actor still alive after cancel request:\n' |  | ||||||
|                     f'{uid}' |  | ||||||
|                 ) |  | ||||||
| 
 |  | ||||||
|                 n.cancel_scope.cancel() |  | ||||||
|         raise |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def new_proc( |  | ||||||
|     name: str, |  | ||||||
|     actor_nursery: ActorNursery, |  | ||||||
|     subactor: Actor, |  | ||||||
|     errors: dict[tuple[str, str], Exception], |  | ||||||
| 
 |  | ||||||
|     # passed through to actor main |  | ||||||
|     bind_addr: tuple[str, int], |  | ||||||
|     parent_addr: tuple[str, int], |  | ||||||
|     _runtime_vars: dict[str, Any],  # serialized and sent to _child |  | ||||||
| 
 |  | ||||||
|     *, |  | ||||||
| 
 |  | ||||||
|     infect_asyncio: bool = False, |  | ||||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     # lookup backend spawning target |  | ||||||
|     target = _methods[_spawn_method] |  | ||||||
| 
 |  | ||||||
|     # mark the new actor with the global spawn method |  | ||||||
|     subactor._spawn_method = _spawn_method |  | ||||||
| 
 |  | ||||||
|     await target( |  | ||||||
|         name, |  | ||||||
|         actor_nursery, |  | ||||||
|         subactor, |  | ||||||
|         errors, |  | ||||||
|         bind_addr, |  | ||||||
|         parent_addr, |  | ||||||
|         _runtime_vars,  # run time vars |  | ||||||
|         infect_asyncio=infect_asyncio, |  | ||||||
|         task_status=task_status, |  | ||||||
|     ) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def trio_proc( |  | ||||||
|     name: str, |  | ||||||
|     actor_nursery: ActorNursery, |  | ||||||
|     subactor: Actor, |  | ||||||
|     errors: dict[tuple[str, str], Exception], |  | ||||||
| 
 |  | ||||||
|     # passed through to actor main |  | ||||||
|     bind_addr: tuple[str, int], |  | ||||||
|     parent_addr: tuple[str, int], |  | ||||||
|     _runtime_vars: dict[str, Any],  # serialized and sent to _child |  | ||||||
|     *, |  | ||||||
|     infect_asyncio: bool = False, |  | ||||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
|     ''' |  | ||||||
|     Create a new ``Process`` using a "spawn method" as (configured using |  | ||||||
|     ``try_set_start_method()``). |  | ||||||
| 
 |  | ||||||
|     This routine should be started in a actor runtime task and the logic |  | ||||||
|     here is to be considered the core supervision strategy. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     spawn_cmd = [ |     spawn_cmd = [ | ||||||
|         sys.executable, |         sys.executable, | ||||||
|         "-m", |         "-m", | ||||||
|  | @ -370,51 +180,69 @@ async def trio_proc( | ||||||
|             "--loglevel", |             "--loglevel", | ||||||
|             subactor.loglevel |             subactor.loglevel | ||||||
|         ] |         ] | ||||||
|     # Tell child to run in guest mode on top of ``asyncio`` loop |  | ||||||
|     if infect_asyncio: |  | ||||||
|         spawn_cmd.append("--asyncio") |  | ||||||
| 
 | 
 | ||||||
|     cancelled_during_spawn: bool = False |     proc = await trio.open_process(spawn_cmd) | ||||||
|     proc: trio.Process | None = None |  | ||||||
|     try: |     try: | ||||||
|         try: |         yield proc | ||||||
|             # TODO: needs ``trio_typing`` patch? |     finally: | ||||||
|             proc = await trio.lowlevel.open_process(spawn_cmd) |         # XXX: do this **after** cancellation/tearfown | ||||||
|  |         # to avoid killing the process too early | ||||||
|  |         # since trio does this internally on ``__aexit__()`` | ||||||
| 
 | 
 | ||||||
|             log.runtime(f"Started {proc}") |         # NOTE: we always "shield" join sub procs in | ||||||
|  |         # the outer scope since no actor zombies are | ||||||
|  |         # ever allowed. This ``__aexit__()`` also shields | ||||||
|  |         # internally. | ||||||
|  |         log.debug(f"Attempting to kill {proc}") | ||||||
|  | 
 | ||||||
|  |         # NOTE: this timeout effectively does nothing right now since | ||||||
|  |         # we are shielding the ``.wait()`` inside ``new_proc()`` which | ||||||
|  |         # will pretty much never release until the process exits. | ||||||
|  |         with trio.move_on_after(3) as cs: | ||||||
|  |             async with proc: | ||||||
|  |                 log.debug(f"Terminating {proc}") | ||||||
|  |         if cs.cancelled_caught: | ||||||
|  |             log.critical(f"HARD KILLING {proc}") | ||||||
|  |             proc.kill() | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | async def new_proc( | ||||||
|  |     name: str, | ||||||
|  |     actor_nursery: 'ActorNursery',  # type: ignore | ||||||
|  |     subactor: Actor, | ||||||
|  |     errors: Dict[Tuple[str, str], Exception], | ||||||
|  |     # passed through to actor main | ||||||
|  |     bind_addr: Tuple[str, int], | ||||||
|  |     parent_addr: Tuple[str, int], | ||||||
|  |     _runtime_vars: Dict[str, Any],  # serialized and sent to _child | ||||||
|  |     *, | ||||||
|  |     use_trio_run_in_process: bool = False, | ||||||
|  |     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||||
|  | ) -> None: | ||||||
|  |     """Create a new ``multiprocessing.Process`` using the | ||||||
|  |     spawn method as configured using ``try_set_start_method()``. | ||||||
|  |     """ | ||||||
|  |     cancel_scope = None | ||||||
|  | 
 | ||||||
|  |     # mark the new actor with the global spawn method | ||||||
|  |     subactor._spawn_method = _spawn_method | ||||||
|  | 
 | ||||||
|  |     async with trio.open_nursery() as nursery: | ||||||
|  |         if use_trio_run_in_process or _spawn_method == 'trio': | ||||||
|  |             async with spawn_subactor( | ||||||
|  |                 subactor, | ||||||
|  |                 parent_addr, | ||||||
|  |             ) as proc: | ||||||
|  |                 log.info(f"Started {proc}") | ||||||
| 
 | 
 | ||||||
|                 # wait for actor to spawn and connect back to us |                 # wait for actor to spawn and connect back to us | ||||||
|                 # channel should have handshake completed by the |                 # channel should have handshake completed by the | ||||||
|                 # local actor by the time we get a ref to it |                 # local actor by the time we get a ref to it | ||||||
|                 event, chan = await actor_nursery._actor.wait_for_peer( |                 event, chan = await actor_nursery._actor.wait_for_peer( | ||||||
|                     subactor.uid) |                     subactor.uid) | ||||||
| 
 |  | ||||||
|         except trio.Cancelled: |  | ||||||
|             cancelled_during_spawn = True |  | ||||||
|             # we may cancel before the child connects back in which |  | ||||||
|             # case avoid clobbering the pdb tty. |  | ||||||
|             if debug_mode(): |  | ||||||
|                 with trio.CancelScope(shield=True): |  | ||||||
|                     # don't clobber an ongoing pdb |  | ||||||
|                     if is_root_process(): |  | ||||||
|                         await maybe_wait_for_debugger() |  | ||||||
| 
 |  | ||||||
|                     elif proc is not None: |  | ||||||
|                         async with acquire_debug_lock(subactor.uid): |  | ||||||
|                             # soft wait on the proc to terminate |  | ||||||
|                             with trio.move_on_after(0.5): |  | ||||||
|                                 await proc.wait() |  | ||||||
|             raise |  | ||||||
| 
 |  | ||||||
|         # a sub-proc ref **must** exist now |  | ||||||
|         assert proc |  | ||||||
| 
 |  | ||||||
|                 portal = Portal(chan) |                 portal = Portal(chan) | ||||||
|                 actor_nursery._children[subactor.uid] = ( |                 actor_nursery._children[subactor.uid] = ( | ||||||
|             subactor, |                     subactor, proc, portal) | ||||||
|             proc, |  | ||||||
|             portal, |  | ||||||
|         ) |  | ||||||
| 
 | 
 | ||||||
|                 # send additional init params |                 # send additional init params | ||||||
|                 await chan.send({ |                 await chan.send({ | ||||||
|  | @ -437,102 +265,27 @@ async def trio_proc( | ||||||
|                 with trio.CancelScope(shield=True): |                 with trio.CancelScope(shield=True): | ||||||
|                     await actor_nursery._join_procs.wait() |                     await actor_nursery._join_procs.wait() | ||||||
| 
 | 
 | ||||||
|         async with trio.open_nursery() as nursery: |  | ||||||
|                 if portal in actor_nursery._cancel_after_result_on_exit: |                 if portal in actor_nursery._cancel_after_result_on_exit: | ||||||
|                 nursery.start_soon( |                     cancel_scope = await nursery.start( | ||||||
|                     cancel_on_completion, |                         cancel_on_completion, portal, subactor, errors) | ||||||
|                     portal, |  | ||||||
|                     subactor, |  | ||||||
|                     errors |  | ||||||
|                 ) |  | ||||||
| 
 | 
 | ||||||
|             # This is a "soft" (cancellable) join/reap which |                 # Wait for proc termination but **dont' yet** call | ||||||
|             # will remote cancel the actor on a ``trio.Cancelled`` |                 # ``trio.Process.__aexit__()`` (it tears down stdio | ||||||
|             # condition. |                 # which will kill any waiting remote pdb trace). | ||||||
|             await soft_wait( |  | ||||||
|                 proc, |  | ||||||
|                 trio.Process.wait, |  | ||||||
|                 portal |  | ||||||
|             ) |  | ||||||
| 
 | 
 | ||||||
|             # cancel result waiter that may have been spawned in |                 # TODO: No idea how we can enforce zombie | ||||||
|             # tandem if not done already |                 # reaping more stringently without the shield | ||||||
|             log.warning( |                 # we used to have below... | ||||||
|                 "Cancelling existing result waiter task for " |  | ||||||
|                 f"{subactor.uid}") |  | ||||||
|             nursery.cancel_scope.cancel() |  | ||||||
| 
 | 
 | ||||||
|     finally: |                 # always "hard" join sub procs: | ||||||
|         # XXX NOTE XXX: The "hard" reap since no actor zombies are |                 # no actor zombies allowed | ||||||
|         # allowed! Do this **after** cancellation/teardown to avoid |                 # with trio.CancelScope(shield=True): | ||||||
|         # killing the process too early. |  | ||||||
|         if proc: |  | ||||||
|             log.cancel(f'Hard reap sequence starting for {subactor.uid}') |  | ||||||
|             with trio.CancelScope(shield=True): |  | ||||||
| 
 |  | ||||||
|                 # don't clobber an ongoing pdb |  | ||||||
|                 if cancelled_during_spawn: |  | ||||||
|                     # Try again to avoid TTY clobbering. |  | ||||||
|                     async with acquire_debug_lock(subactor.uid): |  | ||||||
|                         with trio.move_on_after(0.5): |  | ||||||
|                 await proc.wait() |                 await proc.wait() | ||||||
| 
 |  | ||||||
|                 if is_root_process(): |  | ||||||
|                     # TODO: solve the following issue where we need |  | ||||||
|                     # to do a similar wait like this but in an |  | ||||||
|                     # "intermediary" parent actor that itself isn't |  | ||||||
|                     # in debug but has a child that is, and we need |  | ||||||
|                     # to hold off on relaying SIGINT until that child |  | ||||||
|                     # is complete. |  | ||||||
|                     # https://github.com/goodboy/tractor/issues/320 |  | ||||||
|                     await maybe_wait_for_debugger( |  | ||||||
|                         child_in_debug=_runtime_vars.get( |  | ||||||
|                             '_debug_mode', False), |  | ||||||
|                     ) |  | ||||||
| 
 |  | ||||||
|                 if proc.poll() is None: |  | ||||||
|                     log.cancel(f"Attempting to hard kill {proc}") |  | ||||||
|                     await do_hard_kill(proc) |  | ||||||
| 
 |  | ||||||
|                 log.debug(f"Joined {proc}") |  | ||||||
|         else: |         else: | ||||||
|             log.warning('Nursery cancelled before sub-proc started') |             # `multiprocessing` | ||||||
| 
 |  | ||||||
|         if not cancelled_during_spawn: |  | ||||||
|             # pop child entry to indicate we no longer managing this |  | ||||||
|             # subactor |  | ||||||
|             actor_nursery._children.pop(subactor.uid) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| async def mp_proc( |  | ||||||
|     name: str, |  | ||||||
|     actor_nursery: ActorNursery,  # type: ignore  # noqa |  | ||||||
|     subactor: Actor, |  | ||||||
|     errors: dict[tuple[str, str], Exception], |  | ||||||
|     # passed through to actor main |  | ||||||
|     bind_addr: tuple[str, int], |  | ||||||
|     parent_addr: tuple[str, int], |  | ||||||
|     _runtime_vars: dict[str, Any],  # serialized and sent to _child |  | ||||||
|     *, |  | ||||||
|     infect_asyncio: bool = False, |  | ||||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED |  | ||||||
| 
 |  | ||||||
| ) -> None: |  | ||||||
| 
 |  | ||||||
|     # uggh zone |  | ||||||
|     try: |  | ||||||
|         from multiprocessing import semaphore_tracker  # type: ignore |  | ||||||
|         resource_tracker = semaphore_tracker |  | ||||||
|         resource_tracker._resource_tracker = resource_tracker._semaphore_tracker  # noqa |  | ||||||
|     except ImportError: |  | ||||||
|         # 3.8 introduces a more general version that also tracks shared mems |  | ||||||
|         from multiprocessing import resource_tracker  # type: ignore |  | ||||||
| 
 |  | ||||||
|             assert _ctx |             assert _ctx | ||||||
|             start_method = _ctx.get_start_method() |             start_method = _ctx.get_start_method() | ||||||
|             if start_method == 'forkserver': |             if start_method == 'forkserver': | ||||||
| 
 |  | ||||||
|         from multiprocessing import forkserver  # type: ignore |  | ||||||
|                 # XXX do our hackery on the stdlib to avoid multiple |                 # XXX do our hackery on the stdlib to avoid multiple | ||||||
|                 # forkservers (one at each subproc layer). |                 # forkservers (one at each subproc layer). | ||||||
|                 fs = forkserver._forkserver |                 fs = forkserver._forkserver | ||||||
|  | @ -544,40 +297,37 @@ async def mp_proc( | ||||||
|                     # forkserver.set_forkserver_preload(enable_modules) |                     # forkserver.set_forkserver_preload(enable_modules) | ||||||
|                     forkserver.ensure_running() |                     forkserver.ensure_running() | ||||||
|                     fs_info = ( |                     fs_info = ( | ||||||
|                 fs._forkserver_address,  # type: ignore  # noqa |                         fs._forkserver_address, | ||||||
|                 fs._forkserver_alive_fd,  # type: ignore  # noqa |                         fs._forkserver_alive_fd, | ||||||
|                         getattr(fs, '_forkserver_pid', None), |                         getattr(fs, '_forkserver_pid', None), | ||||||
|                         getattr( |                         getattr( | ||||||
|                             resource_tracker._resource_tracker, '_pid', None), |                             resource_tracker._resource_tracker, '_pid', None), | ||||||
|                         resource_tracker._resource_tracker._fd, |                         resource_tracker._resource_tracker._fd, | ||||||
|                     ) |                     ) | ||||||
|         else:  # request to forkerserver to fork a new child |                 else: | ||||||
|                     assert curr_actor._forkserver_info |                     assert curr_actor._forkserver_info | ||||||
|                     fs_info = ( |                     fs_info = ( | ||||||
|                 fs._forkserver_address,  # type: ignore  # noqa |                         fs._forkserver_address, | ||||||
|                 fs._forkserver_alive_fd,  # type: ignore  # noqa |                         fs._forkserver_alive_fd, | ||||||
|                 fs._forkserver_pid,  # type: ignore  # noqa |                         fs._forkserver_pid, | ||||||
|                         resource_tracker._resource_tracker._pid, |                         resource_tracker._resource_tracker._pid, | ||||||
|                         resource_tracker._resource_tracker._fd, |                         resource_tracker._resource_tracker._fd, | ||||||
|                      ) = curr_actor._forkserver_info |                      ) = curr_actor._forkserver_info | ||||||
|             else: |             else: | ||||||
|         # spawn method |  | ||||||
|                 fs_info = (None, None, None, None, None) |                 fs_info = (None, None, None, None, None) | ||||||
| 
 | 
 | ||||||
|     proc: mp.Process = _ctx.Process(  # type: ignore |             proc = _ctx.Process(  # type: ignore | ||||||
|                 target=_mp_main, |                 target=_mp_main, | ||||||
|                 args=( |                 args=( | ||||||
|                     subactor, |                     subactor, | ||||||
|                     bind_addr, |                     bind_addr, | ||||||
|                     fs_info, |                     fs_info, | ||||||
|             _spawn_method, |                     start_method, | ||||||
|                     parent_addr, |                     parent_addr, | ||||||
|             infect_asyncio, |  | ||||||
|                 ), |                 ), | ||||||
|                 # daemon=True, |                 # daemon=True, | ||||||
|                 name=name, |                 name=name, | ||||||
|             ) |             ) | ||||||
| 
 |  | ||||||
|             # `multiprocessing` only (since no async interface): |             # `multiprocessing` only (since no async interface): | ||||||
|             # register the process before start in case we get a cancel |             # register the process before start in case we get a cancel | ||||||
|             # request before the actor has fully spawned - then we can wait |             # request before the actor has fully spawned - then we can wait | ||||||
|  | @ -588,24 +338,13 @@ async def mp_proc( | ||||||
|             if not proc.is_alive(): |             if not proc.is_alive(): | ||||||
|                 raise ActorFailure("Couldn't start sub-actor?") |                 raise ActorFailure("Couldn't start sub-actor?") | ||||||
| 
 | 
 | ||||||
|     log.runtime(f"Started {proc}") |             log.info(f"Started {proc}") | ||||||
| 
 | 
 | ||||||
|     try: |  | ||||||
|             # wait for actor to spawn and connect back to us |             # wait for actor to spawn and connect back to us | ||||||
|             # channel should have handshake completed by the |             # channel should have handshake completed by the | ||||||
|             # local actor by the time we get a ref to it |             # local actor by the time we get a ref to it | ||||||
|             event, chan = await actor_nursery._actor.wait_for_peer( |             event, chan = await actor_nursery._actor.wait_for_peer( | ||||||
|                 subactor.uid) |                 subactor.uid) | ||||||
| 
 |  | ||||||
|         # XXX: monkey patch poll API to match the ``subprocess`` API.. |  | ||||||
|         # not sure why they don't expose this but kk. |  | ||||||
|         proc.poll = lambda: proc.exitcode  # type: ignore |  | ||||||
| 
 |  | ||||||
|     # except: |  | ||||||
|         # TODO: in the case we were cancelled before the sub-proc |  | ||||||
|         # registered itself back we must be sure to try and clean |  | ||||||
|         # any process we may have started. |  | ||||||
| 
 |  | ||||||
|             portal = Portal(chan) |             portal = Portal(chan) | ||||||
|             actor_nursery._children[subactor.uid] = (subactor, proc, portal) |             actor_nursery._children[subactor.uid] = (subactor, proc, portal) | ||||||
| 
 | 
 | ||||||
|  | @ -621,59 +360,25 @@ async def mp_proc( | ||||||
|             # while user code is still doing it's thing. Only after the |             # while user code is still doing it's thing. Only after the | ||||||
|             # nursery block closes do we allow subactor results to be |             # nursery block closes do we allow subactor results to be | ||||||
|             # awaited and reported upwards to the supervisor. |             # awaited and reported upwards to the supervisor. | ||||||
|         with trio.CancelScope(shield=True): |  | ||||||
|             await actor_nursery._join_procs.wait() |             await actor_nursery._join_procs.wait() | ||||||
| 
 | 
 | ||||||
|         async with trio.open_nursery() as nursery: |  | ||||||
|             if portal in actor_nursery._cancel_after_result_on_exit: |             if portal in actor_nursery._cancel_after_result_on_exit: | ||||||
|                 nursery.start_soon( |                 cancel_scope = await nursery.start( | ||||||
|                     cancel_on_completion, |                     cancel_on_completion, portal, subactor, errors) | ||||||
|                     portal, |  | ||||||
|                     subactor, |  | ||||||
|                     errors |  | ||||||
|                 ) |  | ||||||
| 
 | 
 | ||||||
|             # This is a "soft" (cancellable) join/reap which |             # TODO: timeout block here? | ||||||
|             # will remote cancel the actor on a ``trio.Cancelled`` |             if proc.is_alive(): | ||||||
|             # condition. |                 await proc_waiter(proc) | ||||||
|             await soft_wait( |             proc.join() | ||||||
|                 proc, |  | ||||||
|                 proc_waiter, |  | ||||||
|                 portal |  | ||||||
|             ) |  | ||||||
| 
 | 
 | ||||||
|  |         # This is again common logic for all backends: | ||||||
|  | 
 | ||||||
|  |         log.debug(f"Joined {proc}") | ||||||
|  |         # pop child entry to indicate we are no longer managing this subactor | ||||||
|  |         subactor, proc, portal = actor_nursery._children.pop(subactor.uid) | ||||||
|         # cancel result waiter that may have been spawned in |         # cancel result waiter that may have been spawned in | ||||||
|         # tandem if not done already |         # tandem if not done already | ||||||
|  |         if cancel_scope: | ||||||
|             log.warning( |             log.warning( | ||||||
|                 "Cancelling existing result waiter task for " |                 f"Cancelling existing result waiter task for {subactor.uid}") | ||||||
|                 f"{subactor.uid}") |             cancel_scope.cancel() | ||||||
|             nursery.cancel_scope.cancel() |  | ||||||
| 
 |  | ||||||
|     finally: |  | ||||||
|         # hard reap sequence |  | ||||||
|         if proc.is_alive(): |  | ||||||
|             log.cancel(f"Attempting to hard kill {proc}") |  | ||||||
|             with trio.move_on_after(0.1) as cs: |  | ||||||
|                 cs.shield = True |  | ||||||
|                 await proc_waiter(proc) |  | ||||||
| 
 |  | ||||||
|             if cs.cancelled_caught: |  | ||||||
|                 proc.terminate() |  | ||||||
| 
 |  | ||||||
|         proc.join() |  | ||||||
|         log.debug(f"Joined {proc}") |  | ||||||
| 
 |  | ||||||
|         # pop child entry to indicate we are no longer managing subactor |  | ||||||
|         actor_nursery._children.pop(subactor.uid) |  | ||||||
| 
 |  | ||||||
|         # TODO: prolly report to ``mypy`` how this causes all sorts of |  | ||||||
|         # false errors.. |  | ||||||
|         # subactor, proc, portal = actor_nursery._children.pop(subactor.uid) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| # proc spawning backend target map |  | ||||||
| _methods: dict[SpawnMethodKey, Callable] = { |  | ||||||
|     'trio': trio_proc, |  | ||||||
|     'mp_spawn': mp_proc, |  | ||||||
|     'mp_forkserver': mp_proc, |  | ||||||
| } |  | ||||||
|  |  | ||||||
|  | @ -1,35 +1,14 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| Per process state | Per process state | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| from typing import ( | from typing import Optional, Dict, Any | ||||||
|     Optional, | from collections.abc import Mapping | ||||||
|     Any, | import multiprocessing as mp | ||||||
| ) |  | ||||||
| 
 | 
 | ||||||
| import trio | import trio | ||||||
| 
 | 
 | ||||||
| from ._exceptions import NoRuntime |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| _current_actor: Optional['Actor'] = None  # type: ignore # noqa | _current_actor: Optional['Actor'] = None  # type: ignore # noqa | ||||||
| _runtime_vars: dict[str, Any] = { | _runtime_vars: Dict[str, Any] = { | ||||||
|     '_debug_mode': False, |     '_debug_mode': False, | ||||||
|     '_is_root': False, |     '_is_root': False, | ||||||
|     '_root_mailbox': (None, None) |     '_root_mailbox': (None, None) | ||||||
|  | @ -40,15 +19,38 @@ def current_actor(err_on_no_runtime: bool = True) -> 'Actor':  # type: ignore # | ||||||
|     """Get the process-local actor instance. |     """Get the process-local actor instance. | ||||||
|     """ |     """ | ||||||
|     if _current_actor is None and err_on_no_runtime: |     if _current_actor is None and err_on_no_runtime: | ||||||
|         raise NoRuntime("No local actor has been initialized yet") |         raise RuntimeError("No local actor has been initialized yet") | ||||||
| 
 | 
 | ||||||
|     return _current_actor |     return _current_actor | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | _conc_name_getters = { | ||||||
|  |     'task': trio.lowlevel.current_task, | ||||||
|  |     'actor': current_actor | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class ActorContextInfo(Mapping): | ||||||
|  |     "Dyanmic lookup for local actor and task names" | ||||||
|  |     _context_keys = ('task', 'actor') | ||||||
|  | 
 | ||||||
|  |     def __len__(self): | ||||||
|  |         return len(self._context_keys) | ||||||
|  | 
 | ||||||
|  |     def __iter__(self): | ||||||
|  |         return iter(self._context_keys) | ||||||
|  | 
 | ||||||
|  |     def __getitem__(self, key: str) -> str: | ||||||
|  |         try: | ||||||
|  |             return _conc_name_getters[key]().name  # type: ignore | ||||||
|  |         except RuntimeError: | ||||||
|  |             # no local actor/task context initialized yet | ||||||
|  |             return f'no {key} context' | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
| def is_main_process() -> bool: | def is_main_process() -> bool: | ||||||
|     """Bool determining if this actor is running in the top-most process. |     """Bool determining if this actor is running in the top-most process. | ||||||
|     """ |     """ | ||||||
|     import multiprocessing as mp |  | ||||||
|     return mp.current_process().name == 'MainProcess' |     return mp.current_process().name == 'MainProcess' | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -1,755 +1,49 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ |  | ||||||
| Message stream types and APIs. |  | ||||||
| 
 |  | ||||||
| """ |  | ||||||
| from __future__ import annotations |  | ||||||
| import inspect | import inspect | ||||||
| from contextlib import asynccontextmanager | from contextvars import ContextVar | ||||||
| from dataclasses import dataclass | from dataclasses import dataclass | ||||||
| from typing import ( | from typing import Any | ||||||
|     Any, |  | ||||||
|     Optional, |  | ||||||
|     Callable, |  | ||||||
|     AsyncGenerator, |  | ||||||
|     AsyncIterator |  | ||||||
| ) |  | ||||||
| 
 |  | ||||||
| import warnings |  | ||||||
| 
 | 
 | ||||||
| import trio | import trio | ||||||
| 
 | 
 | ||||||
| from ._ipc import Channel | from ._ipc import Channel | ||||||
| from ._exceptions import unpack_error, ContextCancelled |  | ||||||
| from ._state import current_actor |  | ||||||
| from .log import get_logger |  | ||||||
| from .trionics import broadcast_receiver, BroadcastReceiver |  | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| log = get_logger(__name__) | _context: ContextVar['Context'] = ContextVar('context') | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| # TODO: the list | @dataclass(frozen=True) | ||||||
| # - generic typing like trio's receive channel but with msgspec |  | ||||||
| #   messages? class ReceiveChannel(AsyncResource, Generic[ReceiveType]): |  | ||||||
| # - use __slots__ on ``Context``? |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
class MsgStream(trio.abc.Channel):
    '''
    A bidirectional message stream for receiving logically sequenced
    values over an inter-actor IPC ``Channel``.

    This is the type returned to a local task which entered either
    ``Portal.open_stream_from()`` or ``Context.open_stream()``.

    Termination rules:

    - on cancellation the stream is **not** implicitly closed and the
      surrounding ``Context`` is expected to handle how that cancel
      is relayed to any task on the remote side.
    - if the remote task signals the end of a stream the
      ``ReceiveChannel`` semantics dictate that a ``StopAsyncIteration``
      to terminate the local ``async for``.

    '''
    def __init__(
        self,
        ctx: 'Context',  # typing: ignore # noqa
        rx_chan: trio.MemoryReceiveChannel,
        _broadcaster: Optional[BroadcastReceiver] = None,

    ) -> None:
        # the surrounding inter-actor context this stream belongs to
        self._ctx = ctx
        # runtime-owned "feeder" mem chan delivering transport msgs
        self._rx_chan = rx_chan
        self._broadcaster = _broadcaster

        # flag to denote end of stream (set on remote 'stop' or close)
        self._eoc: bool = False
        # set once ``.aclose()`` has completed locally
        self._closed: bool = False

    # delegate directly to underlying mem channel
    def receive_nowait(self):
        # raises ``trio.WouldBlock`` when no msg is buffered;
        # unpacks the relayed ``'yield'`` value from the transport msg
        msg = self._rx_chan.receive_nowait()
        return msg['yield']

    async def receive(self):
        '''Async receive a single msg from the IPC transport, the next
        in sequence for this stream.

        '''
        # see ``.aclose()`` for notes on the old behaviour prior to
        # introducing this
        if self._eoc:
            raise trio.EndOfChannel

        if self._closed:
            raise trio.ClosedResourceError('This stream was closed')

        try:
            msg = await self._rx_chan.receive()
            return msg['yield']

        except KeyError as err:
            # NOTE: a ``KeyError`` here means the msg had no ``'yield'``
            # entry; ``msg`` is guaranteed bound since only the
            # subscript above can raise it.
            # internal error should never get here
            assert msg.get('cid'), ("Received internal error at portal?")

            # TODO: handle 2 cases with 3.10 match syntax
            # - 'stop'
            # - 'error'
            # possibly just handle msg['stop'] here!

            if self._closed:
                raise trio.ClosedResourceError('This stream was closed')

            if msg.get('stop') or self._eoc:
                log.debug(f"{self} was stopped at remote end")

                # XXX: important to set so that a new ``.receive()``
                # call (likely by another task using a broadcast receiver)
                # doesn't accidentally pull the ``return`` message
                # value out of the underlying feed mem chan!
                self._eoc = True

                # # when the send is closed we assume the stream has
                # # terminated and signal this local iterator to stop
                # await self.aclose()

                # XXX: this causes ``ReceiveChannel.__anext__()`` to
                # raise a ``StopAsyncIteration`` **and** in our catch
                # block below it will trigger ``.aclose()``.
                raise trio.EndOfChannel from err

            # TODO: test that shows stream raising an expected error!!!
            elif msg.get('error'):
                # raise the error message
                raise unpack_error(msg, self._ctx.chan)

            else:
                raise

        except (
            trio.ClosedResourceError,  # by self._rx_chan
            trio.EndOfChannel,  # by self._rx_chan or `stop` msg from far end
        ):
            # XXX: we close the stream on any of these error conditions:

            # a ``ClosedResourceError`` indicates that the internal
            # feeder memory receive channel was closed likely by the
            # runtime after the associated transport-channel
            # disconnected or broke.

            # an ``EndOfChannel`` indicates either the internal recv
            # memchan exhausted **or** we raised it just above after
            # receiving a `stop` message from the far end of the stream.

            # Previously this was triggered by calling ``.aclose()`` on
            # the send side of the channel inside
            # ``Actor._push_result()`` (should still be commented code
            # there - which should eventually get removed), but now the
            # 'stop' message handling has been put just above.

            # TODO: Locally, we want to close this stream gracefully, by
            # terminating any local consumers tasks deterministically.
            # One we have broadcast support, we **don't** want to be
            # closing this stream and not flushing a final value to
            # remaining (clone) consumers who may not have been
            # scheduled to receive it yet.

            # when the send is closed we assume the stream has
            # terminated and signal this local iterator to stop
            await self.aclose()

            raise  # propagate

    async def aclose(self):
        '''
        Cancel associated remote actor task and local memory channel on
        close.

        '''
        # XXX: keep proper adherence to trio's `.aclose()` semantics:
        # https://trio.readthedocs.io/en/stable/reference-io.html#trio.abc.AsyncResource.aclose
        rx_chan = self._rx_chan

        if rx_chan._closed:
            log.cancel(f"{self} is already closed")

            # this stream has already been closed so silently succeed as
            # per ``trio.AsyncResource`` semantics.
            # https://trio.readthedocs.io/en/stable/reference-io.html#trio.abc.AsyncResource.aclose
            return

        self._eoc = True

        # NOTE: this is super subtle IPC messaging stuff:
        # Relay stop iteration to far end **iff** we're
        # in bidirectional mode. If we're only streaming
        # *from* one side then that side **won't** have an
        # entry in `Actor._cids2qs` (maybe it should though?).
        # So any `yield` or `stop` msgs sent from the caller side
        # will cause key errors on the callee side since there is
        # no entry for a local feeder mem chan since the callee task
        # isn't expecting messages to be sent by the caller.
        # Thus, we must check that this context DOES NOT
        # have a portal reference to ensure this is indeed the callee
        # side and can relay a 'stop'.

        # In the bidirectional case, `Context.open_stream()` will create
        # the `Actor._cids2qs` entry from a call to
        # `Actor.get_context()` and will call us here to send the stop
        # msg in ``__aexit__()`` on teardown.
        try:
            # NOTE: if this call is cancelled we expect this end to
            # handle as though the stop was never sent (though if it
            # was it shouldn't matter since it's unlikely a user
            # will try to re-use a stream after attempting to close
            # it).
            with trio.CancelScope(shield=True):
                await self._ctx.send_stop()

        except (
            trio.BrokenResourceError,
            trio.ClosedResourceError
        ):
            # the underlying channel may already have been pulled
            # in which case our stop message is meaningless since
            # it can't traverse the transport.
            ctx = self._ctx
            log.warning(
                f'Stream was already destroyed?\n'
                f'actor: {ctx.chan.uid}\n'
                f'ctx id: {ctx.cid}'
            )

        self._closed = True

        # Do we close the local mem chan ``self._rx_chan`` ??!?

        # NO, DEFINITELY NOT if we're a bi-dir ``MsgStream``!
        # BECAUSE this same core-msg-loop mem recv-chan is used to deliver
        # the potential final result from the surrounding inter-actor
        # `Context` so we don't want to close it until that context has
        # run to completion.

        # XXX: Notes on old behaviour:
        # await rx_chan.aclose()

        # In the receive-only case, ``Portal.open_stream_from()`` used
        # to rely on this call explicitly on teardown such that a new
        # call to ``.receive()`` after ``rx_chan`` had been closed, would
        # result in us raising a ``trio.EndOfChannel`` (since we
        # remapped the ``trio.ClosedResourceError`). However, now if for some
        # reason the stream's consumer code tries to manually receive a new
        # value before ``.aclose()`` is called **but** the far end has
        # stopped `.receive()` **must** raise ``trio.EndofChannel`` in
        # order to avoid an infinite hang on ``.__anext__()``; this is
        # why we added ``self._eoc`` to denote stream closure independent
        # of ``rx_chan``.

        # In theory we could still use this old method and close the
        # underlying msg-loop mem chan as above and then **not** check
        # for ``self._eoc`` in ``.receive()`` (if for some reason we
        # think that check is a bottle neck - not likely) **but** then
        # we would need to map the resulting
        # ``trio.ClosedResourceError`` to a ``trio.EndOfChannel`` in
        # ``.receive()`` (as it originally was before bi-dir streaming
        # support) in order to trigger stream closure. The old behaviour
        # is arguably more confusing since we lose detection of the
        # runtime's closure of ``rx_chan`` in the case where we may
        # still need to consume msgs that are "in transit" from the far
        # end (eg. for ``Context.result()``).

    @asynccontextmanager
    async def subscribe(
        self,

    ) -> AsyncIterator[BroadcastReceiver]:
        '''
        Allocate and return a ``BroadcastReceiver`` which delegates
        to this message stream.

        This allows multiple local tasks to receive each their own copy
        of this message stream.

        This operation is idempotent and and mutates this stream's
        receive machinery to copy and window-length-store each received
        value from the far end via the internally created broadcast
        receiver wrapper.

        '''
        # NOTE: This operation is idempotent and non-reversible, so be
        # sure you can deal with any (theoretical) overhead of the the
        # allocated ``BroadcastReceiver`` before calling this method for
        # the first time.
        if self._broadcaster is None:

            bcast = self._broadcaster = broadcast_receiver(
                self,
                # use memory channel size by default
                self._rx_chan._state.max_buffer_size,  # type: ignore
                receive_afunc=self.receive,
            )

            # NOTE: we override the original stream instance's receive
            # method to now delegate to the broadcaster's ``.receive()``
            # such that new subscribers will be copied received values
            # and this stream doesn't have to expect it's original
            # consumer(s) to get a new broadcast rx handle.
            self.receive = bcast.receive  # type: ignore
            # seems there's no graceful way to type this with ``mypy``?
            # https://github.com/python/mypy/issues/708

        async with self._broadcaster.subscribe() as bstream:
            assert bstream.key != self._broadcaster.key
            assert bstream._recv == self._broadcaster._recv

            # NOTE: we patch on a `.send()` to the bcaster so that the
            # caller can still conduct 2-way streaming using this
            # ``bstream`` handle transparently as though it was the msg
            # stream instance.
            bstream.send = self.send  # type: ignore

            yield bstream

    async def send(
        self,
        data: Any
    ) -> None:
        '''
        Send a message over this stream to the far end.

        '''
        # relay any error previously captured from the far end first
        if self._ctx._error:
            raise self._ctx._error  # from None

        if self._closed:
            raise trio.ClosedResourceError('This stream was already closed')

        await self._ctx.chan.send({'yield': data, 'cid': self._ctx.cid})
| 
 |  | ||||||
| 
 |  | ||||||
@dataclass
class Context:
    '''
    An inter-actor, ``trio`` task communication context.

    NB: This class should never be instantiated directly, it is delivered
    by either runtime machinery to a remotely started task or by entering
    ``Portal.open_context()``.

    Allows maintaining task or protocol specific state between
    2 communicating actor tasks. A unique context is created on the
    callee side/end for every request to a remote actor from a portal.

    A context can be cancelled and (possibly eventually restarted) from
    either side of the underlying IPC channel, open task oriented
    message streams and acts as an IPC aware inter-actor-task cancel
    scope.

    '''
    # transport channel to the peer actor
    chan: Channel
    # unique "context id" correlating msgs for this inter-actor task pair
    cid: str

    # these are the "feeder" channels for delivering
    # message values to the local task from the runtime
    # msg processing loop.
    _recv_chan: trio.MemoryReceiveChannel
    _send_chan: trio.MemorySendChannel

    # remote callable style (e.g. streaming vs. plain) — set by runtime
    _remote_func_type: Optional[str] = None

    # only set on the caller side
    _portal: Optional['Portal'] = None    # type: ignore # noqa
    # ``False`` sentinel means "no result received yet" (``None`` is
    # a valid result value)
    _result: Optional[Any] = False
    # last error relayed from the far end, if any
    _error: Optional[BaseException] = None

    # status flags
    _cancel_called: bool = False
    _cancel_msg: Optional[str] = None
    _enter_debugger_on_cancel: bool = True
    _started_called: bool = False
    _started_received: bool = False
    _stream_opened: bool = False

    # only set on the callee side
    _scope_nursery: Optional[trio.Nursery] = None

    # when ``True`` the feeder mem chan applies backpressure instead
    # of dropping/overrunning on slow consumers
    _backpressure: bool = False
| 
 | 
 | ||||||
|     async def send_yield(self, data: Any) -> None: |     async def send_yield(self, data: Any) -> None: | ||||||
| 
 |  | ||||||
|         warnings.warn( |  | ||||||
|             "`Context.send_yield()` is now deprecated. " |  | ||||||
|             "Use ``MessageStream.send()``. ", |  | ||||||
|             DeprecationWarning, |  | ||||||
|             stacklevel=2, |  | ||||||
|         ) |  | ||||||
|         await self.chan.send({'yield': data, 'cid': self.cid}) |         await self.chan.send({'yield': data, 'cid': self.cid}) | ||||||
| 
 | 
 | ||||||
    async def send_stop(self) -> None:
        '''
        Relay a ``'stop'`` msg to the far end signalling graceful
        end-of-stream for this context.

        '''
        await self.chan.send({'stop': True, 'cid': self.cid})
| 
 | 
 | ||||||
    async def _maybe_raise_from_remote_msg(
        self,
        msg: dict[str, Any],

    ) -> None:
        '''
        (Maybe) unpack and raise a msg error into the local scope
        nursery for this context.

        Acts as a form of "relay" for a remote error raised
        in the corresponding remote callee task.

        '''
        error = msg.get('error')
        if error:
            # If this is an error message from a context opened by
            # ``Portal.open_context()`` we want to interrupt any ongoing
            # (child) tasks within that context to be notified of the remote
            # error relayed here.
            #
            # The reason we may want to raise the remote error immediately
            # is that there is no guarantee the associated local task(s)
            # will attempt to read from any locally opened stream any time
            # soon.
            #
            # NOTE: this only applies when
            # ``Portal.open_context()`` has been called since it is assumed
            # (currently) that other portal APIs (``Portal.run()``,
            # ``.run_in_actor()``) do their own error checking at the point
            # of the call and result processing.
            log.error(
                f'Remote context error for {self.chan.uid}:{self.cid}:\n'
                f'{msg["error"]["tb_str"]}'
            )
            error = unpack_error(msg, self.chan)
            if (
                isinstance(error, ContextCancelled) and
                self._cancel_called
            ):
                # this is an expected cancel request response message
                # and we don't need to raise it in scope since it will
                # potentially override a real error
                return

            self._error = error

            # TODO: tempted to **not** do this by-reraising in a
            # nursery and instead cancel a surrounding scope, detect
            # the cancellation, then lookup the error that was set?
            if self._scope_nursery:

                async def raiser():
                    raise self._error from None

                # from trio.testing import wait_all_tasks_blocked
                # await wait_all_tasks_blocked()
                if not self._scope_nursery._closed:  # type: ignore
                    self._scope_nursery.start_soon(raiser)
| 
 |  | ||||||
    async def cancel(
        self,
        msg: Optional[str] = None,

    ) -> None:
        '''
        Cancel this inter-actor-task context.

        Request that the far side cancel it's current linked context,
        Timeout quickly in an attempt to sidestep 2-generals...

        '''
        # only the caller side ever holds a portal reference, so its
        # presence distinguishes which side of the context we are on
        side = 'caller' if self._portal else 'callee'
        if msg:
            assert side == 'callee', 'Only callee side can provide cancel msg'

        log.cancel(f'Cancelling {side} side of context to {self.chan.uid}')

        self._cancel_called = True

        if side == 'caller':
            if not self._portal:
                raise RuntimeError(
                    "No portal found, this is likely a callee side context"
                )

            cid = self.cid
            # shielded timeout: the remote may never ack the cancel
            # (network partition / 2-generals) so don't hang forever
            with trio.move_on_after(0.5) as cs:
                cs.shield = True
                log.cancel(
                    f"Cancelling stream {cid} to "
                    f"{self._portal.channel.uid}")

                # NOTE: we're telling the far end actor to cancel a task
                # corresponding to *this actor*. The far end local channel
                # instance is passed to `Actor._cancel_task()` implicitly.
                await self._portal.run_from_ns('self', '_cancel_task', cid=cid)

            if cs.cancelled_caught:
                # XXX: there's no way to know if the remote task was indeed
                # cancelled in the case where the connection is broken or
                # some other network error occurred.
                # if not self._portal.channel.connected():
                if not self.chan.connected():
                    log.cancel(
                        "May have failed to cancel remote task "
                        f"{cid} for {self._portal.channel.uid}")
                else:
                    log.cancel(
                        "Timed out on cancelling remote task "
                        f"{cid} for {self._portal.channel.uid}")

        # callee side remote task
        else:
            self._cancel_msg = msg

            # TODO: should we have an explicit cancel message
            # or is relaying the local `trio.Cancelled` as an
            # {'error': trio.Cancelled, cid: "blah"} enough?
            # This probably gets into the discussion in
            # https://github.com/goodboy/tractor/issues/36
            assert self._scope_nursery
            self._scope_nursery.cancel_scope.cancel()

        # close the feeder mem chan so any blocked local receivers
        # wake up and see the closure
        if self._recv_chan:
            await self._recv_chan.aclose()
| 
 |  | ||||||
    @asynccontextmanager
    async def open_stream(

        self,
        backpressure: Optional[bool] = True,
        msg_buffer_size: Optional[int] = None,

    ) -> AsyncGenerator[MsgStream, None]:
        '''
        Open a ``MsgStream``, a bi-directional stream connected to the
        cross-actor (far end) task for this ``Context``.

        This context manager must be entered on both the caller and
        callee for the stream to logically be considered "connected".

        A ``MsgStream`` is currently "one-shot" use, meaning if you
        close it you can not "re-open" it for streaming and instead you
        must re-establish a new surrounding ``Context`` using
        ``Portal.open_context()``.  In the future this may change but
        currently there seems to be no obvious reason to support
        "re-opening":
            - pausing a stream can be done with a message.
            - task errors will normally require a restart of the entire
              scope of the inter-actor task context due to the nature of
              ``trio``'s cancellation system.

        '''
        actor = current_actor()

        # here we create a mem chan that corresponds to the
        # far end caller / callee.

        # Likewise if the surrounding context has been cancelled we error here
        # since it likely means the surrounding block was exited or
        # killed

        if self._cancel_called:
            task = trio.lowlevel.current_task().name
            raise ContextCancelled(
                f'Context around {actor.uid[0]}:{task} was already cancelled!'
            )

        if not self._portal and not self._started_called:
            raise RuntimeError(
                'Context.started()` must be called before opening a stream'
            )

        # NOTE: in one way streaming this only happens on the
        # caller side inside `Actor.start_remote_task()` so if you try
        # to send a stop from the caller to the callee in the
        # single-direction-stream case you'll get a lookup error
        # currently.
        ctx = actor.get_context(
            self.chan,
            self.cid,
            msg_buffer_size=msg_buffer_size,
        )
        ctx._backpressure = backpressure
        # ``Actor.get_context()`` returns the already-registered
        # context for this (chan, cid) pair — i.e. us
        assert ctx is self

        # XXX: If the underlying channel feeder receive mem chan has
        # been closed then likely client code has already exited
        # a ``.open_stream()`` block prior or there was some other
        # unanticipated error or cancellation from ``trio``.

        if ctx._recv_chan._closed:
            raise trio.ClosedResourceError(
                'The underlying channel for this stream was already closed!?')

        async with MsgStream(
            ctx=self,
            rx_chan=ctx._recv_chan,
        ) as stream:

            if self._portal:
                self._portal._streams.add(stream)

            try:
                self._stream_opened = True

                # XXX: do we need this?
                # ensure we aren't cancelled before yielding the stream
                # await trio.lowlevel.checkpoint()
                yield stream

                # NOTE: Make the stream "one-shot use".  On exit, signal
                # ``trio.EndOfChannel``/``StopAsyncIteration`` to the
                # far end.
                await stream.aclose()

            finally:
                if self._portal:
                    try:
                        self._portal._streams.remove(stream)
                    except KeyError:
                        log.warning(
                            f'Stream was already destroyed?\n'
                            f'actor: {self.chan.uid}\n'
                            f'ctx id: {self.cid}'
                        )
| 
 |  | ||||||
|     async def result(self) -> Any: |  | ||||||
|         ''' |  | ||||||
|         From a caller side, wait for and return the final result from |  | ||||||
|         the callee side task. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         assert self._portal, "Context.result() can not be called from callee!" |  | ||||||
|         assert self._recv_chan |  | ||||||
| 
 |  | ||||||
|         if self._result is False: |  | ||||||
| 
 |  | ||||||
|             if not self._recv_chan._closed:  # type: ignore |  | ||||||
| 
 |  | ||||||
|                 # wait for a final context result consuming |  | ||||||
|                 # and discarding any bi dir stream msgs still |  | ||||||
|                 # in transit from the far end. |  | ||||||
|                 while True: |  | ||||||
| 
 |  | ||||||
|                     msg = await self._recv_chan.receive() |  | ||||||
|                     try: |  | ||||||
|                         self._result = msg['return'] |  | ||||||
|                         break |  | ||||||
|                     except KeyError as msgerr: |  | ||||||
| 
 |  | ||||||
|                         if 'yield' in msg: |  | ||||||
|                             # far end task is still streaming to us so discard |  | ||||||
|                             log.warning(f'Discarding stream delivered {msg}') |  | ||||||
|                             continue |  | ||||||
| 
 |  | ||||||
|                         elif 'stop' in msg: |  | ||||||
|                             log.debug('Remote stream terminated') |  | ||||||
|                             continue |  | ||||||
| 
 |  | ||||||
|                         # internal error should never get here |  | ||||||
|                         assert msg.get('cid'), ( |  | ||||||
|                             "Received internal error at portal?") |  | ||||||
| 
 |  | ||||||
|                         raise unpack_error( |  | ||||||
|                             msg, self._portal.channel |  | ||||||
|                         ) from msgerr |  | ||||||
| 
 |  | ||||||
|         return self._result |  | ||||||
| 
 |  | ||||||
|     async def started( |  | ||||||
|         self, |  | ||||||
|         value: Optional[Any] = None |  | ||||||
| 
 |  | ||||||
|     ) -> None: |  | ||||||
|         ''' |  | ||||||
|         Indicate to calling actor's task that this linked context |  | ||||||
|         has started and send ``value`` to the other side. |  | ||||||
| 
 |  | ||||||
|         On the calling side ``value`` is the second item delivered |  | ||||||
|         in the tuple returned by ``Portal.open_context()``. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         if self._portal: |  | ||||||
|             raise RuntimeError( |  | ||||||
|                 f"Caller side context {self} can not call started!") |  | ||||||
| 
 |  | ||||||
|         elif self._started_called: |  | ||||||
|             raise RuntimeError( |  | ||||||
|                 f"called 'started' twice on context with {self.chan.uid}") |  | ||||||
| 
 |  | ||||||
|         await self.chan.send({'started': value, 'cid': self.cid}) |  | ||||||
|         self._started_called = True |  | ||||||
| 
 |  | ||||||
|     # TODO: do we need a restart api? |  | ||||||
|     # async def restart(self) -> None: |  | ||||||
|     #     pass |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def stream(func: Callable) -> Callable: |  | ||||||
|     """Mark an async function as a streaming routine with ``@stream``. |  | ||||||
| 
 | 
 | ||||||
|  | def current_context(): | ||||||
|  |     """Get the current task's context instance. | ||||||
|     """ |     """ | ||||||
|     # annotate |     return _context.get() | ||||||
|     # TODO: apply whatever solution ``mypy`` ends up picking for this: |  | ||||||
|     # https://github.com/python/mypy/issues/2087#issuecomment-769266912 |  | ||||||
|     func._tractor_stream_function = True  # type: ignore |  | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
|  | def stream(func): | ||||||
|  |     """Mark an async function as a streaming routine with ``@stream``. | ||||||
|  |     """ | ||||||
|  |     func._tractor_stream_function = True | ||||||
|     sig = inspect.signature(func) |     sig = inspect.signature(func) | ||||||
|     params = sig.parameters |     if 'ctx' not in sig.parameters: | ||||||
|     if 'stream' not in params and 'ctx' in params: |  | ||||||
|         warnings.warn( |  | ||||||
|             "`@tractor.stream decorated funcs should now declare a `stream` " |  | ||||||
|             " arg, `ctx` is now designated for use with @tractor.context", |  | ||||||
|             DeprecationWarning, |  | ||||||
|             stacklevel=2, |  | ||||||
|         ) |  | ||||||
| 
 |  | ||||||
|     if ( |  | ||||||
|         'ctx' not in params and |  | ||||||
|         'to_trio' not in params and |  | ||||||
|         'stream' not in params |  | ||||||
|     ): |  | ||||||
|         raise TypeError( |         raise TypeError( | ||||||
|             "The first argument to the stream function " |             "The first argument to the stream function " | ||||||
|             f"{func.__name__} must be `ctx: tractor.Context` " |  | ||||||
|             "(Or ``to_trio`` if using ``asyncio`` in guest mode)." |  | ||||||
|         ) |  | ||||||
|     return func |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| def context(func: Callable) -> Callable: |  | ||||||
|     """Mark an async function as a streaming routine with ``@context``. |  | ||||||
| 
 |  | ||||||
|     """ |  | ||||||
|     # annotate |  | ||||||
|     # TODO: apply whatever solution ``mypy`` ends up picking for this: |  | ||||||
|     # https://github.com/python/mypy/issues/2087#issuecomment-769266912 |  | ||||||
|     func._tractor_context_function = True  # type: ignore |  | ||||||
| 
 |  | ||||||
|     sig = inspect.signature(func) |  | ||||||
|     params = sig.parameters |  | ||||||
|     if 'ctx' not in params: |  | ||||||
|         raise TypeError( |  | ||||||
|             "The first argument to the context function " |  | ||||||
|             f"{func.__name__} must be `ctx: tractor.Context`" |             f"{func.__name__} must be `ctx: tractor.Context`" | ||||||
|         ) |         ) | ||||||
|     return func |     return func | ||||||
|  |  | ||||||
|  | @ -1,40 +1,18 @@ | ||||||
| # tractor: structured concurrent "actors". |  | ||||||
| # Copyright 2018-eternity Tyler Goodlet. |  | ||||||
| 
 |  | ||||||
| # This program is free software: you can redistribute it and/or modify |  | ||||||
| # it under the terms of the GNU Affero General Public License as published by |  | ||||||
| # the Free Software Foundation, either version 3 of the License, or |  | ||||||
| # (at your option) any later version. |  | ||||||
| 
 |  | ||||||
| # This program is distributed in the hope that it will be useful, |  | ||||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of |  | ||||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the |  | ||||||
| # GNU Affero General Public License for more details. |  | ||||||
| 
 |  | ||||||
| # You should have received a copy of the GNU Affero General Public License |  | ||||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. |  | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| ``trio`` inspired apis and helpers | ``trio`` inspired apis and helpers | ||||||
| 
 |  | ||||||
| """ | """ | ||||||
| from contextlib import asynccontextmanager as acm |  | ||||||
| from functools import partial | from functools import partial | ||||||
| import inspect | import multiprocessing as mp | ||||||
| from typing import ( | from typing import Tuple, List, Dict, Optional | ||||||
|     Optional, |  | ||||||
|     TYPE_CHECKING, |  | ||||||
| ) |  | ||||||
| import typing | import typing | ||||||
| import warnings | import warnings | ||||||
| 
 | 
 | ||||||
| from exceptiongroup import BaseExceptionGroup |  | ||||||
| import trio | import trio | ||||||
|  | from async_generator import asynccontextmanager | ||||||
| 
 | 
 | ||||||
| from ._debug import maybe_wait_for_debugger |  | ||||||
| from ._state import current_actor, is_main_process | from ._state import current_actor, is_main_process | ||||||
| from .log import get_logger, get_loglevel | from .log import get_logger, get_loglevel | ||||||
| from ._runtime import Actor | from ._actor import Actor | ||||||
| from ._portal import Portal | from ._portal import Portal | ||||||
| from ._exceptions import is_multi_cancelled | from ._exceptions import is_multi_cancelled | ||||||
| from ._root import open_root_actor | from ._root import open_root_actor | ||||||
|  | @ -42,98 +20,52 @@ from . import _state | ||||||
| from . import _spawn | from . import _spawn | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| if TYPE_CHECKING: |  | ||||||
|     import multiprocessing as mp |  | ||||||
| 
 |  | ||||||
| log = get_logger(__name__) | log = get_logger(__name__) | ||||||
| 
 | 
 | ||||||
| _default_bind_addr: tuple[str, int] = ('127.0.0.1', 0) | _default_bind_addr: Tuple[str, int] = ('127.0.0.1', 0) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class ActorNursery: | class ActorNursery: | ||||||
|     ''' |     """Spawn scoped subprocess actors. | ||||||
|     The fundamental actor supervision construct: spawn and manage |     """ | ||||||
|     explicit lifetime and capability restricted, bootstrapped, |  | ||||||
|     ``trio.run()`` scheduled sub-processes. |  | ||||||
| 
 |  | ||||||
|     Though the concept of a "process nursery" is different in complexity |  | ||||||
|     and slightly different in semantics then a tradtional single |  | ||||||
|     threaded task nursery, much of the interface is the same. New |  | ||||||
|     processes each require a top level "parent" or "root" task which is |  | ||||||
|     itself no different then any task started by a tradtional |  | ||||||
|     ``trio.Nursery``. The main difference is that each "actor" (a |  | ||||||
|     process + ``trio.run()``) contains a full, paralell executing |  | ||||||
|     ``trio``-task-tree. The following super powers ensue: |  | ||||||
| 
 |  | ||||||
|     - starting tasks in a child actor are completely independent of |  | ||||||
|       tasks started in the current process. They execute in *parallel* |  | ||||||
|       relative to tasks in the current process and are scheduled by their |  | ||||||
|       own actor's ``trio`` run loop. |  | ||||||
|     - tasks scheduled in a remote process still maintain an SC protocol |  | ||||||
|       across memory boundaries using a so called "structured concurrency |  | ||||||
|       dialogue protocol" which ensures task-hierarchy-lifetimes are linked. |  | ||||||
|     - remote tasks (in another actor) can fail and relay failure back to |  | ||||||
|       the caller task (in some other actor) via a seralized |  | ||||||
|       ``RemoteActorError`` which means no zombie process or RPC |  | ||||||
|       initiated task can ever go off on its own. |  | ||||||
| 
 |  | ||||||
|     ''' |  | ||||||
|     def __init__( |     def __init__( | ||||||
|         self, |         self, | ||||||
|         actor: Actor, |         actor: Actor, | ||||||
|         ria_nursery: trio.Nursery, |         ria_nursery: trio.Nursery, | ||||||
|         da_nursery: trio.Nursery, |         da_nursery: trio.Nursery, | ||||||
|         errors: dict[tuple[str, str], BaseException], |         errors: Dict[Tuple[str, str], Exception], | ||||||
|     ) -> None: |     ) -> None: | ||||||
|         # self.supervisor = supervisor  # TODO |         # self.supervisor = supervisor  # TODO | ||||||
|         self._actor: Actor = actor |         self._actor: Actor = actor | ||||||
|         self._ria_nursery = ria_nursery |         self._ria_nursery = ria_nursery | ||||||
|         self._da_nursery = da_nursery |         self._da_nursery = da_nursery | ||||||
|         self._children: dict[ |         self._children: Dict[ | ||||||
|             tuple[str, str], |             Tuple[str, str], | ||||||
|             tuple[ |             Tuple[Actor, mp.Process, Optional[Portal]] | ||||||
|                 Actor, |  | ||||||
|                 trio.Process | mp.Process, |  | ||||||
|                 Optional[Portal], |  | ||||||
|             ] |  | ||||||
|         ] = {} |         ] = {} | ||||||
|         # portals spawned with ``run_in_actor()`` are |         # portals spawned with ``run_in_actor()`` are | ||||||
|         # cancelled when their "main" result arrives |         # cancelled when their "main" result arrives | ||||||
|         self._cancel_after_result_on_exit: set = set() |         self._cancel_after_result_on_exit: set = set() | ||||||
|         self.cancelled: bool = False |         self.cancelled: bool = False | ||||||
|         self._join_procs = trio.Event() |         self._join_procs = trio.Event() | ||||||
|         self._at_least_one_child_in_debug: bool = False |  | ||||||
|         self.errors = errors |         self.errors = errors | ||||||
|         self.exited = trio.Event() |  | ||||||
| 
 | 
 | ||||||
|     async def start_actor( |     async def start_actor( | ||||||
|         self, |         self, | ||||||
|         name: str, |         name: str, | ||||||
|         *, |         *, | ||||||
|         bind_addr: tuple[str, int] = _default_bind_addr, |         bind_addr: Tuple[str, int] = _default_bind_addr, | ||||||
|         rpc_module_paths: list[str] | None = None, |         rpc_module_paths: List[str] = None, | ||||||
|         enable_modules: list[str] | None = None, |         enable_modules: List[str] = None, | ||||||
|         loglevel: str | None = None,  # set log level per subactor |         loglevel: str = None,  # set log level per subactor | ||||||
|         nursery: trio.Nursery | None = None, |         nursery: trio.Nursery = None, | ||||||
|         debug_mode: Optional[bool] | None = None, |  | ||||||
|         infect_asyncio: bool = False, |  | ||||||
|     ) -> Portal: |     ) -> Portal: | ||||||
|         ''' |  | ||||||
|         Start a (daemon) actor: an process that has no designated |  | ||||||
|         "main task" besides the runtime. |  | ||||||
| 
 |  | ||||||
|         ''' |  | ||||||
|         loglevel = loglevel or self._actor.loglevel or get_loglevel() |         loglevel = loglevel or self._actor.loglevel or get_loglevel() | ||||||
| 
 | 
 | ||||||
|         # configure and pass runtime state |         # configure and pass runtime state | ||||||
|         _rtv = _state._runtime_vars.copy() |         _rtv = _state._runtime_vars.copy() | ||||||
|         _rtv['_is_root'] = False |         _rtv['_is_root'] = False | ||||||
| 
 | 
 | ||||||
|         # allow setting debug policy per actor |  | ||||||
|         if debug_mode is not None: |  | ||||||
|             _rtv['_debug_mode'] = debug_mode |  | ||||||
|             self._at_least_one_child_in_debug = True |  | ||||||
| 
 |  | ||||||
|         enable_modules = enable_modules or [] |         enable_modules = enable_modules or [] | ||||||
| 
 | 
 | ||||||
|         if rpc_module_paths: |         if rpc_module_paths: | ||||||
|  | @ -170,25 +102,18 @@ class ActorNursery: | ||||||
|                 bind_addr, |                 bind_addr, | ||||||
|                 parent_addr, |                 parent_addr, | ||||||
|                 _rtv,  # run time vars |                 _rtv,  # run time vars | ||||||
|                 infect_asyncio=infect_asyncio, |  | ||||||
|             ) |             ) | ||||||
|         ) |         ) | ||||||
| 
 | 
 | ||||||
|     async def run_in_actor( |     async def run_in_actor( | ||||||
|         self, |         self, | ||||||
| 
 |  | ||||||
|         fn: typing.Callable, |         fn: typing.Callable, | ||||||
|         *, |         *, | ||||||
| 
 |  | ||||||
|         name: Optional[str] = None, |         name: Optional[str] = None, | ||||||
|         bind_addr: tuple[str, int] = _default_bind_addr, |         bind_addr: Tuple[str, int] = _default_bind_addr, | ||||||
|         rpc_module_paths: list[str] | None = None, |         rpc_module_paths: Optional[List[str]] = None, | ||||||
|         enable_modules: list[str] | None = None, |         loglevel: str = None,  # set log level per subactor | ||||||
|         loglevel: str | None = None,  # set log level per subactor |  | ||||||
|         infect_asyncio: bool = False, |  | ||||||
| 
 |  | ||||||
|         **kwargs,  # explicit args to ``fn`` |         **kwargs,  # explicit args to ``fn`` | ||||||
| 
 |  | ||||||
|     ) -> Portal: |     ) -> Portal: | ||||||
|         """Spawn a new actor, run a lone task, then terminate the actor and |         """Spawn a new actor, run a lone task, then terminate the actor and | ||||||
|         return its result. |         return its result. | ||||||
|  | @ -205,23 +130,12 @@ class ActorNursery: | ||||||
| 
 | 
 | ||||||
|         portal = await self.start_actor( |         portal = await self.start_actor( | ||||||
|             name, |             name, | ||||||
|             enable_modules=[mod_path] + ( |             rpc_module_paths=[mod_path] + (rpc_module_paths or []), | ||||||
|                 enable_modules or rpc_module_paths or [] |  | ||||||
|             ), |  | ||||||
|             bind_addr=bind_addr, |             bind_addr=bind_addr, | ||||||
|             loglevel=loglevel, |             loglevel=loglevel, | ||||||
|             # use the run_in_actor nursery |             # use the run_in_actor nursery | ||||||
|             nursery=self._ria_nursery, |             nursery=self._ria_nursery, | ||||||
|             infect_asyncio=infect_asyncio, |  | ||||||
|         ) |         ) | ||||||
| 
 |  | ||||||
|         # XXX: don't allow stream funcs |  | ||||||
|         if not ( |  | ||||||
|             inspect.iscoroutinefunction(fn) and |  | ||||||
|             not getattr(fn, '_tractor_stream_function', False) |  | ||||||
|         ): |  | ||||||
|             raise TypeError(f'{fn} must be an async function!') |  | ||||||
| 
 |  | ||||||
|         # this marks the actor to be cancelled after its portal result |         # this marks the actor to be cancelled after its portal result | ||||||
|         # is retreived, see logic in `open_nursery()` below. |         # is retreived, see logic in `open_nursery()` below. | ||||||
|         self._cancel_after_result_on_exit.add(portal) |         self._cancel_after_result_on_exit.add(portal) | ||||||
|  | @ -241,27 +155,18 @@ class ActorNursery: | ||||||
|         """ |         """ | ||||||
|         self.cancelled = True |         self.cancelled = True | ||||||
| 
 | 
 | ||||||
|         log.cancel(f"Cancelling nursery in {self._actor.uid}") |         log.warning(f"Cancelling nursery in {self._actor.uid}") | ||||||
|         with trio.move_on_after(3) as cs: |         with trio.move_on_after(3) as cs: | ||||||
| 
 |  | ||||||
|             async with trio.open_nursery() as nursery: |             async with trio.open_nursery() as nursery: | ||||||
| 
 |  | ||||||
|                 for subactor, proc, portal in self._children.values(): |                 for subactor, proc, portal in self._children.values(): | ||||||
| 
 |  | ||||||
|                     # TODO: are we ever even going to use this or |  | ||||||
|                     # is the spawning backend responsible for such |  | ||||||
|                     # things? I'm thinking latter. |  | ||||||
|                     if hard_kill: |                     if hard_kill: | ||||||
|                         proc.terminate() |                         proc.terminate() | ||||||
| 
 |  | ||||||
|                     else: |                     else: | ||||||
|                         if portal is None:  # actor hasn't fully spawned yet |                         if portal is None:  # actor hasn't fully spawned yet | ||||||
|                             event = self._actor._peer_connected[subactor.uid] |                             event = self._actor._peer_connected[subactor.uid] | ||||||
|                             log.warning( |                             log.warning( | ||||||
|                                 f"{subactor.uid} wasn't finished spawning?") |                                 f"{subactor.uid} wasn't finished spawning?") | ||||||
| 
 |  | ||||||
|                             await event.wait() |                             await event.wait() | ||||||
| 
 |  | ||||||
|                             # channel/portal should now be up |                             # channel/portal should now be up | ||||||
|                             _, _, portal = self._children[subactor.uid] |                             _, _, portal = self._children[subactor.uid] | ||||||
| 
 | 
 | ||||||
|  | @ -279,7 +184,6 @@ class ActorNursery: | ||||||
| 
 | 
 | ||||||
|                         # spawn cancel tasks for each sub-actor |                         # spawn cancel tasks for each sub-actor | ||||||
|                         assert portal |                         assert portal | ||||||
|                         if portal.channel.connected(): |  | ||||||
|                         nursery.start_soon(portal.cancel_actor) |                         nursery.start_soon(portal.cancel_actor) | ||||||
| 
 | 
 | ||||||
|         # if we cancelled the cancel (we hung cancelling remote actors) |         # if we cancelled the cancel (we hung cancelling remote actors) | ||||||
|  | @ -295,17 +199,13 @@ class ActorNursery: | ||||||
|         self._join_procs.set() |         self._join_procs.set() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @acm | @asynccontextmanager | ||||||
| async def _open_and_supervise_one_cancels_all_nursery( | async def _open_and_supervise_one_cancels_all_nursery( | ||||||
|     actor: Actor, |     actor: Actor, | ||||||
| 
 |  | ||||||
| ) -> typing.AsyncGenerator[ActorNursery, None]: | ) -> typing.AsyncGenerator[ActorNursery, None]: | ||||||
| 
 | 
 | ||||||
|     # TODO: yay or nay? |  | ||||||
|     __tracebackhide__ = True |  | ||||||
| 
 |  | ||||||
|     # the collection of errors retreived from spawned sub-actors |     # the collection of errors retreived from spawned sub-actors | ||||||
|     errors: dict[tuple[str, str], BaseException] = {} |     errors: Dict[Tuple[str, str], Exception] = {} | ||||||
| 
 | 
 | ||||||
|     # This is the outermost level "deamon actor" nursery. It is awaited |     # This is the outermost level "deamon actor" nursery. It is awaited | ||||||
|     # **after** the below inner "run in actor nursery". This allows for |     # **after** the below inner "run in actor nursery". This allows for | ||||||
|  | @ -326,7 +226,6 @@ async def _open_and_supervise_one_cancels_all_nursery( | ||||||
|             # As such if the strategy propagates any error(s) upwards |             # As such if the strategy propagates any error(s) upwards | ||||||
|             # the above "daemon actor" nursery will be notified. |             # the above "daemon actor" nursery will be notified. | ||||||
|             async with trio.open_nursery() as ria_nursery: |             async with trio.open_nursery() as ria_nursery: | ||||||
| 
 |  | ||||||
|                 anursery = ActorNursery( |                 anursery = ActorNursery( | ||||||
|                     actor, |                     actor, | ||||||
|                     ria_nursery, |                     ria_nursery, | ||||||
|  | @ -337,85 +236,64 @@ async def _open_and_supervise_one_cancels_all_nursery( | ||||||
|                     # spawning of actors happens in the caller's scope |                     # spawning of actors happens in the caller's scope | ||||||
|                     # after we yield upwards |                     # after we yield upwards | ||||||
|                     yield anursery |                     yield anursery | ||||||
| 
 |                     log.debug( | ||||||
|                     # When we didn't error in the caller's scope, |  | ||||||
|                     # signal all process-monitor-tasks to conduct |  | ||||||
|                     # the "hard join phase". |  | ||||||
|                     log.runtime( |  | ||||||
|                         f"Waiting on subactors {anursery._children} " |                         f"Waiting on subactors {anursery._children} " | ||||||
|                         "to complete" |                         "to complete" | ||||||
|                     ) |                     ) | ||||||
|                     anursery._join_procs.set() |                 except BaseException as err: | ||||||
| 
 |  | ||||||
|                 except BaseException as inner_err: |  | ||||||
|                     errors[actor.uid] = inner_err |  | ||||||
| 
 |  | ||||||
|                     # If we error in the root but the debugger is |  | ||||||
|                     # engaged we don't want to prematurely kill (and |  | ||||||
|                     # thus clobber access to) the local tty since it |  | ||||||
|                     # will make the pdb repl unusable. |  | ||||||
|                     # Instead try to wait for pdb to be released before |  | ||||||
|                     # tearing down. |  | ||||||
|                     await maybe_wait_for_debugger( |  | ||||||
|                         child_in_debug=anursery._at_least_one_child_in_debug |  | ||||||
|                     ) |  | ||||||
| 
 |  | ||||||
|                     # if the caller's scope errored then we activate our |                     # if the caller's scope errored then we activate our | ||||||
|                     # one-cancels-all supervisor strategy (don't |                     # one-cancels-all supervisor strategy (don't | ||||||
|                     # worry more are coming). |                     # worry more are coming). | ||||||
|                     anursery._join_procs.set() |                     anursery._join_procs.set() | ||||||
| 
 |                     try: | ||||||
|                         # XXX: hypothetically an error could be |                         # XXX: hypothetically an error could be | ||||||
|                         # raised and then a cancel signal shows up |                         # raised and then a cancel signal shows up | ||||||
|                         # slightly after in which case the `else:` |                         # slightly after in which case the `else:` | ||||||
|                         # block here might not complete?  For now, |                         # block here might not complete?  For now, | ||||||
|                         # shield both. |                         # shield both. | ||||||
|                         with trio.CancelScope(shield=True): |                         with trio.CancelScope(shield=True): | ||||||
|                         etype = type(inner_err) |                             etype = type(err) | ||||||
|                             if etype in ( |                             if etype in ( | ||||||
|                                 trio.Cancelled, |                                 trio.Cancelled, | ||||||
|                                 KeyboardInterrupt |                                 KeyboardInterrupt | ||||||
|                             ) or ( |                             ) or ( | ||||||
|                             is_multi_cancelled(inner_err) |                                 is_multi_cancelled(err) | ||||||
|                             ): |                             ): | ||||||
|                             log.cancel( |                                 log.warning( | ||||||
|                                     f"Nursery for {current_actor().uid} " |                                     f"Nursery for {current_actor().uid} " | ||||||
|                                     f"was cancelled with {etype}") |                                     f"was cancelled with {etype}") | ||||||
|                             else: |                             else: | ||||||
|                                 log.exception( |                                 log.exception( | ||||||
|                                     f"Nursery for {current_actor().uid} " |                                     f"Nursery for {current_actor().uid} " | ||||||
|                                 f"errored with") |                                     f"errored with {err}, ") | ||||||
| 
 | 
 | ||||||
|                             # cancel all subactors |                             # cancel all subactors | ||||||
|                             await anursery.cancel() |                             await anursery.cancel() | ||||||
| 
 | 
 | ||||||
|  |                     except trio.MultiError as merr: | ||||||
|  |                         # If we receive additional errors while waiting on | ||||||
|  |                         # remaining subactors that were cancelled, | ||||||
|  |                         # aggregate those errors with the original error | ||||||
|  |                         # that triggered this teardown. | ||||||
|  |                         if err not in merr.exceptions: | ||||||
|  |                             raise trio.MultiError(merr.exceptions + [err]) | ||||||
|  |                     else: | ||||||
|  |                         raise | ||||||
|  | 
 | ||||||
|  |                 # Last bit before first nursery block ends in the case | ||||||
|  |                 # where we didn't error in the caller's scope | ||||||
|  |                 log.debug("Waiting on all subactors to complete") | ||||||
|  |                 anursery._join_procs.set() | ||||||
|  | 
 | ||||||
|                 # ria_nursery scope end |                 # ria_nursery scope end | ||||||
| 
 | 
 | ||||||
|         # TODO: this is the handler around the ``.run_in_actor()`` |         # XXX: do we need a `trio.Cancelled` catch here as well? | ||||||
|         # nursery. Ideally we can drop this entirely in the future as |         except (Exception, trio.MultiError, trio.Cancelled) as err: | ||||||
|         # the whole ``.run_in_actor()`` API should be built "on top of" |  | ||||||
|         # this lower level spawn-request-cancel "daemon actor" API where |  | ||||||
|         # a local in-actor task nursery is used with one-to-one task |  | ||||||
|         # + `await Portal.run()` calls and the results/errors are |  | ||||||
|         # handled directly (inline) and errors by the local nursery. |  | ||||||
|         except ( |  | ||||||
|             Exception, |  | ||||||
|             BaseExceptionGroup, |  | ||||||
|             trio.Cancelled |  | ||||||
| 
 |  | ||||||
|         ) as err: |  | ||||||
| 
 |  | ||||||
|             # XXX: yet another guard before allowing the cancel |  | ||||||
|             # sequence in case a (single) child is in debug. |  | ||||||
|             await maybe_wait_for_debugger( |  | ||||||
|                 child_in_debug=anursery._at_least_one_child_in_debug |  | ||||||
|             ) |  | ||||||
| 
 |  | ||||||
|             # If actor-local error was raised while waiting on |             # If actor-local error was raised while waiting on | ||||||
|             # ".run_in_actor()" actors then we also want to cancel all |             # ".run_in_actor()" actors then we also want to cancel all | ||||||
|             # remaining sub-actors (due to our lone strategy: |             # remaining sub-actors (due to our lone strategy: | ||||||
|             # one-cancels-all). |             # one-cancels-all). | ||||||
|             log.cancel(f"Nursery cancelling due to {err}") |             log.warning(f"Nursery cancelling due to {err}") | ||||||
|             if anursery._children: |             if anursery._children: | ||||||
|                 with trio.CancelScope(shield=True): |                 with trio.CancelScope(shield=True): | ||||||
|                     await anursery.cancel() |                     await anursery.cancel() | ||||||
|  | @ -432,26 +310,22 @@ async def _open_and_supervise_one_cancels_all_nursery( | ||||||
|                     with trio.CancelScope(shield=True): |                     with trio.CancelScope(shield=True): | ||||||
|                         await anursery.cancel() |                         await anursery.cancel() | ||||||
| 
 | 
 | ||||||
|                 # use `BaseExceptionGroup` as needed |                 # use `MultiError` as needed | ||||||
|                 if len(errors) > 1: |                 if len(errors) > 1: | ||||||
|                     raise BaseExceptionGroup( |                     raise trio.MultiError(tuple(errors.values())) | ||||||
|                         'tractor.ActorNursery errored with', |  | ||||||
|                         tuple(errors.values()), |  | ||||||
|                     ) |  | ||||||
|                 else: |                 else: | ||||||
|                     raise list(errors.values())[0] |                     raise list(errors.values())[0] | ||||||
| 
 | 
 | ||||||
|         # da_nursery scope end - nursery checkpoint |         # ria_nursery scope end - nursery checkpoint | ||||||
|     # final exit | 
 | ||||||
|  |     # after nursery exit | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @acm | @asynccontextmanager | ||||||
| async def open_nursery( | async def open_nursery( | ||||||
|     **kwargs, |     **kwargs, | ||||||
| 
 |  | ||||||
| ) -> typing.AsyncGenerator[ActorNursery, None]: | ) -> typing.AsyncGenerator[ActorNursery, None]: | ||||||
|     ''' |     """Create and yield a new ``ActorNursery`` to be used for spawning | ||||||
|     Create and yield a new ``ActorNursery`` to be used for spawning |  | ||||||
|     structured concurrent subactors. |     structured concurrent subactors. | ||||||
| 
 | 
 | ||||||
|     When an actor is spawned a new trio task is started which |     When an actor is spawned a new trio task is started which | ||||||
|  | @ -463,8 +337,7 @@ async def open_nursery( | ||||||
|     close it. It turns out this approach is probably more correct |     close it. It turns out this approach is probably more correct | ||||||
|     anyway since it is more clear from the following nested nurseries |     anyway since it is more clear from the following nested nurseries | ||||||
|     which cancellation scopes correspond to each spawned subactor set. |     which cancellation scopes correspond to each spawned subactor set. | ||||||
| 
 |     """ | ||||||
|     ''' |  | ||||||
|     implicit_runtime = False |     implicit_runtime = False | ||||||
| 
 | 
 | ||||||
|     actor = current_actor(err_on_no_runtime=False) |     actor = current_actor(err_on_no_runtime=False) | ||||||
|  | @ -472,8 +345,7 @@ async def open_nursery( | ||||||
|     try: |     try: | ||||||
|         if actor is None and is_main_process(): |         if actor is None and is_main_process(): | ||||||
| 
 | 
 | ||||||
|             # if we are the parent process start the |             # if we are the parent process start the actor runtime implicitly | ||||||
|             # actor runtime implicitly |  | ||||||
|             log.info("Starting actor runtime!") |             log.info("Starting actor runtime!") | ||||||
| 
 | 
 | ||||||
|             # mark us for teardown on exit |             # mark us for teardown on exit | ||||||
|  | @ -482,23 +354,19 @@ async def open_nursery( | ||||||
|             async with open_root_actor(**kwargs) as actor: |             async with open_root_actor(**kwargs) as actor: | ||||||
|                 assert actor is current_actor() |                 assert actor is current_actor() | ||||||
| 
 | 
 | ||||||
|                 try: |  | ||||||
|                 async with _open_and_supervise_one_cancels_all_nursery( |                 async with _open_and_supervise_one_cancels_all_nursery( | ||||||
|                     actor |                     actor | ||||||
|                 ) as anursery: |                 ) as anursery: | ||||||
|  | 
 | ||||||
|                     yield anursery |                     yield anursery | ||||||
|                 finally: |  | ||||||
|                     anursery.exited.set() |  | ||||||
| 
 | 
 | ||||||
|         else:  # sub-nursery case |         else:  # sub-nursery case | ||||||
| 
 | 
 | ||||||
|             try: |  | ||||||
|             async with _open_and_supervise_one_cancels_all_nursery( |             async with _open_and_supervise_one_cancels_all_nursery( | ||||||
|                 actor |                 actor | ||||||
|             ) as anursery: |             ) as anursery: | ||||||
|  | 
 | ||||||
|                 yield anursery |                 yield anursery | ||||||
|             finally: |  | ||||||
|                 anursery.exited.set() |  | ||||||
| 
 | 
 | ||||||
|     finally: |     finally: | ||||||
|         log.debug("Nursery teardown complete") |         log.debug("Nursery teardown complete") | ||||||
Some files were not shown because too many files have changed in this diff Show More
		Loading…
	
		Reference in New Issue