forked from goodboy/tractor
				
Compare commits: sigintsavi ... master (229 commits)
	| Author | SHA1 | Date | 
|---|---|---|
|  | e5ee2e3de8 | |
|  | 41aa91c8eb | |
|  | 6758e4487c | |
|  | 1c3893a383 | |
|  | 73befac9bc | |
|  | 79622bbeea | |
|  | 95535b2226 | |
|  | 87c6e09d6b | |
|  | 9ccd3a74b6 | |
|  | ae4ff5dc8d | |
|  | 705538398f | |
|  | 86aef5238d | |
|  | cc82447db6 | |
|  | 23cffbd940 | |
|  | 3d202272c4 | |
|  | 63cdb0891f | |
|  | 0f7db27b68 | |
|  | c53d62d2f7 | |
|  | f667d16d66 | |
|  | 24a062341e | |
|  | e714bec8db | |
|  | 009cd6552e | |
|  | 649c5e7504 | |
|  | 203f95615c | |
|  | efb8bec828 | |
|  | 8637778739 | |
|  | 47166e45f0 | |
|  | 4ce2dcd12b | |
|  | 80f983818f | |
|  | 6ba29f8d56 | |
|  | 2707a0e971 | |
|  | c8efcdd0d3 | |
|  | 9f9907271b | |
|  | c2367c1c5e | |
|  | a777217674 | |
|  | 13c9eadc8f | |
|  | af6c325072 | |
|  | 195d2f0ed4 | |
|  | aa4871b13d | |
|  | 556f4626db | |
|  | 3967c0ed9e | |
|  | e34823aab4 | |
|  | 6c35ba2cb6 | |
|  | 3a0817ff55 | |
|  | 7fddb4416b | |
|  | 1d92f2552a | |
|  | 4f8586a928 | |
|  | fb9ff45745 | |
|  | 36a83cb306 | |
|  | 7394a187e0 | |
|  | df01294bb2 | |
|  | ddf3d0d1b3 | |
|  | 158569adae | |
|  | 97d5f7233b | |
|  | d27c081a15 | |
|  | a4874a3227 | |
|  | de04bbb2bb | |
|  | 4f977189c0 | |
|  | 9fd62cf71f | |
|  | 606efa5bb7 | |
|  | 121a8cc891 | |
|  | c54b8ca4ba | |
|  | de93c8257c | |
|  | 5b8a87d0f6 | |
|  | 9e5c8ce6f6 | |
|  | 965cd406a2 | |
|  | 2e278ceb74 | |
|  | 6d124db7c9 | |
|  | dba8118553 | |
|  | fca2e7c10e | |
|  | 5ed62c5c54 | |
|  | 588b7ca7bf | |
|  | d8214735b9 | |
|  | 48f6d514ef | |
|  | 6c8cacc9d1 | |
|  | 38326e8c15 | |
|  | b5192cca8e | |
|  | c606be8c64 | |
|  | d8e48e29ba | |
|  | a0f6668ce8 | |
|  | 274c66cf9d | |
|  | f2641c8964 | |
|  | c47575997a | |
|  | f39414ce12 | |
|  | 0a1bf8e57d | |
|  | e298b70edf | |
|  | c0dd5d7ffc | |
|  | 347591c348 | |
|  | 38f9d35dee | |
|  | 88448f7281 | |
|  | 0956d5f461 | |
|  | c646c79a82 | |
|  | 33f2234baf | |
|  | 7521bded3d | |
|  | 0f523b65fb | |
|  | 50fe098e06 | |
|  | d87d6af7e1 | |
|  | df69aedcd5 | |
|  | b15e4ed9ce | |
|  | 98056f6ed7 | |
|  | 247d3448ae | |
|  | fc17f6790e | |
|  | b81b6be98a | |
|  | 72fbda4cef | |
|  | fb721f36ef | |
|  | 734d8dd663 | |
|  | 30ea7a06b0 | |
|  | 3398153c52 | |
|  | 1c480e6c92 | |
|  | dfdad4d1fa | |
|  | b892bc74f6 | |
|  | 44b59f3338 | |
|  | 7a719ac2a7 | |
|  | 9e6266dda3 | |
|  | b1abec543f | |
|  | 93b9d2dc2d | |
|  | 4d808757a6 | |
|  | 7e5bb0437e | |
|  | b19f08d9f0 | |
|  | 2c20b2d64f | |
|  | 023b6fc845 | |
|  | d24fae8381 | |
|  | 5ab98513b7 | |
|  | 90f4912580 | |
|  | 6e24e16068 | |
|  | 15047341bd | |
|  | dc295ab227 | |
|  | 6a0337b69d | |
|  | e609183242 | |
|  | 368e9f3f7c | |
|  | 10eeda2d2b | |
|  | a113e22bb9 | |
|  | ad19bf2cf1 | |
|  | 9aef03772a | |
|  | 7548dba8f2 | |
|  | ba4d4e9af3 | |
|  | 208d56af2c | |
|  | a3a5bc267e | |
|  | d4084b2032 | |
|  | 1e6b4d5dd4 | |
|  | c613acfe5c | |
|  | fea9dc7065 | |
|  | e558c427de | |
|  | f07c3aa4a1 | |
|  | bafd10a260 | |
|  | 5ad540c417 | |
|  | 83b44cf469 | |
|  | 1f2001020e | |
|  | 71f9881a60 | |
|  | e24645eec8 | |
|  | c3cdeeb3ba | |
|  | 9bd534df83 | |
|  | c1d700f257 | |
|  | 14c6e34658 | |
|  | 3393bc23e4 | |
|  | 171f1bc243 | |
|  | ee02cd2496 | |
|  | 4c5d435aac | |
|  | a9b4a61620 | |
|  | 641ed7a32a | |
|  | cc5f60bba0 | |
|  | 8f1fe2376a | |
|  | 65540f3e2a | |
|  | 650313dfef | |
|  | e4006da6f4 | |
|  | 7f6169a050 | |
|  | 2d387f2610 | |
|  | 8115759984 | |
|  | 02c3b9a672 | |
|  | fa4388835c | |
|  | 54de72d8df | |
|  | c5c7a9027c | |
|  | e4771eec16 | |
|  | a9aaee9dbd | |
|  | acfbae4b95 | |
|  | aca9a6b99a | |
|  | 8896ba2bf8 | |
|  | 87b2ccb86a | |
|  | 937ed99e39 | |
|  | 91f034a136 | |
|  | 08cf03cd9e | |
|  | 5e23b3ca0d | |
|  | 6f01c78122 | |
|  | 457499bc2e | |
|  | a4bac135d9 | |
|  | 20c660faa7 | |
|  | 1d4d55f5cd | |
|  | c0cd99e374 | |
|  | a4538a3d84 | |
|  | b01daa5319 | |
|  | bd362a05f0 | |
|  | cb0c47c42a | |
|  | 808d7ae2c6 | |
|  | b21f2e16ad | |
|  | 4779badd96 | |
|  | 6bdcbdb96f | |
|  | adbebd3f06 | |
|  | a2e90194bc | |
|  | ba7b355d9c | |
|  | 617d57dc35 | |
|  | dadd5e6148 | |
|  | a72350118c | |
|  | ef8dc0204c | |
|  | a101971027 | |
|  | 835836123b | |
|  | 70ad0f6b8e | |
|  | 56b30a9a53 | |
|  | 925d5c1ceb | |
|  | b9eb601265 | |
|  | 4dcc21234e | |
|  | 64909e676e | |
|  | 19fb77f698 | |
|  | 8b9f342eef | |
|  | bd7d507153 | |
|  | 9bc38cbf04 | |
|  | a90ca4b384 | |
|  | d0dcd55f47 | |
|  | 4e08605b0d | |
|  | 519f4c300b | |
|  | 56c19093bb | |
|  | ff3f5959e9 | |
|  | abb00531d3 | |
|  | 439d320a25 | |
|  | 18c525d2f1 | |
|  | 201c026284 | |
|  | 2a61aa099b | |
|  | e2453fd3da | |
|  | b29def8b5d | |
|  | f07e9dbb2f | 
|  | @ -26,8 +26,10 @@ jobs: | |||
|         run: pip install -U . --upgrade-strategy eager -r requirements-test.txt | ||||
| 
 | ||||
|       - name: Run MyPy check | ||||
|         run: mypy tractor/ --ignore-missing-imports | ||||
|         run: mypy tractor/ --ignore-missing-imports --show-traceback | ||||
| 
 | ||||
|   # test that we can generate a software distribution and install it | ||||
|   # thus avoid missing file issues after packaging. | ||||
|   sdist-linux: | ||||
|     name: 'sdist' | ||||
|     runs-on: ubuntu-latest | ||||
|  | @ -57,8 +59,12 @@ jobs: | |||
|       fail-fast: false | ||||
|       matrix: | ||||
|         os: [ubuntu-latest] | ||||
|         python: ['3.9', '3.10'] | ||||
|         spawn_backend: ['trio', 'mp'] | ||||
|         python: ['3.10'] | ||||
|         spawn_backend: [ | ||||
|           'trio', | ||||
|           'mp_spawn', | ||||
|           'mp_forkserver', | ||||
|         ] | ||||
| 
 | ||||
|     steps: | ||||
| 
 | ||||
|  | @ -73,42 +79,53 @@ jobs: | |||
|       - name: Install dependencies | ||||
|         run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager | ||||
| 
 | ||||
|       - name: Run tests | ||||
|         run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs | ||||
|       - name: List dependencies | ||||
|         run: pip list | ||||
| 
 | ||||
|   # We skip 3.10 on windows for now due to | ||||
|   # https://github.com/pytest-dev/pytest/issues/8733 | ||||
|   # some kinda weird `pyreadline` issue.. | ||||
|       - name: Run tests | ||||
|         run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx | ||||
| 
 | ||||
|   # We skip 3.10 on windows for now due to not having any collabs to | ||||
|   # debug the CI failures. Anyone wanting to hack and solve them is very | ||||
|   # welcome, but our primary user base is not using that OS. | ||||
| 
 | ||||
|   # TODO: use job filtering to accomplish instead of repeated | ||||
|   # boilerplate as is above XD: | ||||
|   # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows | ||||
|   # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix | ||||
|   # - https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idif | ||||
|   testing-windows: | ||||
|     name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}' | ||||
|     timeout-minutes: 12 | ||||
|     runs-on: ${{ matrix.os }} | ||||
|   # testing-windows: | ||||
|   #   name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}' | ||||
|   #   timeout-minutes: 12 | ||||
|   #   runs-on: ${{ matrix.os }} | ||||
| 
 | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         os: [windows-latest] | ||||
|         python: ['3.9', '3.10'] | ||||
|         spawn_backend: ['trio', 'mp'] | ||||
|   #   strategy: | ||||
|   #     fail-fast: false | ||||
|   #     matrix: | ||||
|   #       os: [windows-latest] | ||||
|   #       python: ['3.10'] | ||||
|   #       spawn_backend: ['trio', 'mp'] | ||||
| 
 | ||||
|     steps: | ||||
|   #   steps: | ||||
| 
 | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@v2 | ||||
|   #     - name: Checkout | ||||
|   #       uses: actions/checkout@v2 | ||||
| 
 | ||||
|       - name: Setup python | ||||
|         uses: actions/setup-python@v2 | ||||
|         with: | ||||
|           python-version: '${{ matrix.python }}' | ||||
|   #     - name: Setup python | ||||
|   #       uses: actions/setup-python@v2 | ||||
|   #       with: | ||||
|   #         python-version: '${{ matrix.python }}' | ||||
| 
 | ||||
|       - name: Install dependencies | ||||
|         run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager | ||||
|   #     - name: Install dependencies | ||||
|   #       run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager | ||||
| 
 | ||||
|       - name: Run tests | ||||
|         run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs | ||||
|   #     # TODO: pretty sure this solves debugger deps-issues on windows, but it needs to | ||||
|   #     # be verified by someone with a native setup. | ||||
|   #     # - name: Force pyreadline3 | ||||
|   #     #   run: pip uninstall pyreadline; pip install -U pyreadline3 | ||||
| 
 | ||||
|   #     - name: List dependencies | ||||
|   #       run: pip list | ||||
| 
 | ||||
|   #     - name: Run tests | ||||
|   #       run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx | ||||
|  |  | |||
|  | @ -1,7 +0,0 @@ | |||
| Add ``tractor.query_actor()`` an addr looker-upper which doesn't deliver | ||||
| a ``Portal`` instance and instead just a socket address ``tuple``. | ||||
| 
 | ||||
| Sometimes it's handy to just have a simple way to figure out if | ||||
| a "service" actor is up, so add this discovery helper for that. We'll | ||||
| prolly just leave it undocumented for now until we figure out | ||||
| a longer-term/better discovery system. | ||||
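For the curious, a rough usage sketch of this helper follows; the async-context-manager call shape and the yielded socket-address-or-``None`` value are read off the note above and should be treated as assumptions, as is the ``'my_service'`` actor name::

    import trio
    import tractor


    async def main():
        async with tractor.open_nursery() as an:
            # spawn a named "service" actor we can then look up
            portal = await an.start_actor(
                'my_service',
                enable_modules=[__name__],
            )

            # just ask the registrar whether the service is up; no
            # ``Portal`` is delivered, only its socket address
            async with tractor.query_actor('my_service') as sockaddr:
                if sockaddr:
                    print(f'service found at {sockaddr}')
                else:
                    print('service not (yet) registered')

            await portal.cancel_actor()


    if __name__ == '__main__':
        trio.run(main)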
							
								
								
									
NEWS.rst (142 changed lines)
							|  | @ -4,6 +4,148 @@ Changelog | |||
| 
 | ||||
| .. towncrier release notes start | ||||
| 
 | ||||
| tractor 0.1.0a5 (2022-08-03) | ||||
| ============================ | ||||
| 
 | ||||
| This is our final release supporting Python 3.9 since we will be moving | ||||
| internals to the new `match:` syntax from 3.10 going forward and | ||||
| further, we have officially dropped usage of the `msgpack` library and | ||||
| happily adopted `msgspec`. | ||||
| 
 | ||||
| Features | ||||
| -------- | ||||
| 
 | ||||
| - `#165 <https://github.com/goodboy/tractor/issues/165>`_: Add SIGINT | ||||
|   protection to our `pdbpp` based debugger subsystem such that for | ||||
|   (single-depth) actor trees in debug mode we ignore interrupts in any | ||||
|   actor currently holding the TTY lock thus avoiding clobbering IPC | ||||
|   connections and/or task and process state when working in the REPL. | ||||
| 
 | ||||
|   As a big note currently so called "nested" actor trees (trees with | ||||
|   actors having more than one parent/ancestor) are not fully supported | ||||
|   since we don't yet have a mechanism to relay the debug mode knowledge | ||||
|   "up" the actor tree (for eg. when handling a crash in a leaf actor). | ||||
|   As such currently there is a set of tests and known scenarios which will | ||||
|   result in process clobbering by the zombie reaping machinery and these | ||||
|   have been documented in https://github.com/goodboy/tractor/issues/320. | ||||
| 
 | ||||
|   The implementation details include: | ||||
| 
 | ||||
|   - utilizing a custom SIGINT handler which we apply whenever an actor's | ||||
|     runtime enters the debug machinery, which we also make sure the | ||||
|     stdlib's `pdb` configuration doesn't override (which it does by | ||||
|     default without special instance config). | ||||
|   - litter the runtime with `maybe_wait_for_debugger()` mostly in spots | ||||
|     where the root actor should block before doing embedded nursery | ||||
|     teardown ops which both cancel potential-children-in-debug as well | ||||
|     as eventually trigger zombie reaping machinery. | ||||
|   - hardening of the TTY locking semantics/API both in terms of IPC | ||||
|     terminations and cancellation and lock release determinism from | ||||
|     sync debugger instance methods. | ||||
|   - factoring of locking infrastructure into a new `._debug.Lock` global | ||||
|     which encapsulates all details of the ``trio`` sync primitives and | ||||
|     task/actor uid management and tracking. | ||||
| 
 | ||||
|   We also add `ctrl-c` cases throughout the test suite though these are | ||||
|   disabled for py3.9 (`pdbpp` UX differences that don't seem worth | ||||
|   compensating for, especially since this will be our last 3.9 supported | ||||
|   release) and there are a slew of marked cases that aren't expected to | ||||
|   work in CI more generally (as mentioned in the "nested" tree note | ||||
|   above) despite seemingly working when run manually on linux. | ||||
| 
 | ||||
| - `#304 <https://github.com/goodboy/tractor/issues/304>`_: Add a new | ||||
|   ``to_asyncio.LinkedTaskChannel.subscribe()`` which gives task-oriented | ||||
|   broadcast functionality semantically equivalent to | ||||
|   ``tractor.MsgStream.subscribe()``; this makes it possible for multiple | ||||
|   ``trio``-side tasks to consume ``asyncio``-side task msgs in tandem. | ||||
| 
 | ||||
|   Further improvements to the test suite were added in this patch set | ||||
|   including a new scenario test for a sub-actor managed "service nursery" | ||||
|   (implementing the basics of a "service manager") including use of | ||||
|   *infected asyncio* mode. Further we added a lower level | ||||
|   ``test_trioisms.py`` to start to track issues we need to work around in | ||||
|   ``trio`` itself which in this case included a bug we were trying to | ||||
|   solve related to https://github.com/python-trio/trio/issues/2258. | ||||
| 
 | ||||
| 
 | ||||
| Bug Fixes | ||||
| --------- | ||||
| 
 | ||||
| - `#318 <https://github.com/goodboy/tractor/issues/318>`_: Fix | ||||
|   a previously undetected ``trio``-``asyncio`` task lifetime linking | ||||
|   issue with the ``to_asyncio.open_channel_from()`` api where both sides | ||||
|   were not properly waiting/signalling termination and it was possible | ||||
|   for ``asyncio``-side errors to not propagate due to a race condition. | ||||
| 
 | ||||
|   The implementation fix summary is: | ||||
|   - add state to signal the end of the ``trio`` side task to be | ||||
|     read by the ``asyncio`` side and always cancel any ongoing | ||||
|     task in such cases. | ||||
|   - always wait on the ``asyncio`` task termination from the ``trio`` | ||||
|     side on error before maybe raising said error. | ||||
|   - always close the ``trio`` mem chan on exit to ensure the other | ||||
|     side can detect it and follow. | ||||
| 
 | ||||
| 
 | ||||
| Trivial/Internal Changes | ||||
| ------------------------ | ||||
| 
 | ||||
| - `#248 <https://github.com/goodboy/tractor/issues/248>`_: Adjust the | ||||
|   `tractor._spawn.soft_wait()` strategy to avoid sending an actor cancel | ||||
|   request (via `Portal.cancel_actor()`) if either the child process is | ||||
|   detected as having terminated or the IPC channel is detected to be | ||||
|   closed. | ||||
| 
 | ||||
|   This ensures (even) more deterministic inter-actor cancellation by | ||||
|   avoiding the timeout condition where possible when a child never | ||||
|   successfully spawned, crashed, or became un-contactable over IPC. | ||||
| 
 | ||||
| - `#295 <https://github.com/goodboy/tractor/issues/295>`_: Add an | ||||
|   experimental ``tractor.msg.NamespacePath`` type for passing Python | ||||
|   objects by "reference" through a ``str``-subtype message and using the | ||||
|   new ``pkgutil.resolve_name()`` for reference loading. | ||||
| 
 | ||||
| - `#298 <https://github.com/goodboy/tractor/issues/298>`_: Add a new | ||||
|   `tractor.experimental` subpackage for staging new high level APIs and | ||||
|   subsystems that we might eventually make built-ins. | ||||
| 
 | ||||
| - `#300 <https://github.com/goodboy/tractor/issues/300>`_: Update to and | ||||
|   pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0) both of which | ||||
|   required adjustments for backwards incompatible API tweaks. | ||||
| 
 | ||||
| - `#303 <https://github.com/goodboy/tractor/issues/303>`_: Fence off | ||||
|   ``multiprocessing`` imports until absolutely necessary in an effort to | ||||
|   avoid "resource tracker" spawning side effects that seem to have | ||||
|   varying degrees of unreliability per Python release. Port to new | ||||
|   ``msgspec.DecodeError``. | ||||
| 
 | ||||
| - `#305 <https://github.com/goodboy/tractor/issues/305>`_: Add | ||||
|   ``tractor.query_actor()`` an addr looker-upper which doesn't deliver | ||||
|   a ``Portal`` instance and instead just a socket address ``tuple``. | ||||
| 
 | ||||
|   Sometimes it's handy to just have a simple way to figure out if | ||||
|   a "service" actor is up, so add this discovery helper for that. We'll | ||||
|   prolly just leave it undocumented for now until we figure out | ||||
|   a longer-term/better discovery system. | ||||
| 
 | ||||
| - `#316 <https://github.com/goodboy/tractor/issues/316>`_: Run windows | ||||
|   CI jobs on python 3.10 after some hacks for ``pdbpp`` dependency | ||||
|   issues. | ||||
| 
 | ||||
|   Issue was to do with the now deprecated `pyreadline` project which | ||||
|   should be changed over to `pyreadline3`. | ||||
| 
 | ||||
| - `#317 <https://github.com/goodboy/tractor/issues/317>`_: Drop use of | ||||
|   the ``msgpack`` package and instead move fully to the ``msgspec`` | ||||
|   codec library. | ||||
| 
 | ||||
|   We've now used ``msgspec`` extensively in production and there's no | ||||
|   reason to not use it as default. Further this change preps us for the up | ||||
|   and coming typed messaging semantics (#196), dialog-unprotocol system | ||||
|   (#297), and caps-based messaging-protocols (#299) planned before our | ||||
|   first beta. | ||||
| 
 | ||||
| 
 | ||||
| tractor 0.1.0a4 (2021-12-18) | ||||
| ============================ | ||||
| 
 | ||||
|  |  | |||
|  | @ -6,8 +6,14 @@ | |||
| ``tractor`` is a `structured concurrent`_, multi-processing_ runtime | ||||
| built on trio_. | ||||
| 
 | ||||
| Fundamentally ``tractor`` gives you parallelism via ``trio``-"*actors*": | ||||
| our nurseries_ let you spawn new Python processes which each run a ``trio`` | ||||
| Fundamentally, ``tractor`` gives you parallelism via | ||||
| ``trio``-"*actors*": independent Python processes (aka | ||||
| non-shared-memory threads) which maintain structured | ||||
| concurrency (SC) *end-to-end* inside a *supervision tree*. | ||||
| 
 | ||||
| Cross-process (and thus cross-host) SC is accomplished through the | ||||
| combined use of our "actor nurseries_" and an "SC-transitive IPC | ||||
| protocol" constructed on top of multiple Pythons each running a ``trio`` | ||||
| scheduled runtime - a call to ``trio.run()``. | ||||
| 
 | ||||
| We believe the system adheres to the `3 axioms`_ of an "`actor model`_" | ||||
|  | @ -23,7 +29,8 @@ Features | |||
| - **It's just** a ``trio`` API | ||||
| - *Infinitely nestable* process trees | ||||
| - Builtin IPC streaming APIs with task fan-out broadcasting | ||||
| - A (first ever?) "native" multi-core debugger UX for Python using `pdb++`_ | ||||
| - A "native" multi-core debugger REPL using `pdbp`_ (a fork & fix of | ||||
|   `pdb++`_ thanks to @mdmintz!) | ||||
| - Support for a swappable, OS specific, process spawning layer | ||||
| - A modular transport stack, allowing for custom serialization (eg. with | ||||
|   `msgspec`_), communications protocols, and environment specific IPC | ||||
|  | @ -118,7 +125,7 @@ Zombie safe: self-destruct a process tree | |||
|             f"running in pid {os.getpid()}" | ||||
|         ) | ||||
| 
 | ||||
|        await trio.sleep_forever() | ||||
|         await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
|     async def main(): | ||||
|  | @ -149,7 +156,7 @@ it **is a bug**. | |||
| 
 | ||||
| "Native" multi-process debugging | ||||
| -------------------------------- | ||||
| Using the magic of `pdb++`_ and our internal IPC, we've | ||||
| Using the magic of `pdbp`_ and our internal IPC, we've | ||||
| been able to create a native feeling debugging experience for | ||||
| any (sub-)process in your ``tractor`` tree. | ||||
| 
 | ||||
|  | @ -567,6 +574,13 @@ Help us push toward the future of distributed `Python`. | |||
| - Typed capability-based (dialog) protocols ( see `#196 | ||||
|   <https://github.com/goodboy/tractor/issues/196>`_ with draft work | ||||
|   started in `#311 <https://github.com/goodboy/tractor/pull/311>`_) | ||||
| - We **recently disabled CI-testing on windows** and need help getting | ||||
|   it running again! (see `#327 | ||||
|   <https://github.com/goodboy/tractor/pull/327>`_). **We do have windows | ||||
|   support** (and have for quite a while) but since no active hacker | ||||
|   exists in the user-base to help test on that OS, for now we're not | ||||
|   actively maintaining testing due to the added hassle and general | ||||
|   latency.. | ||||
| 
 | ||||
| 
 | ||||
| Feel like saying hi? | ||||
|  | @ -590,6 +604,7 @@ channel`_! | |||
| .. _adherance to: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=1821s | ||||
| .. _trio gitter channel: https://gitter.im/python-trio/general | ||||
| .. _matrix channel: https://matrix.to/#/!tractor:matrix.org | ||||
| .. _pdbp: https://github.com/mdmintz/pdbp | ||||
| .. _pdb++: https://github.com/pdbpp/pdbpp | ||||
| .. _guest mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops | ||||
| .. _messages: https://en.wikipedia.org/wiki/Message_passing | ||||
|  |  | |||
|  | @ -396,7 +396,7 @@ tasks spawned via multiple RPC calls to an actor can modify | |||
| 
 | ||||
| 
 | ||||
|         # a per process cache | ||||
|         _actor_cache: Dict[str, bool] = {} | ||||
|         _actor_cache: dict[str, bool] = {} | ||||
| 
 | ||||
| 
 | ||||
|         def ping_endpoints(endpoints: List[str]): | ||||
|  |  | |||
|  | @ -0,0 +1,151 @@ | |||
| ''' | ||||
| Complex edge case where during real-time streaming the IPC transport | ||||
| channels are wiped out (purposely in this example though it could have | ||||
| been an outage) and we want to ensure that despite being in debug mode | ||||
| (or not) the user can send SIGINT once they notice the hang and the | ||||
| actor tree will eventually be cancelled without leaving any zombies. | ||||
| 
 | ||||
| ''' | ||||
| import trio | ||||
| from tractor import ( | ||||
|     open_nursery, | ||||
|     context, | ||||
|     Context, | ||||
|     MsgStream, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| async def break_channel_silently_then_error( | ||||
|     stream: MsgStream, | ||||
| ): | ||||
|     async for msg in stream: | ||||
|         await stream.send(msg) | ||||
| 
 | ||||
|         # XXX: close the channel right after an error is raised | ||||
|         # purposely breaking the IPC transport to make sure the parent | ||||
|         # doesn't get stuck in debug or hang on the connection join. | ||||
|         # this more or less simulates an infinite msg-receive hang on | ||||
|         # the other end. | ||||
|         await stream._ctx.chan.send(None) | ||||
|         assert 0 | ||||
| 
 | ||||
| 
 | ||||
| async def close_stream_and_error( | ||||
|     stream: MsgStream, | ||||
| ): | ||||
|     async for msg in stream: | ||||
|         await stream.send(msg) | ||||
| 
 | ||||
|         # wipe out channel right before raising | ||||
|         await stream._ctx.chan.send(None) | ||||
|         await stream.aclose() | ||||
|         assert 0 | ||||
| 
 | ||||
| 
 | ||||
| @context | ||||
| async def recv_and_spawn_net_killers( | ||||
| 
 | ||||
|     ctx: Context, | ||||
|     break_ipc_after: bool | int = False, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Receive stream msgs and spawn some IPC killers mid-stream. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     async with ( | ||||
|         ctx.open_stream() as stream, | ||||
|         trio.open_nursery() as n, | ||||
|     ): | ||||
|         async for i in stream: | ||||
|             print(f'child echoing {i}') | ||||
|             await stream.send(i) | ||||
|             if ( | ||||
|                 break_ipc_after | ||||
|                 and i > break_ipc_after | ||||
|             ): | ||||
|                 print( | ||||
|                     '#################################\n' | ||||
|                     'Simulating child-side IPC BREAK!\n' | ||||
|                     '#################################' | ||||
|                 ) | ||||
|                 n.start_soon(break_channel_silently_then_error, stream) | ||||
|                 n.start_soon(close_stream_and_error, stream) | ||||
| 
 | ||||
| 
 | ||||
| async def main( | ||||
|     debug_mode: bool = False, | ||||
|     start_method: str = 'trio', | ||||
| 
 | ||||
|     # by default we break the parent IPC first (if configured to break | ||||
|     # at all), but this can be changed so the child does first (even if | ||||
|     # both are set to break). | ||||
|     break_parent_ipc_after: int | bool = False, | ||||
|     break_child_ipc_after: int | bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     async with ( | ||||
|         open_nursery( | ||||
|             start_method=start_method, | ||||
| 
 | ||||
|             # NOTE: even if the debugger is used we shouldn't get | ||||
|             # a hang since it never engages due to broken IPC | ||||
|             debug_mode=debug_mode, | ||||
|             loglevel='warning', | ||||
| 
 | ||||
|         ) as an, | ||||
|     ): | ||||
|         portal = await an.start_actor( | ||||
|             'chitty_hijo', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_context( | ||||
|             recv_and_spawn_net_killers, | ||||
|             break_ipc_after=break_child_ipc_after, | ||||
| 
 | ||||
|         ) as (ctx, sent): | ||||
|             async with ctx.open_stream() as stream: | ||||
|                 for i in range(1000): | ||||
| 
 | ||||
|                     if ( | ||||
|                         break_parent_ipc_after | ||||
|                         and i > break_parent_ipc_after | ||||
|                     ): | ||||
|                         print( | ||||
|                             '#################################\n' | ||||
|                             'Simulating parent-side IPC BREAK!\n' | ||||
|                             '#################################' | ||||
|                         ) | ||||
|                         await stream._ctx.chan.send(None) | ||||
| 
 | ||||
|                     # it actually breaks right here in the | ||||
|                     # mp_spawn/forkserver backends and thus the zombie | ||||
|                     # reaper never even kicks in? | ||||
|                     print(f'parent sending {i}') | ||||
|                     await stream.send(i) | ||||
| 
 | ||||
|                     with trio.move_on_after(2) as cs: | ||||
| 
 | ||||
|                         # NOTE: in the parent side IPC failure case this | ||||
|                         # will raise an ``EndOfChannel`` after the child | ||||
|                         # is killed and sends a stop msg back to its | ||||
|                         # caller/this-parent. | ||||
|                         rx = await stream.receive() | ||||
| 
 | ||||
|                         print(f"I'm a happy user and echoed to me is {rx}") | ||||
| 
 | ||||
|                     if cs.cancelled_caught: | ||||
|                         # pretend to be a user seeing no streaming action | ||||
|                         # thinking it's a hang, and then hitting ctl-c.. | ||||
|                         print("YOO i'm a user anddd thingz hangin..") | ||||
| 
 | ||||
|                 print( | ||||
|                     "YOO i'm mad send side dun but thingz hangin..\n" | ||||
|                     'MASHING CTlR-C Ctl-c..' | ||||
|                 ) | ||||
|                 raise KeyboardInterrupt | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -27,7 +27,18 @@ async def main(): | |||
| 
 | ||||
|         # retrieve results | ||||
|         async with p0.open_stream_from(breakpoint_forever) as stream: | ||||
|             await p1.run(name_error) | ||||
| 
 | ||||
|             # triggers the first name error | ||||
|             try: | ||||
|                 await p1.run(name_error) | ||||
|             except tractor.RemoteActorError as rae: | ||||
|                 assert rae.type is NameError | ||||
| 
 | ||||
|             async for i in stream: | ||||
| 
 | ||||
|             # a second time try the failing subactor and this time | ||||
|                 # let error propagate up to the parent/nursery. | ||||
|                 await p1.run(name_error) | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|  |  | |||
|  | @ -12,18 +12,31 @@ async def breakpoint_forever(): | |||
|     while True: | ||||
|         await tractor.breakpoint() | ||||
| 
 | ||||
|         # NOTE: if the test never sent 'q'/'quit' commands | ||||
|         # on the pdb repl, without this checkpoint line the | ||||
|         # repl would spin in this actor forever. | ||||
|         # await trio.sleep(0) | ||||
| 
 | ||||
| 
 | ||||
| async def spawn_until(depth=0): | ||||
|     """A nested nursery that triggers another ``NameError``. | ||||
|     """ | ||||
|     async with tractor.open_nursery() as n: | ||||
|         if depth < 1: | ||||
|             # await n.run_in_actor('breakpoint_forever', breakpoint_forever) | ||||
|             await n.run_in_actor( | ||||
| 
 | ||||
|             await n.run_in_actor(breakpoint_forever) | ||||
| 
 | ||||
|             p = await n.run_in_actor( | ||||
|                 name_error, | ||||
|                 name='name_error' | ||||
|             ) | ||||
|             await trio.sleep(0.5) | ||||
|             # rx and propagate error from child | ||||
|             await p.result() | ||||
| 
 | ||||
|         else: | ||||
|             # recursive call to spawn another process branching layer of | ||||
|             # the tree | ||||
|             depth -= 1 | ||||
|             await n.run_in_actor( | ||||
|                 spawn_until, | ||||
|  | @ -53,6 +66,7 @@ async def main(): | |||
|     """ | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|         # loglevel='cancel', | ||||
|     ) as n: | ||||
| 
 | ||||
|         # spawn both actors | ||||
|  | @ -67,8 +81,16 @@ async def main(): | |||
|             name='spawner1', | ||||
|         ) | ||||
| 
 | ||||
|         # TODO: test this case as well where the parent doesn't see | ||||
|         # the sub-actor errors by default and instead expect a user | ||||
|         # ctrl-c to kill the root. | ||||
|         with trio.move_on_after(3): | ||||
|             await trio.sleep_forever() | ||||
| 
 | ||||
|         # gah still an issue here. | ||||
|         await portal.result() | ||||
| 
 | ||||
|         # should never get here | ||||
|         await portal1.result() | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -0,0 +1,40 @@ | |||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def just_sleep( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Start and sleep. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
| async def main() -> None: | ||||
| 
 | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|     ) as n: | ||||
|         portal = await n.start_actor( | ||||
|             'ctx_child', | ||||
| 
 | ||||
|             # XXX: we don't enable the current module in order | ||||
|             # to trigger `ModuleNotFound`. | ||||
|             enable_modules=[], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_context( | ||||
|             just_sleep,  # taken from pytest parameterization | ||||
|         ) as (ctx, sent): | ||||
|             raise KeyboardInterrupt | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -0,0 +1,24 @@ | |||
| import os | ||||
| import sys | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def main() -> None: | ||||
|     async with tractor.open_nursery(debug_mode=True) as an: | ||||
| 
 | ||||
|         assert os.environ['PYTHONBREAKPOINT'] == 'tractor._debug._set_trace' | ||||
| 
 | ||||
|         # TODO: an assert that verifies the hook has indeed been, hooked | ||||
|         # XD | ||||
|         assert sys.breakpointhook is not tractor._debug._set_trace | ||||
| 
 | ||||
|         breakpoint() | ||||
| 
 | ||||
|     # TODO: an assert that verifies the hook is unhooked.. | ||||
|     assert sys.breakpointhook | ||||
|     breakpoint() | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -9,7 +9,7 @@ is ``tractor``'s channels. | |||
| 
 | ||||
| """ | ||||
| from contextlib import asynccontextmanager | ||||
| from typing import List, Callable | ||||
| from typing import Callable | ||||
| import itertools | ||||
| import math | ||||
| import time | ||||
|  | @ -71,8 +71,8 @@ async def worker_pool(workers=4): | |||
| 
 | ||||
|         async def _map( | ||||
|             worker_func: Callable[[int], bool], | ||||
|             sequence: List[int] | ||||
|         ) -> List[bool]: | ||||
|             sequence: list[int] | ||||
|         ) -> list[bool]: | ||||
| 
 | ||||
|             # define an async (local) task to collect results from workers | ||||
|             async def send_result(func, value, portal): | ||||
|  |  | |||
|  | @ -1,8 +0,0 @@ | |||
| Adjust the `tractor._spawn.soft_wait()` strategy to avoid sending an | ||||
| actor cancel request (via `Portal.cancel_actor()`) if either the child | ||||
| process is detected as having terminated or the IPC channel is detected | ||||
| to be closed. | ||||
| 
 | ||||
| This ensures (even) more deterministic inter-actor cancellation by | ||||
| avoiding the timeout condition where possible when a whild never | ||||
| sucessfully spawned, crashed, or became un-contactable over IPC. | ||||
|  | @ -1,3 +0,0 @@ | |||
| Add an experimental ``tractor.msg.NamespacePath`` type for passing Python | ||||
| objects by "reference" through a ``str``-subtype message and using the | ||||
| new ``pkgutil.resolve_name()`` for reference loading. | ||||
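The stdlib mechanics underneath can be sketched like so; only ``pkgutil.resolve_name()`` is shown since the exact ``NamespacePath`` interface isn't quoted here, and the ``ping()`` function is just a stand-in::

    import pkgutil


    def ping() -> str:
        return 'pong'


    # a "namespace path" is just a ``str`` of the form
    # '<module>:<qualified name>' identifying an object by reference
    ref = f'{ping.__module__}:{ping.__qualname__}'

    # the receiving side re-imports the module and resolves the object
    func = pkgutil.resolve_name(ref)
    assert func() == 'pong'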
|  | @ -1,2 +0,0 @@ | |||
| Add a new `tractor.experimental` subpackage for staging new high level | ||||
| APIs and subystems that we might eventually make built-ins. | ||||
|  | @ -1,3 +0,0 @@ | |||
| Update to and pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0) | ||||
| both of which required adjustments for backwards imcompatible API | ||||
| tweaks. | ||||
|  | @ -1,4 +0,0 @@ | |||
| Fence off ``multiprocessing`` imports until absolutely necessary in an | ||||
| effort to avoid "resource tracker" spawning side effects that seem to | ||||
| have varying degrees of unreliability per Python release. Port to new | ||||
| ``msgspec.DecodeError``. | ||||
|  | @ -1,12 +0,0 @@ | |||
| Add a new ``to_asyncio.LinkedTaskChannel.subscribe()`` which gives | ||||
| task-oriented broadcast functionality semantically equivalent to | ||||
| ``tractor.MsgStream.subscribe()`` this makes it possible for multiple | ||||
| ``trio``-side tasks to consume ``asyncio``-side task msgs in tandem. | ||||
| 
 | ||||
| Further Improvements to the test suite were added in this patch set | ||||
| including a new scenario test for a sub-actor managed "service nursery" | ||||
| (implementing the basics of a "service manager") including use of | ||||
| *infected asyncio* mode. Further we added a lower level | ||||
| ``test_trioisms.py`` to start to track issues we need to work around in | ||||
| ``trio`` itself which in this case included a bug we were trying to | ||||
| solve related to https://github.com/python-trio/trio/issues/2258. | ||||
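To sketch the fan-out semantics this gives (multiple tasks each seeing *every* msg), here is the equivalent done by hand with plain ``trio`` memory channels; all names are illustrative and none of this is the actual ``LinkedTaskChannel`` machinery::

    import trio


    async def relay(source, subscribers):
        # copy every msg from the single source to each per-task
        # channel; this is what ``.subscribe()`` automates internally
        async with source:
            async for msg in source:
                for sub_tx in subscribers:
                    await sub_tx.send(msg)
        for sub_tx in subscribers:
            await sub_tx.aclose()


    async def consumer(rx, name: str):
        async for msg in rx:
            print(f'{name} got {msg}')


    async def main():
        tx, source = trio.open_memory_channel(0)
        sub_txs, sub_rxs = [], []
        for _ in range(2):
            sub_tx, sub_rx = trio.open_memory_channel(1)
            sub_txs.append(sub_tx)
            sub_rxs.append(sub_rx)

        async with trio.open_nursery() as n:
            n.start_soon(relay, source, sub_txs)
            for i, sub_rx in enumerate(sub_rxs):
                n.start_soon(consumer, sub_rx, f'task_{i}')

            async with tx:
                for i in range(3):
                    await tx.send(i)


    trio.run(main)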
|  | @ -1,5 +0,0 @@ | |||
| Run windows CI jobs on python 3.10 after some | ||||
| hacks for ``pdbpp`` dependency issues. | ||||
| 
 | ||||
| Issue was to do with the now deprecated `pyreadline` project which | ||||
| should be changed over to `pyreadline3`. | ||||
|  | @ -1,8 +0,0 @@ | |||
| Drop use of the ``msgpack`` package and instead move fully to the | ||||
| ``msgspec`` codec library. | ||||
| 
 | ||||
| We've now used ``msgspec`` extensively in production and there's no | ||||
| reason to not use it as default. Further this change preps us for the up | ||||
| and coming typed messaging semantics (#196), dialog-unprotocol system | ||||
| (#297), and caps-based messaging-protocols (#299) planned before our | ||||
| first beta. | ||||
|  | @ -1,13 +0,0 @@ | |||
| Fix a previously undetected ``trio``-``asyncio`` task lifetime linking | ||||
| issue with the ``to_asyncio.open_channel_from()`` api where both sides | ||||
| where not properly waiting/signalling termination and it was possible | ||||
| for ``asyncio``-side errors to not propagate due to a race condition. | ||||
| 
 | ||||
| The implementation fix summary is: | ||||
| - add state to signal the end of the ``trio`` side task to be | ||||
|   read by the ``asyncio`` side and always cancel any ongoing | ||||
|   task in such cases. | ||||
| - always wait on the ``asyncio`` task termination from the ``trio`` | ||||
|   side on error before maybe raising said error. | ||||
| - always close the ``trio`` mem chan on exit to ensure the other | ||||
|   side can detect it and follow. | ||||
|  | @ -0,0 +1,16 @@ | |||
| Strictly support Python 3.10+, start runtime machinery reorg | ||||
| 
 | ||||
| Since we want to push forward using the new `match:` syntax for our | ||||
| internal RPC-msg loops, we officially drop 3.9 support for the next | ||||
| release which should coincide well with the first release of 3.11. | ||||
| 
 | ||||
| This patch set also officially removes the ``tractor.run()`` API (which | ||||
| has been deprecated for some time) as well as starts an initial re-org | ||||
| of the internal runtime core by: | ||||
| - renaming ``tractor._actor`` -> ``._runtime`` | ||||
| - moving the ``._runtime.Actor._process_messages()`` and | ||||
|   ``._async_main()`` to be module level singleton-task-functions since | ||||
|   they are only started once for each connection and actor spawn | ||||
|   respectively; this internal API thus looks more similar to (at the | ||||
|   time of writing) the ``trio``-internals in ``trio._core._run``. | ||||
| - officially remove ``tractor.run()``, now deprecated for some time. | ||||
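Purely as an illustration of why the 3.10 requirement matters for msg-loop style code (a toy dispatcher, not the actual runtime internals)::

    def dispatch(msg: dict) -> str:
        # structural pattern matching over hypothetical msg shapes
        match msg:
            case {'cmd': 'ping'}:
                return 'pong'
            case {'cmd': 'run', 'func': funcname, 'kwargs': kwargs}:
                return f'would run {funcname}(**{kwargs})'
            case {'error': err}:
                return f'remote error: {err}'
            case _:
                return 'unknown msg'


    assert dispatch({'cmd': 'ping'}) == 'pong'
    assert dispatch({'error': 'boom'}) == 'remote error: boom'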
|  | @ -0,0 +1,4 @@ | |||
| Only set `._debug.Lock.local_pdb_complete` if it has been created. | ||||
| 
 | ||||
| This can be triggered by a very rare race condition (and thus we have no | ||||
| working test yet) but it is known to exist in (a) consumer project(s). | ||||
|  | @ -0,0 +1,25 @@ | |||
| Add support for ``trio >= 0.22`` and support for the new Python 3.11 | ||||
| ``[Base]ExceptionGroup`` from `pep 654`_ via the backported | ||||
| `exceptiongroup`_ package and some final fixes to the debug mode | ||||
| subsystem. | ||||
| 
 | ||||
| This port ended up driving some (hopefully) final fixes to our debugger | ||||
| subsystem including the solution to all lingering stdstreams locking | ||||
| race-conditions and deadlock scenarios. This includes extending the | ||||
| debugger tests suite as well as cancellation and ``asyncio`` mode cases. | ||||
| Some of the notable details: | ||||
| 
 | ||||
| - always reverting to the ``trio`` SIGINT handler when leaving debug | ||||
|   mode. | ||||
| - bypassing child attempts to acquire the debug lock when detected | ||||
|   to be amidst actor-runtime-cancellation. | ||||
| - allowing the root actor to cancel local but IPC-stale subactor | ||||
|   requests-tasks for the debug lock when in a "no IPC peers" state. | ||||
| 
 | ||||
| Further we refined our ``ActorNursery`` semantics to be more similar to | ||||
| ``trio`` in the sense that parent task errors are always packed into the | ||||
| actor-nursery emitted exception group and adjusted all tests and | ||||
| examples accordingly. | ||||
| 
 | ||||
| .. _pep 654: https://peps.python.org/pep-0654/#handling-exception-groups | ||||
| .. _exceptiongroup: https://github.com/python-trio/exceptiongroup | ||||
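For downstream code that needs to handle these grouped errors on 3.10, the backport can be used roughly like this (generic `exceptiongroup`_ usage, not tractor-specific API)::

    import trio
    from exceptiongroup import BaseExceptionGroup, catch


    def handle_value_errors(eg: BaseExceptionGroup) -> None:
        # the handler receives the (sub)group of matched exceptions
        for exc in eg.exceptions:
            print(f'handled: {exc!r}')


    async def boom(msg: str) -> None:
        raise ValueError(msg)


    async def main() -> None:
        # a multi-error nursery raises a ``BaseExceptionGroup`` which
        # the backported ``catch()`` unpacks on interpreters < 3.11
        with catch({ValueError: handle_value_errors}):
            async with trio.open_nursery() as n:
                n.start_soon(boom, 'one')
                n.start_soon(boom, 'two')


    trio.run(main)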
|  | @ -0,0 +1,5 @@ | |||
| Establish an explicit "backend spawning" method table; use it from CI | ||||
| 
 | ||||
| More clearly lays out the current set of (3) backends: ``['trio', | ||||
| 'mp_spawn', 'mp_forkserver']`` and adjusts the ``._spawn.py`` internals | ||||
| as well as the test suite to accommodate. | ||||
|  | @ -0,0 +1,4 @@ | |||
| Add ``key: Callable[..., Hashable]`` support to ``.trionics.maybe_open_context()`` | ||||
| 
 | ||||
| Gives users finer grained control over cache hit behaviour using | ||||
| a callable which receives the input ``kwargs: dict``. | ||||
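A rough usage sketch; the keyword names, the yielded ``(cache_hit, value)`` pair, and whether the ``key`` callable receives the kwargs unpacked (as assumed below) or as a single ``dict`` are all our reading of the API rather than verified signatures::

    from contextlib import asynccontextmanager

    import trio
    import tractor


    @asynccontextmanager
    async def open_client(host: str, port: int, tag: str = ''):
        # stand-in for some expensive-to-open resource
        print(f'opening client for {host}:{port} ({tag})')
        yield (host, port)


    async def user_task(n: int):
        async with tractor.trionics.maybe_open_context(
            acm_func=open_client,
            kwargs={'host': 'localhost', 'port': 9999, 'tag': f'task{n}'},

            # only (host, port) decides cache hits so the differing
            # ``tag`` values above still share a single client
            key=lambda **kwargs: (kwargs['host'], kwargs['port']),

        ) as (cache_hit, client):
            print(f'task{n}: cache_hit={cache_hit} -> {client}')
            await trio.sleep(0.1)


    async def main():
        async with trio.open_nursery() as n:
            for i in range(3):
                n.start_soon(user_task, i)


    trio.run(main)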
|  | @ -0,0 +1,41 @@ | |||
| Add support for debug-lock blocking using a ``._debug.Lock._blocked: | ||||
| set[tuple]`` and add ids when no-more IPC connections with the | ||||
| root actor are detected. | ||||
| 
 | ||||
| This is an enhancement which (mostly) solves a lingering debugger | ||||
| locking race case we needed to handle: | ||||
| 
 | ||||
| - child crashes acquires TTY lock in root and attaches to ``pdb`` | ||||
| - child IPC goes down such that all channels to the root are broken | ||||
|   / non-functional. | ||||
| - root is stuck thinking the child is still in debug even though it | ||||
|   can't be contacted and the child actor machinery hasn't been | ||||
|   cancelled by its parent. | ||||
| - root gets stuck in deadlock with child since it won't send a cancel | ||||
|   request until the child is finished debugging (to avoid clobbering | ||||
|   a child that is actually using the debugger), but the child can't | ||||
|   unlock the debugger bc IPC is down and it can't contact the root. | ||||
| 
 | ||||
| To avoid this scenario add debug lock blocking list via | ||||
| `._debug.Lock._blocked: set[tuple]` which holds actor uids for any actor | ||||
| that is detected by the root as having no transport channel connections | ||||
| (of which at least one should exist if this sub-actor at some point | ||||
| acquired the debug lock). The root consequently checks this list for any | ||||
| actor that tries to (re)acquire the lock and blocks with | ||||
| a ``ContextCancelled``. Further, when a debug condition is tested in | ||||
| ``._runtime._invoke``, the context's ``._enter_debugger_on_cancel`` is | ||||
| set to `False` if the actor was put on the block list, in which case all | ||||
| post-mortem / crash handling will be bypassed for that task. | ||||
| 
 | ||||
| In theory this approach to block list management may cause problems | ||||
| where some nested child actor acquires and releases the lock multiple | ||||
| times and it gets stuck on the block list after the first use? If this | ||||
| turns out to be an issue we can try changing the strat so blocks are | ||||
| only added when the root has zero IPC peers left? | ||||
| 
 | ||||
| Further, this adds a root-locking-task side cancel scope, | ||||
| ``Lock._root_local_task_cs_in_debug``, which can be ``.cancel()``-ed by the root | ||||
| runtime when a stale lock is detected during the IPC channel testing. | ||||
| However, right now we're NOT using this since it seems to cause test | ||||
| failures likely due to causing pre-mature cancellation and maybe needs | ||||
| a bit more experimenting? | ||||
|  | @ -0,0 +1,19 @@ | |||
| Rework our ``.trionics.BroadcastReceiver`` internals to avoid method | ||||
| recursion and approach a design and interface closer to ``trio``'s | ||||
| ``MemoryReceiveChannel``. | ||||
| 
 | ||||
| The details of the internal changes include: | ||||
| 
 | ||||
| - implementing a ``BroadcastReceiver.receive_nowait()`` and using it | ||||
|   within the async ``.receive()`` thus avoiding recursion from | ||||
|   ``.receive()``. | ||||
| - failing over to an internal ``._receive_from_underlying()`` when the | ||||
|   ``_nowait()`` call raises ``trio.WouldBlock`` | ||||
| - adding ``BroadcastState.statistics()`` for debugging and testing both | ||||
|   internally and by users. | ||||
| - add an internal ``BroadcastReceiver._raise_on_lag: bool`` which can be | ||||
|   set to avoid ``Lagged`` raising for possible use cases where a user | ||||
|   wants to choose between a [cheap or nasty | ||||
|   pattern](https://zguide.zeromq.org/docs/chapter7/#The-Cheap-or-Nasty-Pattern) | ||||
|   for the particular stream (we use this in ``piker``'s dark clearing | ||||
|   engine to avoid fast feeds breaking during HFT periods). | ||||
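The non-recursive receive pattern described above, reduced to plain ``trio`` primitives (illustrative only, not the actual ``BroadcastReceiver`` internals)::

    import trio


    async def receive(rx: trio.MemoryReceiveChannel):
        # try the cheap sync path first, then fall back to the real
        # async receive instead of recursing back into ourselves
        try:
            return rx.receive_nowait()
        except trio.WouldBlock:
            return await rx.receive()


    async def main():
        tx, rx = trio.open_memory_channel(1)
        await tx.send('hello')
        print(await receive(rx))  # fast path: msg already buffered

        async with trio.open_nursery() as n:
            n.start_soon(tx.send, 'world')
            print(await receive(rx))  # slow path: waits on the sender


    trio.run(main)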
|  | @ -0,0 +1,11 @@ | |||
| Always ``list``-cast the ``mngrs`` input to | ||||
| ``.trionics.gather_contexts()`` and ensure it is non-empty, otherwise raise | ||||
| a ``ValueError``. | ||||
| 
 | ||||
| Turns out that trying to pass an inline-style generator comprehension | ||||
| doesn't seem to work inside the ``async with`` expression? Further, in | ||||
| such a case we can get a hang waiting on the all-entered event | ||||
| completion when the internal mngrs iteration is a noop. Instead we | ||||
| always greedily check a size and error on empty input; the lazy | ||||
| iteration of a generator input is not beneficial anyway since we're | ||||
| entering all manager instances in concurrent tasks. | ||||
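A usage sketch of the expected input style (an eagerly built ``list``, never a bare generator comprehension); the ``gather_contexts()`` call shape shown is inferred from the description above::

    from contextlib import asynccontextmanager

    import trio
    import tractor


    @asynccontextmanager
    async def open_resource(name: str):
        print(f'opening {name}')
        yield name
        print(f'closing {name}')


    async def main():
        # pass a materialized list; an empty or lazy input now raises
        # a ``ValueError`` instead of silently hanging
        mngrs = [open_resource(f'res{i}') for i in range(3)]

        async with tractor.trionics.gather_contexts(mngrs) as values:
            print(f'all entered: {values}')


    trio.run(main)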
|  | @ -0,0 +1,15 @@ | |||
| Fixes to ensure IPC (channel) breakage doesn't result in hung actor | ||||
| trees; the zombie reaping and general supervision machinery will always | ||||
| clean up and terminate. | ||||
| 
 | ||||
| This includes not only the (mostly minor) fixes to solve these cases but | ||||
| also a new extensive test suite in `test_advanced_faults.py` with an | ||||
| accompanying highly configurable example module-script in | ||||
| `examples/advanced_faults/ipc_failure_during_stream.py`. Tests ensure we | ||||
| never get hangs or zombies despite operating in debug mode and attempt to | ||||
| simulate all possible IPC transport failure cases for a local-host actor | ||||
| tree. | ||||
| 
 | ||||
| Further we simplify `Context.open_stream.__aexit__()` to just call | ||||
| `MsgStream.aclose()` directly more or less avoiding a pure duplicate | ||||
| code path. | ||||
|  | @ -0,0 +1,10 @@ | |||
| Always redraw the `pdbpp` prompt on `SIGINT` during REPL use. | ||||
| 
 | ||||
| There were recent changes to do with Python 3.10 that required us to pin | ||||
| to a specific commit in `pdbpp` which have recently been fixed minus | ||||
| this last issue with `SIGINT` shielding: not clobbering or not | ||||
| showing the `(Pdb++)` prompt on ctrl-c by the user. This repairs all | ||||
| that by firstly removing the standard KBI intercepting of the std lib's | ||||
| `pdb.Pdb._cmdloop()` as well as ensuring that only the actor with REPL | ||||
| control ever reports `SIGINT` handler log msgs and prompt redraws. With | ||||
| this we move back to using pypi `pdbpp` release. | ||||
|  | @ -0,0 +1,7 @@ | |||
| Drop `trio.Process.aclose()` usage, copy into our spawning code. | ||||
| 
 | ||||
| The details are laid out in https://github.com/goodboy/tractor/issues/330. | ||||
| `trio` changed its process running internals quite some time ago; this just copies | ||||
| out the small bit we needed (from the old `.aclose()`) for hard kills | ||||
| where a soft runtime cancel request fails and our "zombie killer" | ||||
| implementation kicks in. | ||||
|  | @ -0,0 +1,15 @@ | |||
| Switch to using the fork & fix of `pdb++`, `pdbp`: | ||||
| https://github.com/mdmintz/pdbp | ||||
| 
 | ||||
| Allows us to sidestep a variety of issues that aren't being addressed | ||||
| in the upstream project thanks to the hard work of @mdmintz! | ||||
| 
 | ||||
| We also include some default settings adjustments as per recent | ||||
| development on the fork: | ||||
| 
 | ||||
| - sticky mode is still turned on by default but now activates when | ||||
|   using the `ll` repl command. | ||||
| - turn off line truncation by default to avoid inter-line gaps when | ||||
|   resizing the terminal during use. | ||||
| - when using the backtrace cmd either by `w` or `bt`, the config | ||||
|   automatically switches to non-sticky mode. | ||||
|  | @ -0,0 +1,28 @@ | |||
| [tool.towncrier] | ||||
| package = "tractor" | ||||
| filename = "NEWS.rst" | ||||
| directory = "nooz/" | ||||
| version = "0.1.0a6" | ||||
| title_format = "tractor {version} ({project_date})" | ||||
| template = "nooz/_template.rst" | ||||
| all_bullets = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "feature" | ||||
|   name = "Features" | ||||
|   showcontent = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "bugfix" | ||||
|   name = "Bug Fixes" | ||||
|   showcontent = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "doc" | ||||
|   name = "Improved Documentation" | ||||
|   showcontent = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "trivial" | ||||
|   name = "Trivial/Internal Changes" | ||||
|   showcontent = true | ||||
|  | @ -1,7 +1,8 @@ | |||
| pytest | ||||
| pytest-trio | ||||
| pdbpp | ||||
| mypy<0.920 | ||||
| trio_typing<0.7.0 | ||||
| pytest-timeout | ||||
| pdbp | ||||
| mypy | ||||
| trio_typing | ||||
| pexpect | ||||
| towncrier | ||||
|  |  | |||
							
								
								
									
setup.py (37 changed lines)
| 
 | ||||
| setup( | ||||
|     name="tractor", | ||||
|     version='0.1.0a5.dev',  # alpha zone | ||||
|     description='structured concurrrent "actors"', | ||||
|     version='0.1.0a6dev0',  # alpha zone | ||||
|     description='structured concurrent `trio`-"actors"', | ||||
|     long_description=readme, | ||||
|     license='AGPLv3', | ||||
|     author='Tyler Goodlet', | ||||
|     maintainer='Tyler Goodlet', | ||||
|     maintainer_email='jgbt@protonmail.com', | ||||
|     maintainer_email='goodboy_foss@protonmail.com', | ||||
|     url='https://github.com/goodboy/tractor', | ||||
|     platforms=['linux', 'windows'], | ||||
|     packages=[ | ||||
|         'tractor', | ||||
|         'tractor.experimental', | ||||
|         'tractor.trionics', | ||||
|         'tractor.testing', | ||||
|     ], | ||||
|     install_requires=[ | ||||
| 
 | ||||
|         # trio related | ||||
|         'trio>0.8', | ||||
|         # proper range spec: | ||||
|         # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5 | ||||
|         'trio >= 0.22', | ||||
|         'async_generator', | ||||
|         'trio_typing', | ||||
|         'exceptiongroup', | ||||
| 
 | ||||
|         # tooling | ||||
|         'tricycle', | ||||
|         'trio_typing', | ||||
| 
 | ||||
|         # tooling | ||||
|         'colorlog', | ||||
|         'wrapt', | ||||
| 
 | ||||
|         # IPC serialization | ||||
|         'msgspec', | ||||
| 
 | ||||
|         # debug mode REPL | ||||
|         'pdbp', | ||||
| 
 | ||||
|         # pip ref docs on these specs: | ||||
|         # https://pip.pypa.io/en/stable/reference/requirement-specifiers/#examples | ||||
|         # and pep: | ||||
|         # https://peps.python.org/pep-0440/#version-specifiers | ||||
| 
 | ||||
|         # windows deps workaround for ``pdbpp`` | ||||
|         # https://github.com/pdbpp/pdbpp/issues/498 | ||||
|         # https://github.com/pdbpp/fancycompleter/issues/37 | ||||
|         'pyreadline3 ; platform_system == "Windows"', | ||||
| 
 | ||||
|         'pdbpp', | ||||
|         # 3.10 has an outstanding unreleased issue and `pdbpp` itself | ||||
|         # pins to patched forks of its own dependencies as well. | ||||
|         "pdbpp @ git+https://github.com/pdbpp/pdbpp@master#egg=pdbpp",  # noqa: E501 | ||||
| 
 | ||||
|         # serialization | ||||
|         'msgspec >= "0.4.0"' | ||||
| 
 | ||||
|     ], | ||||
|     tests_require=['pytest'], | ||||
|     python_requires=">=3.9", | ||||
|     python_requires=">=3.10", | ||||
|     keywords=[ | ||||
|         'trio', | ||||
|         'async', | ||||
|  | @ -88,7 +92,6 @@ setup( | |||
|         "Programming Language :: Python :: Implementation :: CPython", | ||||
|         "Programming Language :: Python :: 3 :: Only", | ||||
|         "Programming Language :: Python :: 3.10", | ||||
|         "Programming Language :: Python :: 3.9", | ||||
|         "Intended Audience :: Science/Research", | ||||
|         "Intended Audience :: Developers", | ||||
|         "Topic :: System :: Distributed Computing", | ||||
|  |  | |||
|  | @ -7,16 +7,91 @@ import os | |||
| import random | ||||
| import signal | ||||
| import platform | ||||
| import pathlib | ||||
| import time | ||||
| import inspect | ||||
| from functools import partial, wraps | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| # export for tests | ||||
| from tractor.testing import tractor_test  # noqa | ||||
| 
 | ||||
| 
 | ||||
| pytest_plugins = ['pytester'] | ||||
| 
 | ||||
| 
 | ||||
| def tractor_test(fn): | ||||
|     """ | ||||
|     Use: | ||||
| 
 | ||||
|     @tractor_test | ||||
|     async def test_whatever(): | ||||
|         await ... | ||||
| 
 | ||||
|     If fixtures: | ||||
| 
 | ||||
|         - ``arb_addr`` (a socket addr tuple where arbiter is listening) | ||||
|         - ``loglevel`` (logging level passed to tractor internals) | ||||
|         - ``start_method`` (subprocess spawning backend) | ||||
| 
 | ||||
|     are defined in the `pytest` fixture space they will be automatically | ||||
|     injected to tests declaring these funcargs. | ||||
|     """ | ||||
|     @wraps(fn) | ||||
|     def wrapper( | ||||
|         *args, | ||||
|         loglevel=None, | ||||
|         arb_addr=None, | ||||
|         start_method=None, | ||||
|         **kwargs | ||||
|     ): | ||||
|         # __tracebackhide__ = True | ||||
| 
 | ||||
|         if 'arb_addr' in inspect.signature(fn).parameters: | ||||
|             # injects test suite fixture value to test as well | ||||
|             # as `run()` | ||||
|             kwargs['arb_addr'] = arb_addr | ||||
| 
 | ||||
|         if 'loglevel' in inspect.signature(fn).parameters: | ||||
|             # allows test suites to define a 'loglevel' fixture | ||||
|             # that activates the internal logging | ||||
|             kwargs['loglevel'] = loglevel | ||||
| 
 | ||||
|         if start_method is None: | ||||
|             if platform.system() == "Windows": | ||||
|                 start_method = 'trio' | ||||
| 
 | ||||
|         if 'start_method' in inspect.signature(fn).parameters: | ||||
|             # set of subprocess spawning backends | ||||
|             kwargs['start_method'] = start_method | ||||
| 
 | ||||
|         if kwargs: | ||||
| 
 | ||||
|             # use explicit root actor start | ||||
| 
 | ||||
|             async def _main(): | ||||
|                 async with tractor.open_root_actor( | ||||
|                     # **kwargs, | ||||
|                     arbiter_addr=arb_addr, | ||||
|                     loglevel=loglevel, | ||||
|                     start_method=start_method, | ||||
| 
 | ||||
|                     # TODO: only enable when pytest is passed --pdb | ||||
|                     # debug_mode=True, | ||||
| 
 | ||||
|                 ): | ||||
|                     await fn(*args, **kwargs) | ||||
| 
 | ||||
|             main = _main | ||||
| 
 | ||||
|         else: | ||||
|             # use implicit root actor start | ||||
|             main = partial(fn, *args, **kwargs) | ||||
| 
 | ||||
|         return trio.run(main) | ||||
| 
 | ||||
|     return wrapper | ||||
| 
 | ||||
| 
 | ||||
| _arb_addr = '127.0.0.1', random.randint(1000, 9999) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -39,14 +114,21 @@ no_windows = pytest.mark.skipif( | |||
| ) | ||||
| 
 | ||||
| 
 | ||||
| def repodir(): | ||||
|     """Return the abspath to the repo directory. | ||||
|     """ | ||||
|     dirname = os.path.dirname | ||||
|     dirpath = os.path.abspath( | ||||
|         dirname(dirname(os.path.realpath(__file__))) | ||||
|         ) | ||||
|     return dirpath | ||||
| def repodir() -> pathlib.Path: | ||||
|     ''' | ||||
|     Return the abspath to the repo directory. | ||||
| 
 | ||||
|     ''' | ||||
|     # step up 2 parents: out of tests/ and into the <repo_dir> root | ||||
|     return pathlib.Path(__file__).parent.parent.absolute() | ||||
| 
 | ||||
| 
 | ||||
| def examples_dir() -> pathlib.Path: | ||||
|     ''' | ||||
|     Return the abspath to the examples directory as `pathlib.Path`. | ||||
| 
 | ||||
|     ''' | ||||
|     return repodir() / 'examples' | ||||
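For instance, the fault-injection suite added later in this changeset resolves its example script through these helpers; roughly:

script = examples_dir() / 'advanced_faults' / 'ipc_failure_during_stream.py'
assert script.is_file()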
| 
 | ||||
| 
 | ||||
| def pytest_addoption(parser): | ||||
|  | @ -64,11 +146,7 @@ def pytest_addoption(parser): | |||
| 
 | ||||
| def pytest_configure(config): | ||||
|     backend = config.option.spawn_backend | ||||
| 
 | ||||
|     if backend == 'mp': | ||||
|         tractor._spawn.try_set_start_method('spawn') | ||||
|     elif backend == 'trio': | ||||
|         tractor._spawn.try_set_start_method(backend) | ||||
|     tractor._spawn.try_set_start_method(backend) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope='session', autouse=True) | ||||
|  | @ -81,15 +159,18 @@ def loglevel(request): | |||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope='session') | ||||
| def spawn_backend(request): | ||||
| def spawn_backend(request) -> str: | ||||
|     return request.config.option.spawn_backend | ||||
| 
 | ||||
| 
 | ||||
| _ci_env: bool = os.environ.get('CI', False) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope='session') | ||||
| def ci_env() -> bool: | ||||
|     """Detect CI envoirment. | ||||
|     """ | ||||
|     return os.environ.get('TRAVIS', False) or os.environ.get('CI', False) | ||||
|     return _ci_env | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope='session') | ||||
|  | @ -99,24 +180,24 @@ def arb_addr(): | |||
| 
 | ||||
| def pytest_generate_tests(metafunc): | ||||
|     spawn_backend = metafunc.config.option.spawn_backend | ||||
| 
 | ||||
|     if not spawn_backend: | ||||
|         # XXX some weird windows bug with `pytest`? | ||||
|         spawn_backend = 'mp' | ||||
|     assert spawn_backend in ('mp', 'trio') | ||||
|         spawn_backend = 'trio' | ||||
| 
 | ||||
|     # TODO: maybe just use the literal `._spawn.SpawnMethodKey`? | ||||
|     assert spawn_backend in ( | ||||
|         'mp_spawn', | ||||
|         'mp_forkserver', | ||||
|         'trio', | ||||
|     ) | ||||
| 
 | ||||
|     # NOTE: this was previously used to dynamically parametrize tests when | ||||
|     # you just passed --spawn-backend=`mp` on the cli, but now we expect | ||||
|     # that cli input to be manually specified, BUT, maybe we'll do | ||||
|     # something like this again in the future? | ||||
|     if 'start_method' in metafunc.fixturenames: | ||||
|         if spawn_backend == 'mp': | ||||
|             from multiprocessing import get_all_start_methods | ||||
|             methods = get_all_start_methods() | ||||
|             if 'fork' in methods: | ||||
|                 # fork not available on windows, so check before | ||||
|                 # removing XXX: the fork method is in general | ||||
|                 # incompatible with trio's global scheduler state | ||||
|                 methods.remove('fork') | ||||
|         elif spawn_backend == 'trio': | ||||
|             methods = ['trio'] | ||||
| 
 | ||||
|         metafunc.parametrize("start_method", methods, scope='module') | ||||
|         metafunc.parametrize("start_method", [spawn_backend], scope='module') | ||||
| 
 | ||||
| 
 | ||||
| def sig_prog(proc, sig): | ||||
|  | @ -132,16 +213,22 @@ def sig_prog(proc, sig): | |||
| 
 | ||||
| 
 | ||||
| @pytest.fixture | ||||
| def daemon(loglevel, testdir, arb_addr): | ||||
|     """Run a daemon actor as a "remote arbiter". | ||||
|     """ | ||||
| def daemon( | ||||
|     loglevel: str, | ||||
|     testdir, | ||||
|     arb_addr: tuple[str, int], | ||||
| ): | ||||
|     ''' | ||||
|     Run a daemon actor as a "remote arbiter". | ||||
| 
 | ||||
|     ''' | ||||
|     if loglevel in ('trace', 'debug'): | ||||
|         # too much logging will lock up the subproc (smh) | ||||
|         loglevel = 'info' | ||||
| 
 | ||||
|     cmdargs = [ | ||||
|         sys.executable, '-c', | ||||
|         "import tractor; tractor.run_daemon([], arbiter_addr={}, loglevel={})" | ||||
|         "import tractor; tractor.run_daemon([], registry_addr={}, loglevel={})" | ||||
|         .format( | ||||
|             arb_addr, | ||||
|             "'{}'".format(loglevel) if loglevel else None) | ||||
|  |  | |||
|  | @ -0,0 +1,193 @@ | |||
| ''' | ||||
| Sketchy network blackoutz, ugly byzantine gens, can you hear the | ||||
| cancellation?.. | ||||
| 
 | ||||
| ''' | ||||
| from functools import partial | ||||
| 
 | ||||
| import pytest | ||||
| from _pytest.pathlib import import_path | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| from conftest import ( | ||||
|     examples_dir, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'debug_mode', | ||||
|     [False, True], | ||||
|     ids=['no_debug_mode', 'debug_mode'], | ||||
| ) | ||||
| @pytest.mark.parametrize( | ||||
|     'ipc_break', | ||||
|     [ | ||||
|         # no breaks | ||||
|         { | ||||
|             'break_parent_ipc_after': False, | ||||
|             'break_child_ipc_after': False, | ||||
|         }, | ||||
| 
 | ||||
|         # only parent breaks | ||||
|         { | ||||
|             'break_parent_ipc_after': 500, | ||||
|             'break_child_ipc_after': False, | ||||
|         }, | ||||
| 
 | ||||
|         # only child breaks | ||||
|         { | ||||
|             'break_parent_ipc_after': False, | ||||
|             'break_child_ipc_after': 500, | ||||
|         }, | ||||
| 
 | ||||
|         # both: break parent first | ||||
|         { | ||||
|             'break_parent_ipc_after': 500, | ||||
|             'break_child_ipc_after': 800, | ||||
|         }, | ||||
|         # both: break child first | ||||
|         { | ||||
|             'break_parent_ipc_after': 800, | ||||
|             'break_child_ipc_after': 500, | ||||
|         }, | ||||
| 
 | ||||
|     ], | ||||
|     ids=[ | ||||
|         'no_break', | ||||
|         'break_parent', | ||||
|         'break_child', | ||||
|         'break_both_parent_first', | ||||
|         'break_both_child_first', | ||||
|     ], | ||||
| ) | ||||
| def test_ipc_channel_break_during_stream( | ||||
|     debug_mode: bool, | ||||
|     spawn_backend: str, | ||||
|     ipc_break: dict | None, | ||||
| ): | ||||
|     ''' | ||||
|     Ensure we can have an IPC channel break its connection during | ||||
|     streaming and it's still possible for the (simulated) user to kill | ||||
|     the actor tree using SIGINT. | ||||
| 
 | ||||
|     We also verify the type of connection error expected in the parent | ||||
|     depending on which side of the IPC breaks first. | ||||
| 
 | ||||
|     ''' | ||||
|     if spawn_backend != 'trio': | ||||
|         if debug_mode: | ||||
|             pytest.skip('`debug_mode` only supported on `trio` spawner') | ||||
| 
 | ||||
|         # non-`trio` spawners should never hit the hang condition that | ||||
|         # requires the user to do ctl-c to cancel the actor tree. | ||||
|         expect_final_exc = trio.ClosedResourceError | ||||
| 
 | ||||
|     mod = import_path( | ||||
|         examples_dir() / 'advanced_faults' / 'ipc_failure_during_stream.py', | ||||
|         root=examples_dir(), | ||||
|     ) | ||||
| 
 | ||||
|     expect_final_exc = KeyboardInterrupt | ||||
| 
 | ||||
|     # when ONLY the child breaks we expect the parent to get a closed | ||||
|     # resource error on the next `MsgStream.receive()` and then fail out | ||||
|     # and cancel the child from there. | ||||
|     if ( | ||||
| 
 | ||||
|         # only child breaks | ||||
|         ( | ||||
|             ipc_break['break_child_ipc_after'] | ||||
|             and ipc_break['break_parent_ipc_after'] is False | ||||
|         ) | ||||
| 
 | ||||
|         # both break, but parent breaks first | ||||
|         or ( | ||||
|             ipc_break['break_child_ipc_after'] is not False | ||||
|             and ( | ||||
|                 ipc_break['break_parent_ipc_after'] | ||||
|                 > ipc_break['break_child_ipc_after'] | ||||
|             ) | ||||
|         ) | ||||
| 
 | ||||
|     ): | ||||
|         expect_final_exc = trio.ClosedResourceError | ||||
| 
 | ||||
|     # when the parent IPC side dies (even if the child's does as well | ||||
|     # but the child fails BEFORE the parent) we expect the channel to be | ||||
|     # sent a stop msg from the child at some point which will signal the | ||||
|     # parent that the stream has been terminated. | ||||
|     # NOTE: when the parent breaks "after" the child you get this same | ||||
|     # case as well, the child breaks the IPC channel with a stop msg | ||||
|     # before any closure takes place. | ||||
|     elif ( | ||||
|         # only parent breaks | ||||
|         ( | ||||
|             ipc_break['break_parent_ipc_after'] | ||||
|             and ipc_break['break_child_ipc_after'] is False | ||||
|         ) | ||||
| 
 | ||||
|         # both break, but child breaks first | ||||
|         or ( | ||||
|             ipc_break['break_parent_ipc_after'] is not False | ||||
|             and ( | ||||
|                 ipc_break['break_child_ipc_after'] | ||||
|                 > ipc_break['break_parent_ipc_after'] | ||||
|             ) | ||||
|         ) | ||||
|     ): | ||||
|         expect_final_exc = trio.EndOfChannel | ||||
| 
 | ||||
|     with pytest.raises(expect_final_exc): | ||||
|         trio.run( | ||||
|             partial( | ||||
|                 mod.main, | ||||
|                 debug_mode=debug_mode, | ||||
|                 start_method=spawn_backend, | ||||
|                 **ipc_break, | ||||
|             ) | ||||
|         ) | ||||
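Traced from the branching above, the expected terminal error per parametrization id works out roughly as follows (a summary sketch, not part of the test module):

# expected `pytest.raises()` exception per ipc_break id:
#
#   no_break                 -> KeyboardInterrupt
#   break_child              -> trio.ClosedResourceError
#   break_both_child_first   -> trio.ClosedResourceError
#   break_parent             -> trio.EndOfChannel
#   break_both_parent_first  -> trio.EndOfChannel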
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def break_ipc_after_started( | ||||
|     ctx: tractor.Context, | ||||
| ) -> None: | ||||
|     await ctx.started() | ||||
|     async with ctx.open_stream() as stream: | ||||
|         await stream.aclose() | ||||
|         await trio.sleep(0.2) | ||||
|         await ctx.chan.send(None) | ||||
|         print('child broke IPC and terminating') | ||||
| 
 | ||||
| 
 | ||||
| def test_stream_closed_right_after_ipc_break_and_zombie_lord_engages(): | ||||
|     ''' | ||||
|     Verify that if a subactor's IPC goes down just after bringing up a stream, | ||||
|     the parent can trigger a SIGINT and the child will be reaped out-of-IPC by | ||||
|     the localhost process supervision machinery: aka "zombie lord". | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.start_actor( | ||||
|                 'ipc_breaker', | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             with trio.move_on_after(1): | ||||
|                 async with ( | ||||
|                     portal.open_context( | ||||
|                         break_ipc_after_started | ||||
|                     ) as (ctx, sent), | ||||
|                 ): | ||||
|                     async with ctx.open_stream(): | ||||
|                         await trio.sleep(0.5) | ||||
| 
 | ||||
|                     print('parent waiting on context') | ||||
| 
 | ||||
|             print('parent exited context') | ||||
|             raise KeyboardInterrupt | ||||
| 
 | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run(main) | ||||
|  | @ -5,7 +5,6 @@ Advanced streaming patterns using bidirectional streams and contexts. | |||
| from collections import Counter | ||||
| import itertools | ||||
| import platform | ||||
| from typing import Set, Dict, List | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
|  | @ -15,7 +14,7 @@ def is_win(): | |||
|     return platform.system() == 'Windows' | ||||
| 
 | ||||
| 
 | ||||
| _registry: Dict[str, Set[tractor.ReceiveMsgStream]] = { | ||||
| _registry: dict[str, set[tractor.MsgStream]] = { | ||||
|     'even': set(), | ||||
|     'odd': set(), | ||||
| } | ||||
|  | @ -77,7 +76,7 @@ async def subscribe( | |||
| 
 | ||||
| async def consumer( | ||||
| 
 | ||||
|     subs: List[str], | ||||
|     subs: list[str], | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|  |  | |||
|  | @ -8,6 +8,10 @@ import platform | |||
| import time | ||||
| from itertools import repeat | ||||
| 
 | ||||
| from exceptiongroup import ( | ||||
|     BaseExceptionGroup, | ||||
|     ExceptionGroup, | ||||
| ) | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
|  | @ -56,29 +60,49 @@ def test_remote_error(arb_addr, args_err): | |||
|             arbiter_addr=arb_addr, | ||||
|         ) as nursery: | ||||
| 
 | ||||
|             # on a remote type error caused by bad input args | ||||
|             # this should raise directly which means we **don't** get | ||||
|             # an exception group outside the nursery since the error | ||||
|             # here and the far end task error are one and the same? | ||||
|             portal = await nursery.run_in_actor( | ||||
|                 assert_err, name='errorer', **args | ||||
|             ) | ||||
| 
 | ||||
|             # get result(s) from main task | ||||
|             try: | ||||
|                 # this means the root actor will also raise a local | ||||
|                 # parent task error and thus an eg will propagate out | ||||
|                 # of this actor nursery. | ||||
|                 await portal.result() | ||||
|             except tractor.RemoteActorError as err: | ||||
|                 assert err.type == errtype | ||||
|                 print("Look Maa that actor failed hard, hehh") | ||||
|                 raise | ||||
| 
 | ||||
|     with pytest.raises(tractor.RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
|     # ensure boxed errors | ||||
|     if args: | ||||
|         with pytest.raises(tractor.RemoteActorError) as excinfo: | ||||
|             trio.run(main) | ||||
| 
 | ||||
|     # ensure boxed error is correct | ||||
|     assert excinfo.value.type == errtype | ||||
|         assert excinfo.value.type == errtype | ||||
| 
 | ||||
|     else: | ||||
|         # the root task will also error on the `.result()` call | ||||
|         # so we expect an error from there AND the child. | ||||
|         with pytest.raises(BaseExceptionGroup) as excinfo: | ||||
|             trio.run(main) | ||||
| 
 | ||||
|         # ensure boxed errors | ||||
|         for exc in excinfo.value.exceptions: | ||||
|             assert exc.type == errtype | ||||
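For reference, outside of ``pytest.raises`` the same group unpacking reads as a plain ``except`` block; a sketch only, reusing ``main`` and ``errtype`` from the test above:

from exceptiongroup import BaseExceptionGroup

try:
    trio.run(main)
except BaseExceptionGroup as beg:
    # each leaf is a boxed `RemoteActorError` whose `.type` names the
    # original far-end exception type.
    for exc in beg.exceptions:
        assert exc.type == errtype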
| 
 | ||||
| 
 | ||||
| def test_multierror(arb_addr): | ||||
|     """Verify we raise a ``trio.MultiError`` out of a nursery where | ||||
|     ''' | ||||
|     Verify we raise a ``BaseExceptionGroup`` out of a nursery where | ||||
|     more than one actor errors. | ||||
|     """ | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|  | @ -95,10 +119,10 @@ def test_multierror(arb_addr): | |||
|                 print("Look Maa that first actor failed hard, hehh") | ||||
|                 raise | ||||
| 
 | ||||
|         # here we should get a `trio.MultiError` containing exceptions | ||||
|         # here we should get a ``BaseExceptionGroup`` containing exceptions | ||||
|         # from both subactors | ||||
| 
 | ||||
|     with pytest.raises(trio.MultiError): | ||||
|     with pytest.raises(BaseExceptionGroup): | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -107,7 +131,7 @@ def test_multierror(arb_addr): | |||
|     'num_subactors', range(25, 26), | ||||
| ) | ||||
| def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | ||||
|     """Verify we raise a ``trio.MultiError`` out of a nursery where | ||||
|     """Verify we raise a ``BaseExceptionGroup`` out of a nursery where | ||||
|     more than one actor errors and also with a delay before failure | ||||
|     to test failure during an ongoing spawning. | ||||
|     """ | ||||
|  | @ -123,10 +147,11 @@ def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | |||
|                     delay=delay | ||||
|                 ) | ||||
| 
 | ||||
|     with pytest.raises(trio.MultiError) as exc_info: | ||||
|     # with pytest.raises(trio.MultiError) as exc_info: | ||||
|     with pytest.raises(BaseExceptionGroup) as exc_info: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     assert exc_info.type == tractor.MultiError | ||||
|     assert exc_info.type == ExceptionGroup | ||||
|     err = exc_info.value | ||||
|     exceptions = err.exceptions | ||||
| 
 | ||||
|  | @ -214,8 +239,8 @@ async def test_cancel_infinite_streamer(start_method): | |||
|     [ | ||||
|         # daemon actors sit idle while single task actors error out | ||||
|         (1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None), | ||||
|         (2, tractor.MultiError, AssertionError, (assert_err, {}), None), | ||||
|         (3, tractor.MultiError, AssertionError, (assert_err, {}), None), | ||||
|         (2, BaseExceptionGroup, AssertionError, (assert_err, {}), None), | ||||
|         (3, BaseExceptionGroup, AssertionError, (assert_err, {}), None), | ||||
| 
 | ||||
|         # 1 daemon actor errors out while single task actors sleep forever | ||||
|         (3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}), | ||||
|  | @ -226,7 +251,7 @@ async def test_cancel_infinite_streamer(start_method): | |||
|          (do_nuthin, {}), (assert_err, {'delay': 1}, True)), | ||||
|         # daemon actors complete quickly while single task | ||||
|         # actors error after a brief delay | ||||
|         (3, tractor.MultiError, AssertionError, | ||||
|         (3, BaseExceptionGroup, AssertionError, | ||||
|          (assert_err, {'delay': 1}), (do_nuthin, {}, False)), | ||||
|     ], | ||||
|     ids=[ | ||||
|  | @ -293,7 +318,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): | |||
|         # should error here with a ``RemoteActorError`` or ``MultiError`` | ||||
| 
 | ||||
|     except first_err as err: | ||||
|         if isinstance(err, tractor.MultiError): | ||||
|         if isinstance(err, BaseExceptionGroup): | ||||
|             assert len(err.exceptions) == num_actors | ||||
|             for exc in err.exceptions: | ||||
|                 if isinstance(exc, tractor.RemoteActorError): | ||||
|  | @ -337,7 +362,7 @@ async def spawn_and_error(breadth, depth) -> None: | |||
| @tractor_test | ||||
| async def test_nested_multierrors(loglevel, start_method): | ||||
|     ''' | ||||
|     Test that failed actor sets are wrapped in `trio.MultiError`s. This | ||||
|     Test that failed actor sets are wrapped in `BaseExceptionGroup`s. This | ||||
|     test goes only 2 nurseries deep but we should eventually have tests | ||||
|     for arbitrary n-depth actor trees. | ||||
| 
 | ||||
|  | @ -365,7 +390,7 @@ async def test_nested_multierrors(loglevel, start_method): | |||
|                         breadth=subactor_breadth, | ||||
|                         depth=depth, | ||||
|                     ) | ||||
|         except trio.MultiError as err: | ||||
|         except BaseExceptionGroup as err: | ||||
|             assert len(err.exceptions) == subactor_breadth | ||||
|             for subexc in err.exceptions: | ||||
| 
 | ||||
|  | @ -383,10 +408,10 @@ async def test_nested_multierrors(loglevel, start_method): | |||
|                         assert subexc.type in ( | ||||
|                             tractor.RemoteActorError, | ||||
|                             trio.Cancelled, | ||||
|                             trio.MultiError | ||||
|                             BaseExceptionGroup, | ||||
|                         ) | ||||
| 
 | ||||
|                     elif isinstance(subexc, trio.MultiError): | ||||
|                     elif isinstance(subexc, BaseExceptionGroup): | ||||
|                         for subsub in subexc.exceptions: | ||||
| 
 | ||||
|                             if subsub in (tractor.RemoteActorError,): | ||||
|  | @ -394,7 +419,7 @@ async def test_nested_multierrors(loglevel, start_method): | |||
| 
 | ||||
|                             assert type(subsub) in ( | ||||
|                                 trio.Cancelled, | ||||
|                                 trio.MultiError, | ||||
|                                 BaseExceptionGroup, | ||||
|                             ) | ||||
|                 else: | ||||
|                     assert isinstance(subexc, tractor.RemoteActorError) | ||||
|  | @ -406,13 +431,13 @@ async def test_nested_multierrors(loglevel, start_method): | |||
|                     if is_win(): | ||||
|                         if isinstance(subexc, tractor.RemoteActorError): | ||||
|                             assert subexc.type in ( | ||||
|                                 trio.MultiError, | ||||
|                                 BaseExceptionGroup, | ||||
|                                 tractor.RemoteActorError | ||||
|                             ) | ||||
|                         else: | ||||
|                             assert isinstance(subexc, trio.MultiError) | ||||
|                             assert isinstance(subexc, BaseExceptionGroup) | ||||
|                     else: | ||||
|                         assert subexc.type is trio.MultiError | ||||
|                         assert subexc.type is ExceptionGroup | ||||
|                 else: | ||||
|                     assert subexc.type in ( | ||||
|                         tractor.RemoteActorError, | ||||
|  | @ -435,7 +460,7 @@ def test_cancel_via_SIGINT( | |||
|         with trio.fail_after(2): | ||||
|             async with tractor.open_nursery() as tn: | ||||
|                 await tn.start_actor('sucka') | ||||
|                 if spawn_backend == 'mp': | ||||
|                 if 'mp' in spawn_backend: | ||||
|                     time.sleep(0.1) | ||||
|                 os.kill(pid, signal.SIGINT) | ||||
|                 await trio.sleep_forever() | ||||
|  | @ -474,7 +499,7 @@ def test_cancel_via_SIGINT_other_task( | |||
|         with trio.fail_after(timeout): | ||||
|             async with trio.open_nursery() as n: | ||||
|                 await n.start(spawn_and_sleep_forever) | ||||
|                 if spawn_backend == 'mp': | ||||
|                 if 'mp' in spawn_backend: | ||||
|                     time.sleep(0.1) | ||||
|                 os.kill(pid, signal.SIGINT) | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,5 +1,6 @@ | |||
| import itertools | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| from tractor import open_actor_cluster | ||||
|  | @ -11,26 +12,72 @@ from conftest import tractor_test | |||
| MESSAGE = 'tractoring at full speed' | ||||
| 
 | ||||
| 
 | ||||
| def test_empty_mngrs_input_raises() -> None: | ||||
| 
 | ||||
|     async def main(): | ||||
|         with trio.fail_after(1): | ||||
|             async with ( | ||||
|                 open_actor_cluster( | ||||
|                     modules=[__name__], | ||||
| 
 | ||||
|                     # NOTE: ensure we can passthrough runtime opts | ||||
|                     loglevel='info', | ||||
|                     # debug_mode=True, | ||||
| 
 | ||||
|                 ) as portals, | ||||
| 
 | ||||
|                 gather_contexts( | ||||
|                     # NOTE: it's the use of inline-generator syntax | ||||
|                     # here that causes the empty input. | ||||
|                     mngrs=( | ||||
|                         p.open_context(worker) for p in portals.values() | ||||
|                     ), | ||||
|                 ), | ||||
|             ): | ||||
|                 assert 0 | ||||
| 
 | ||||
|     with pytest.raises(ValueError): | ||||
|         trio.run(main) | ||||
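The failure being asserted above comes from handing ``gather_contexts()`` a lazy generator; the working pattern used further down in this file materializes the managers as a list instead. A rough sketch:

# works: the list comprehension is materialized before `gather_contexts()`
# inspects its input (`portals`/`worker` as defined in this module).
async with gather_contexts(
    mngrs=[p.open_context(worker) for p in portals.values()],
) as contexts:
    ...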
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def worker(ctx: tractor.Context) -> None: | ||||
| async def worker( | ||||
|     ctx: tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     await ctx.started() | ||||
|     async with ctx.open_stream(backpressure=True) as stream: | ||||
| 
 | ||||
|     async with ctx.open_stream( | ||||
|         backpressure=True, | ||||
|     ) as stream: | ||||
| 
 | ||||
|         # TODO: this with the below assert causes a hang bug? | ||||
|         # with trio.move_on_after(1): | ||||
| 
 | ||||
|         async for msg in stream: | ||||
|             # do something with msg | ||||
|             print(msg) | ||||
|             assert msg == MESSAGE | ||||
| 
 | ||||
|         # TODO: does this ever cause a hang | ||||
|         # assert 0 | ||||
| 
 | ||||
| 
 | ||||
| @tractor_test | ||||
| async def test_streaming_to_actor_cluster() -> None: | ||||
| 
 | ||||
|     async with ( | ||||
|         open_actor_cluster(modules=[__name__]) as portals, | ||||
| 
 | ||||
|         gather_contexts( | ||||
|             mngrs=[p.open_context(worker) for p in portals.values()], | ||||
|         ) as contexts, | ||||
| 
 | ||||
|         gather_contexts( | ||||
|             mngrs=[ctx[0].open_stream() for ctx in contexts], | ||||
|         ) as streams, | ||||
| 
 | ||||
|     ): | ||||
|         with trio.move_on_after(1): | ||||
|             for stream in itertools.cycle(streams): | ||||
|  |  | |||
|  | @ -265,42 +265,44 @@ async def test_callee_closes_ctx_after_stream_open(): | |||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_context( | ||||
|             close_ctx_immediately, | ||||
|         with trio.fail_after(2): | ||||
|             async with portal.open_context( | ||||
|                 close_ctx_immediately, | ||||
| 
 | ||||
|             # flag to avoid waiting the final result | ||||
|             # cancel_on_exit=True, | ||||
|                 # flag to avoid waiting the final result | ||||
|                 # cancel_on_exit=True, | ||||
| 
 | ||||
|         ) as (ctx, sent): | ||||
|             ) as (ctx, sent): | ||||
| 
 | ||||
|             assert sent is None | ||||
|                 assert sent is None | ||||
| 
 | ||||
|             with trio.fail_after(0.5): | ||||
|                 async with ctx.open_stream() as stream: | ||||
|                 with trio.fail_after(0.5): | ||||
|                     async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|                     # should fall through since ``StopAsyncIteration`` | ||||
|                     # should be raised through translation of | ||||
|                     # a ``trio.EndOfChannel`` by | ||||
|                     # ``trio.abc.ReceiveChannel.__anext__()`` | ||||
|                     async for _ in stream: | ||||
|                         assert 0 | ||||
|                     else: | ||||
|                         # should fall through since ``StopAsyncIteration`` | ||||
|                         # should be raised through translation of | ||||
|                         # a ``trio.EndOfChannel`` by | ||||
|                         # ``trio.abc.ReceiveChannel.__anext__()`` | ||||
|                         async for _ in stream: | ||||
|                             assert 0 | ||||
|                         else: | ||||
| 
 | ||||
|                         # verify stream is now closed | ||||
|                         try: | ||||
|                             await stream.receive() | ||||
|                         except trio.EndOfChannel: | ||||
|                             # verify stream is now closed | ||||
|                             try: | ||||
|                                 await stream.receive() | ||||
|                             except trio.EndOfChannel: | ||||
|                                 pass | ||||
| 
 | ||||
|                 # TODO: we should just raise the closed resource err | ||||
|                 # directly here to enforce not allowing a re-open | ||||
|                 # of a stream to the context (at least until a time of | ||||
|                 # if/when we decide that's a good idea?) | ||||
|                 try: | ||||
|                     with trio.fail_after(0.5): | ||||
|                         async with ctx.open_stream() as stream: | ||||
|                             pass | ||||
| 
 | ||||
|             # TODO: should be just raise the closed resource err | ||||
|             # directly here to enforce not allowing a re-open | ||||
|             # of a stream to the context (at least until a time of | ||||
|             # if/when we decide that's a good idea?) | ||||
|             try: | ||||
|                 async with ctx.open_stream() as stream: | ||||
|                 except trio.ClosedResourceError: | ||||
|                     pass | ||||
|             except trio.ClosedResourceError: | ||||
|                 pass | ||||
| 
 | ||||
|         await portal.cancel_actor() | ||||
| 
 | ||||
|  | @ -569,7 +571,7 @@ def test_one_end_stream_not_opened(overrun_by): | |||
| 
 | ||||
|     ''' | ||||
|     overrunner, buf_size_increase, entrypoint = overrun_by | ||||
|     from tractor._actor import Actor | ||||
|     from tractor._runtime import Actor | ||||
|     buf_size = buf_size_increase + Actor.msg_buffer_size | ||||
| 
 | ||||
|     async def main(): | ||||
|  |  | |||
										
											
File diff suppressed because it is too large
							|  | @ -12,17 +12,17 @@ import shutil | |||
| 
 | ||||
| import pytest | ||||
| 
 | ||||
| from conftest import repodir | ||||
| 
 | ||||
| 
 | ||||
| def examples_dir(): | ||||
|     """Return the abspath to the examples directory. | ||||
|     """ | ||||
|     return os.path.join(repodir(), 'examples') | ||||
| from conftest import ( | ||||
|     examples_dir, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture | ||||
| def run_example_in_subproc(loglevel, testdir, arb_addr): | ||||
| def run_example_in_subproc( | ||||
|     loglevel: str, | ||||
|     testdir, | ||||
|     arb_addr: tuple[str, int], | ||||
| ): | ||||
| 
 | ||||
|     @contextmanager | ||||
|     def run(script_code): | ||||
|  | @ -32,8 +32,8 @@ def run_example_in_subproc(loglevel, testdir, arb_addr): | |||
|             # on windows we need to create a special __main__.py which will | ||||
|             # be executed with ``python -m <modulename>``.. | ||||
|             shutil.copyfile( | ||||
|                 os.path.join(examples_dir(), '__main__.py'), | ||||
|                 os.path.join(str(testdir), '__main__.py') | ||||
|                 examples_dir() / '__main__.py', | ||||
|                 str(testdir / '__main__.py'), | ||||
|             ) | ||||
| 
 | ||||
|             # drop the ``if __name__ == '__main__'`` guard onwards from | ||||
|  | @ -81,11 +81,15 @@ def run_example_in_subproc(loglevel, testdir, arb_addr): | |||
|     'example_script', | ||||
| 
 | ||||
|     # walk yields: (dirpath, dirnames, filenames) | ||||
|     [(p[0], f) for p in os.walk(examples_dir()) for f in p[2] | ||||
|     [ | ||||
|         (p[0], f) for p in os.walk(examples_dir()) for f in p[2] | ||||
| 
 | ||||
|         if '__' not in f | ||||
|         and f[0] != '_' | ||||
|         and 'debugging' not in p[0]], | ||||
|         and 'debugging' not in p[0] | ||||
|         and 'integration' not in p[0] | ||||
|         and 'advanced_faults' not in p[0] | ||||
|     ], | ||||
| 
 | ||||
|     ids=lambda t: t[1], | ||||
| ) | ||||
|  | @ -113,9 +117,19 @@ def test_example(run_example_in_subproc, example_script): | |||
|             # print(f'STDOUT: {out}') | ||||
| 
 | ||||
|             # if we get some gnarly output let's aggregate and raise | ||||
|             errmsg = err.decode() | ||||
|             errlines = errmsg.splitlines() | ||||
|             if err and 'Error' in errlines[-1]: | ||||
|                 raise Exception(errmsg) | ||||
|             if err: | ||||
|                 errmsg = err.decode() | ||||
|                 errlines = errmsg.splitlines() | ||||
|                 last_error = errlines[-1] | ||||
|                 if ( | ||||
|                     'Error' in last_error | ||||
| 
 | ||||
|                     # XXX: currently we print this to console, but maybe | ||||
|                     # shouldn't eventually once we figure out what's | ||||
|                     # a better way to be explicit about aio side | ||||
|                     # cancels? | ||||
|                     and 'asyncio.exceptions.CancelledError' not in last_error | ||||
|                 ): | ||||
|                     raise Exception(errmsg) | ||||
| 
 | ||||
|             assert proc.returncode == 0 | ||||
|  |  | |||
|  | @ -8,6 +8,7 @@ import builtins | |||
| import itertools | ||||
| import importlib | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
|  | @ -170,11 +171,11 @@ async def trio_ctx( | |||
|     # message. | ||||
|     with trio.fail_after(2): | ||||
|         async with ( | ||||
|             trio.open_nursery() as n, | ||||
| 
 | ||||
|             tractor.to_asyncio.open_channel_from( | ||||
|                 sleep_and_err, | ||||
|             ) as (first, chan), | ||||
| 
 | ||||
|             trio.open_nursery() as n, | ||||
|         ): | ||||
| 
 | ||||
|             assert first == 'start' | ||||
|  | @ -203,24 +204,25 @@ def test_context_spawns_aio_task_that_errors( | |||
|     ''' | ||||
|     async def main(): | ||||
| 
 | ||||
|         async with tractor.open_nursery() as n: | ||||
|             p = await n.start_actor( | ||||
|                 'aio_daemon', | ||||
|                 enable_modules=[__name__], | ||||
|                 infect_asyncio=True, | ||||
|                 # debug_mode=True, | ||||
|                 loglevel='cancel', | ||||
|             ) | ||||
|             async with p.open_context( | ||||
|                 trio_ctx, | ||||
|             ) as (ctx, first): | ||||
|         with trio.fail_after(2): | ||||
|             async with tractor.open_nursery() as n: | ||||
|                 p = await n.start_actor( | ||||
|                     'aio_daemon', | ||||
|                     enable_modules=[__name__], | ||||
|                     infect_asyncio=True, | ||||
|                     # debug_mode=True, | ||||
|                     loglevel='cancel', | ||||
|                 ) | ||||
|                 async with p.open_context( | ||||
|                     trio_ctx, | ||||
|                 ) as (ctx, first): | ||||
| 
 | ||||
|                 assert first == 'start' | ||||
|                     assert first == 'start' | ||||
| 
 | ||||
|                 if parent_cancels: | ||||
|                     await p.cancel_actor() | ||||
|                     if parent_cancels: | ||||
|                         await p.cancel_actor() | ||||
| 
 | ||||
|                 await trio.sleep_forever() | ||||
|                     await trio.sleep_forever() | ||||
| 
 | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
|  | @ -408,11 +410,12 @@ def test_trio_error_cancels_intertask_chan(arb_addr): | |||
|             # should trigger remote actor error | ||||
|             await portal.result() | ||||
| 
 | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|     with pytest.raises(BaseExceptionGroup) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     # ensure boxed error is correct | ||||
|     assert excinfo.value.type == Exception | ||||
|     # ensure boxed errors | ||||
|     for exc in excinfo.value.exceptions: | ||||
|         assert exc.type == Exception | ||||
| 
 | ||||
| 
 | ||||
| def test_trio_closes_early_and_channel_exits(arb_addr): | ||||
|  | @ -441,11 +444,12 @@ def test_aio_errors_and_channel_propagates_and_closes(arb_addr): | |||
|             # should trigger remote actor error | ||||
|             await portal.result() | ||||
| 
 | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|     with pytest.raises(BaseExceptionGroup) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     # ensure boxed error is correct | ||||
|     assert excinfo.value.type == Exception | ||||
|     # ensure boxed errors | ||||
|     for exc in excinfo.value.exceptions: | ||||
|         assert exc.type == Exception | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
|  |  | |||
|  | @ -7,9 +7,10 @@ import platform | |||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| from tractor.testing import tractor_test | ||||
| import pytest | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| def test_must_define_ctx(): | ||||
| 
 | ||||
|  | @ -250,7 +251,7 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend): | |||
| 
 | ||||
|     results, diff = time_quad_ex | ||||
|     assert results | ||||
|     this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 2.666 | ||||
|     this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 3 | ||||
|     assert diff < this_fast | ||||
| 
 | ||||
| 
 | ||||
|  | @ -11,25 +11,18 @@ from conftest import tractor_test | |||
| 
 | ||||
| 
 | ||||
| @pytest.mark.trio | ||||
| async def test_no_arbitter(): | ||||
| async def test_no_runtime(): | ||||
|     """An arbitter must be established before any nurseries | ||||
|     can be created. | ||||
| 
 | ||||
|     (In other words ``tractor.open_root_actor()`` must be engaged at | ||||
|     some point?) | ||||
|     """ | ||||
|     with pytest.raises(RuntimeError): | ||||
|         with tractor.open_nursery(): | ||||
|     with pytest.raises(RuntimeError): | ||||
|         async with tractor.find_actor('doggy'): | ||||
|             pass | ||||
| 
 | ||||
| 
 | ||||
| def test_no_main(): | ||||
|     """An async function **must** be passed to ``tractor.run()``. | ||||
|     """ | ||||
|     with pytest.raises(TypeError): | ||||
|         tractor.run(None) | ||||
| 
 | ||||
| 
 | ||||
| @tractor_test | ||||
| async def test_self_is_registered(arb_addr): | ||||
|     "Verify waiting on the arbiter to register itself using the standard api." | ||||
|  |  | |||
|  | @ -4,9 +4,10 @@ from itertools import cycle | |||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| from tractor.testing import tractor_test | ||||
| from tractor.experimental import msgpub | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| def test_type_checks(): | ||||
| 
 | ||||
|  |  | |||
|  | @ -0,0 +1,73 @@ | |||
| """ | ||||
| Verifying internal runtime state and undocumented extras. | ||||
| 
 | ||||
| """ | ||||
| import os | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| _file_path: str = '' | ||||
| 
 | ||||
| 
 | ||||
| def unlink_file(): | ||||
|     print('Removing tmp file!') | ||||
|     os.remove(_file_path) | ||||
| 
 | ||||
| 
 | ||||
| async def crash_and_clean_tmpdir( | ||||
|     tmp_file_path: str, | ||||
|     error: bool = True, | ||||
| ): | ||||
|     global _file_path | ||||
|     _file_path = tmp_file_path | ||||
| 
 | ||||
|     actor = tractor.current_actor() | ||||
|     actor.lifetime_stack.callback(unlink_file) | ||||
| 
 | ||||
|     assert os.path.isfile(tmp_file_path) | ||||
|     await trio.sleep(0.1) | ||||
|     if error: | ||||
|         assert 0 | ||||
|     else: | ||||
|         actor.cancel_soon() | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'error_in_child', | ||||
|     [True, False], | ||||
| ) | ||||
| @tractor_test | ||||
| async def test_lifetime_stack_wipes_tmpfile( | ||||
|     tmp_path, | ||||
|     error_in_child: bool, | ||||
| ): | ||||
|     child_tmp_file = tmp_path / "child.txt" | ||||
|     child_tmp_file.touch() | ||||
|     assert child_tmp_file.exists() | ||||
|     path = str(child_tmp_file) | ||||
| 
 | ||||
|     try: | ||||
|         with trio.move_on_after(0.5): | ||||
|             async with tractor.open_nursery() as n: | ||||
|                     await (  # inlined portal | ||||
|                         await n.run_in_actor( | ||||
|                             crash_and_clean_tmpdir, | ||||
|                             tmp_file_path=path, | ||||
|                             error=error_in_child, | ||||
|                         ) | ||||
|                     ).result() | ||||
| 
 | ||||
|     except ( | ||||
|         tractor.RemoteActorError, | ||||
|         tractor.BaseExceptionGroup, | ||||
|     ): | ||||
|         pass | ||||
| 
 | ||||
|     # tmp file should have been wiped by | ||||
|     # teardown stack. | ||||
|     assert not child_tmp_file.exists() | ||||
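The ``lifetime_stack`` exercised above appears to follow the ``contextlib.ExitStack`` callback API, so any actor task can register teardown work that runs when the runtime exits, even after a crash. A minimal hedged sketch:

import tractor

async def register_cleanup():
    actor = tractor.current_actor()
    # runs during actor teardown, crash or not
    actor.lifetime_stack.callback(lambda: print('actor going down'))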
|  | @ -1,7 +1,8 @@ | |||
| """ | ||||
| Spawning basics | ||||
| 
 | ||||
| """ | ||||
| from typing import Dict, Tuple, Optional | ||||
| from typing import Optional | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
|  | @ -14,8 +15,8 @@ data_to_pass_down = {'doggy': 10, 'kitty': 4} | |||
| 
 | ||||
| async def spawn( | ||||
|     is_arbiter: bool, | ||||
|     data: Dict, | ||||
|     arb_addr: Tuple[str, int], | ||||
|     data: dict, | ||||
|     arb_addr: tuple[str, int], | ||||
| ): | ||||
|     namespaces = [__name__] | ||||
| 
 | ||||
|  | @ -141,7 +142,7 @@ def test_loglevel_propagated_to_subactor( | |||
|     capfd, | ||||
|     arb_addr, | ||||
| ): | ||||
|     if start_method == 'forkserver': | ||||
|     if start_method == 'mp_forkserver': | ||||
|         pytest.skip( | ||||
|             "a bug with `capfd` seems to make forkserver capture not work?") | ||||
| 
 | ||||
|  | @ -150,13 +151,13 @@ def test_loglevel_propagated_to_subactor( | |||
|     async def main(): | ||||
|         async with tractor.open_nursery( | ||||
|             name='arbiter', | ||||
|             loglevel=level, | ||||
|             start_method=start_method, | ||||
|             arbiter_addr=arb_addr, | ||||
| 
 | ||||
|         ) as tn: | ||||
|             await tn.run_in_actor( | ||||
|                 check_loglevel, | ||||
|                 loglevel=level, | ||||
|                 level=level, | ||||
|             ) | ||||
| 
 | ||||
|  |  | |||
|  | @ -6,13 +6,16 @@ from contextlib import asynccontextmanager | |||
| from functools import partial | ||||
| from itertools import cycle | ||||
| import time | ||||
| from typing import Optional, List, Tuple | ||||
| from typing import Optional | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| from trio.lowlevel import current_task | ||||
| import tractor | ||||
| from tractor.trionics import broadcast_receiver, Lagged | ||||
| from tractor.trionics import ( | ||||
|     broadcast_receiver, | ||||
|     Lagged, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
|  | @ -37,7 +40,7 @@ async def echo_sequences( | |||
| 
 | ||||
| async def ensure_sequence( | ||||
| 
 | ||||
|     stream: tractor.ReceiveMsgStream, | ||||
|     stream: tractor.MsgStream, | ||||
|     sequence: list, | ||||
|     delay: Optional[float] = None, | ||||
| 
 | ||||
|  | @ -62,8 +65,8 @@ async def ensure_sequence( | |||
| @asynccontextmanager | ||||
| async def open_sequence_streamer( | ||||
| 
 | ||||
|     sequence: List[int], | ||||
|     arb_addr: Tuple[str, int], | ||||
|     sequence: list[int], | ||||
|     arb_addr: tuple[str, int], | ||||
|     start_method: str, | ||||
| 
 | ||||
| ) -> tractor.MsgStream: | ||||
|  | @ -211,7 +214,8 @@ def test_faster_task_to_recv_is_cancelled_by_slower( | |||
|     arb_addr, | ||||
|     start_method, | ||||
| ): | ||||
|     '''Ensure that if a faster task consuming from a stream is cancelled | ||||
|     ''' | ||||
|     Ensure that if a faster task consuming from a stream is cancelled | ||||
|     the slower task can continue to receive all expected values. | ||||
| 
 | ||||
|     ''' | ||||
|  | @ -460,3 +464,51 @@ def test_first_recver_is_cancelled(): | |||
|                     assert value == 1 | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_no_raise_on_lag(): | ||||
|     ''' | ||||
|     Run a simple 2-task broadcast where one task is slow but configured | ||||
|     so that it does not raise `Lagged` on overruns using | ||||
|     `raise_on_lag=False` and verify that the task does not raise. | ||||
| 
 | ||||
|     ''' | ||||
|     size = 100 | ||||
|     tx, rx = trio.open_memory_channel(size) | ||||
|     brx = broadcast_receiver(rx, size) | ||||
| 
 | ||||
|     async def slow(): | ||||
|         async with brx.subscribe( | ||||
|             raise_on_lag=False, | ||||
|         ) as br: | ||||
|             async for msg in br: | ||||
|                 print(f'slow task got: {msg}') | ||||
|                 await trio.sleep(0.1) | ||||
| 
 | ||||
|     async def fast(): | ||||
|         async with brx.subscribe() as br: | ||||
|             async for msg in br: | ||||
|                 print(f'fast task got: {msg}') | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with ( | ||||
|             tractor.open_root_actor( | ||||
|                 # NOTE: so we see the warning msg emitted by the bcaster | ||||
|                 # internals when the no raise flag is set. | ||||
|                 loglevel='warning', | ||||
|             ), | ||||
|             trio.open_nursery() as n, | ||||
|         ): | ||||
|             n.start_soon(slow) | ||||
|             n.start_soon(fast) | ||||
| 
 | ||||
|             for i in range(1000): | ||||
|                 await tx.send(i) | ||||
| 
 | ||||
|             # simulate user nailing ctl-c after realizing | ||||
|             # there's a lag in the slow task. | ||||
|             await trio.sleep(1) | ||||
|             raise KeyboardInterrupt | ||||
| 
 | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run(main) | ||||
|  |  | |||
|  | @ -1,7 +0,0 @@ | |||
| [tool.towncrier] | ||||
| package = "tractor" | ||||
| filename = "NEWS.rst" | ||||
| directory = "nooz/" | ||||
| title_format = "tractor {version} ({project_date})" | ||||
| version = "0.1.0a4" | ||||
| template = "nooz/_template.rst" | ||||
|  | @ -18,13 +18,12 @@ | |||
| tractor: structured concurrent "actors". | ||||
| 
 | ||||
| """ | ||||
| from trio import MultiError | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| 
 | ||||
| from ._clustering import open_actor_cluster | ||||
| from ._ipc import Channel | ||||
| from ._streaming import ( | ||||
|     Context, | ||||
|     ReceiveMsgStream, | ||||
|     MsgStream, | ||||
|     stream, | ||||
|     context, | ||||
|  | @ -36,27 +35,37 @@ from ._discovery import ( | |||
|     query_actor, | ||||
| ) | ||||
| from ._supervise import open_nursery | ||||
| from ._state import current_actor, is_root_process | ||||
| from ._state import ( | ||||
|     current_actor, | ||||
|     is_root_process, | ||||
| ) | ||||
| from ._exceptions import ( | ||||
|     RemoteActorError, | ||||
|     ModuleNotExposed, | ||||
|     ContextCancelled, | ||||
| ) | ||||
| from ._debug import breakpoint, post_mortem | ||||
| from ._debug import ( | ||||
|     breakpoint, | ||||
|     post_mortem, | ||||
| ) | ||||
| from . import msg | ||||
| from ._root import run, run_daemon, open_root_actor | ||||
| from ._root import ( | ||||
|     run_daemon, | ||||
|     open_root_actor, | ||||
| ) | ||||
| from ._portal import Portal | ||||
| from ._runtime import Actor | ||||
| 
 | ||||
| 
 | ||||
| __all__ = [ | ||||
|     'Actor', | ||||
|     'Channel', | ||||
|     'Context', | ||||
|     'ContextCancelled', | ||||
|     'ModuleNotExposed', | ||||
|     'MsgStream', | ||||
|     'MultiError', | ||||
|     'BaseExceptionGroup', | ||||
|     'Portal', | ||||
|     'ReceiveMsgStream', | ||||
|     'RemoteActorError', | ||||
|     'breakpoint', | ||||
|     'context', | ||||
|  | @ -70,7 +79,6 @@ __all__ = [ | |||
|     'open_root_actor', | ||||
|     'post_mortem', | ||||
|     'query_actor', | ||||
|     'run', | ||||
|     'run_daemon', | ||||
|     'stream', | ||||
|     'to_asyncio', | ||||
|  |  | |||
|  | @ -24,7 +24,7 @@ import argparse | |||
| 
 | ||||
| from ast import literal_eval | ||||
| 
 | ||||
| from ._actor import Actor | ||||
| from ._runtime import Actor | ||||
| from ._entry import _trio_main | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -32,9 +32,12 @@ import tractor | |||
| async def open_actor_cluster( | ||||
|     modules: list[str], | ||||
|     count: int = cpu_count(), | ||||
|     names: Optional[list[str]] = None, | ||||
|     start_method: Optional[str] = None, | ||||
|     names: list[str] | None = None, | ||||
|     hard_kill: bool = False, | ||||
| 
 | ||||
|     # passed through verbatim to ``open_root_actor()`` | ||||
|     **runtime_kwargs, | ||||
| 
 | ||||
| ) -> AsyncGenerator[ | ||||
|     dict[str, tractor.Portal], | ||||
|     None, | ||||
|  | @ -49,7 +52,9 @@ async def open_actor_cluster( | |||
|         raise ValueError( | ||||
|             f'Number of names is {len(names)} but count is {count}') | ||||
| 
 | ||||
|     async with tractor.open_nursery(start_method=start_method) as an: | ||||
|     async with tractor.open_nursery( | ||||
|         **runtime_kwargs, | ||||
|     ) as an: | ||||
|         async with trio.open_nursery() as n: | ||||
|             uid = tractor.current_actor().uid | ||||
| 
 | ||||
|  |  | |||
										
											
File diff suppressed because it is too large
							|  | @ -18,7 +18,11 @@ | |||
| Actor discovery API. | ||||
| 
 | ||||
| """ | ||||
| from typing import Tuple, Optional, Union, AsyncGenerator | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     Union, | ||||
|     AsyncGenerator, | ||||
| ) | ||||
| from contextlib import asynccontextmanager as acm | ||||
| 
 | ||||
| from ._ipc import _connect_chan, Channel | ||||
|  | @ -104,7 +108,7 @@ async def query_actor( | |||
| @acm | ||||
| async def find_actor( | ||||
|     name: str, | ||||
|     arbiter_sockaddr: Tuple[str, int] = None | ||||
|     arbiter_sockaddr: tuple[str, int] | None = None | ||||
| 
 | ||||
| ) -> AsyncGenerator[Optional[Portal], None]: | ||||
|     ''' | ||||
|  | @ -130,7 +134,7 @@ async def find_actor( | |||
| @acm | ||||
| async def wait_for_actor( | ||||
|     name: str, | ||||
|     arbiter_sockaddr: Tuple[str, int] = None | ||||
|     arbiter_sockaddr: tuple[str, int] | None = None | ||||
| ) -> AsyncGenerator[Portal, None]: | ||||
|     """Wait on an actor to register with the arbiter. | ||||
| 
 | ||||
|  |  | |||
|  | @ -18,15 +18,28 @@ | |||
| Sub-process entry points. | ||||
| 
 | ||||
| """ | ||||
| from __future__ import annotations | ||||
| from functools import partial | ||||
| from typing import Tuple, Any | ||||
| import signal | ||||
| from typing import ( | ||||
|     Any, | ||||
|     TYPE_CHECKING, | ||||
| ) | ||||
| 
 | ||||
| import trio  # type: ignore | ||||
| 
 | ||||
| from .log import get_console_log, get_logger | ||||
| from .log import ( | ||||
|     get_console_log, | ||||
|     get_logger, | ||||
| ) | ||||
| from . import _state | ||||
| from .to_asyncio import run_as_asyncio_guest | ||||
| from ._runtime import ( | ||||
|     async_main, | ||||
|     Actor, | ||||
| ) | ||||
| 
 | ||||
| if TYPE_CHECKING: | ||||
|     from ._spawn import SpawnMethodKey | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
|  | @ -34,11 +47,11 @@ log = get_logger(__name__) | |||
| 
 | ||||
| def _mp_main( | ||||
| 
 | ||||
|     actor: 'Actor',  # type: ignore | ||||
|     accept_addr: Tuple[str, int], | ||||
|     forkserver_info: Tuple[Any, Any, Any, Any, Any], | ||||
|     start_method: str, | ||||
|     parent_addr: Tuple[str, int] = None, | ||||
|     actor: Actor,  # type: ignore | ||||
|     accept_addr: tuple[str, int], | ||||
|     forkserver_info: tuple[Any, Any, Any, Any, Any], | ||||
|     start_method: SpawnMethodKey, | ||||
|     parent_addr: tuple[str, int] | None = None, | ||||
|     infect_asyncio: bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
|  | @ -63,7 +76,8 @@ def _mp_main( | |||
| 
 | ||||
|     log.debug(f"parent_addr is {parent_addr}") | ||||
|     trio_main = partial( | ||||
|         actor._async_main, | ||||
|         async_main, | ||||
|         actor, | ||||
|         accept_addr, | ||||
|         parent_addr=parent_addr | ||||
|     ) | ||||
|  | @ -82,9 +96,9 @@ def _mp_main( | |||
| 
 | ||||
| def _trio_main( | ||||
| 
 | ||||
|     actor: 'Actor',  # type: ignore | ||||
|     actor: Actor,  # type: ignore | ||||
|     *, | ||||
|     parent_addr: Tuple[str, int] = None, | ||||
|     parent_addr: tuple[str, int] | None = None, | ||||
|     infect_asyncio: bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
|  | @ -106,7 +120,8 @@ def _trio_main( | |||
| 
 | ||||
|     log.debug(f"parent_addr is {parent_addr}") | ||||
|     trio_main = partial( | ||||
|         actor._async_main, | ||||
|         async_main, | ||||
|         actor, | ||||
|         parent_addr=parent_addr | ||||
|     ) | ||||
| 
 | ||||
|  |  | |||
|  | @ -18,11 +18,16 @@ | |||
| Our classy exception set. | ||||
| 
 | ||||
| """ | ||||
| from typing import Dict, Any, Optional, Type | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Optional, | ||||
|     Type, | ||||
| ) | ||||
| import importlib | ||||
| import builtins | ||||
| import traceback | ||||
| 
 | ||||
| import exceptiongroup as eg | ||||
| import trio | ||||
| 
 | ||||
| 
 | ||||
|  | @ -48,9 +53,6 @@ class RemoteActorError(Exception): | |||
|         self.type = suberror_type | ||||
|         self.msgdata = msgdata | ||||
| 
 | ||||
|     # TODO: a trio.MultiError.catch like context manager | ||||
|     # for catching underlying remote errors of a particular type | ||||
| 
 | ||||
| 
 | ||||
| class InternalActorError(RemoteActorError): | ||||
|     """Remote internal ``tractor`` error indicating | ||||
|  | @ -95,7 +97,7 @@ def pack_error( | |||
|     exc: BaseException, | ||||
|     tb=None, | ||||
| 
 | ||||
| ) -> Dict[str, Any]: | ||||
| ) -> dict[str, Any]: | ||||
|     """Create an "error message" for tranmission over | ||||
|     a channel (aka the wire). | ||||
|     """ | ||||
|  | @ -114,15 +116,17 @@ def pack_error( | |||
| 
 | ||||
| def unpack_error( | ||||
| 
 | ||||
|     msg: Dict[str, Any], | ||||
|     msg: dict[str, Any], | ||||
|     chan=None, | ||||
|     err_type=RemoteActorError | ||||
| 
 | ||||
| ) -> Exception: | ||||
|     """Unpack an 'error' message from the wire | ||||
|     ''' | ||||
|     Unpack an 'error' message from the wire | ||||
|     into a local ``RemoteActorError``. | ||||
| 
 | ||||
|     """ | ||||
|     ''' | ||||
|     __tracebackhide__ = True | ||||
|     error = msg['error'] | ||||
| 
 | ||||
|     tb_str = error.get('tb_str', '') | ||||
|  | @ -135,7 +139,12 @@ def unpack_error( | |||
|         suberror_type = trio.Cancelled | ||||
| 
 | ||||
|     else:  # try to lookup a suitable local error type | ||||
|         for ns in [builtins, _this_mod, trio]: | ||||
|         for ns in [ | ||||
|             builtins, | ||||
|             _this_mod, | ||||
|             eg, | ||||
|             trio, | ||||
|         ]: | ||||
|             try: | ||||
|                 suberror_type = getattr(ns, type_name) | ||||
|                 break | ||||
|  | @ -154,12 +163,15 @@ def unpack_error( | |||
| 
 | ||||
| 
 | ||||
| def is_multi_cancelled(exc: BaseException) -> bool: | ||||
|     """Predicate to determine if a ``trio.MultiError`` contains only | ||||
|     ``trio.Cancelled`` sub-exceptions (and is likely the result of | ||||
|     ''' | ||||
|     Predicate to determine if a possible ``eg.BaseExceptionGroup`` contains | ||||
|     only ``trio.Cancelled`` sub-exceptions (and is likely the result of | ||||
|     cancelling a collection of subtasks). | ||||
| 
 | ||||
|     """ | ||||
|     return not trio.MultiError.filter( | ||||
|         lambda exc: exc if not isinstance(exc, trio.Cancelled) else None, | ||||
|         exc, | ||||
|     ) | ||||
|     ''' | ||||
|     if isinstance(exc, eg.BaseExceptionGroup): | ||||
|         return exc.subgroup( | ||||
|             lambda exc: isinstance(exc, trio.Cancelled) | ||||
|         ) is not None | ||||
| 
 | ||||
|     return False | ||||
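A minimal standalone sketch (not part of the diff) of the exceptiongroup ``subgroup()`` semantics the rewritten predicate relies on, using stand-in exception instances:

import exceptiongroup as eg

group = eg.BaseExceptionGroup(
    'demo',
    [ValueError('boom'), KeyError('missing')],
)
# ``subgroup()`` returns a new group containing only the matching
# sub-exceptions, or ``None`` when nothing matches.
assert group.subgroup(lambda exc: isinstance(exc, ValueError)) is not None
assert group.subgroup(lambda exc: isinstance(exc, OSError)) is None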
|  |  | |||
|  | @ -341,7 +341,7 @@ class Channel: | |||
| 
 | ||||
|     async def connect( | ||||
|         self, | ||||
|         destaddr: tuple[Any, ...] = None, | ||||
|         destaddr: tuple[Any, ...] | None = None, | ||||
|         **kwargs | ||||
| 
 | ||||
|     ) -> MsgTransport: | ||||
|  |  | |||
|  | @ -35,6 +35,7 @@ import warnings | |||
| import trio | ||||
| from async_generator import asynccontextmanager | ||||
| 
 | ||||
| from .trionics import maybe_open_nursery | ||||
| from ._state import current_actor | ||||
| from ._ipc import Channel | ||||
| from .log import get_logger | ||||
|  | @ -44,43 +45,27 @@ from ._exceptions import ( | |||
|     NoResult, | ||||
|     ContextCancelled, | ||||
| ) | ||||
| from ._streaming import Context, ReceiveMsgStream | ||||
| from ._streaming import ( | ||||
|     Context, | ||||
|     MsgStream, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| @asynccontextmanager | ||||
| async def maybe_open_nursery( | ||||
|     nursery: trio.Nursery = None, | ||||
|     shield: bool = False, | ||||
| ) -> AsyncGenerator[trio.Nursery, Any]: | ||||
|     ''' | ||||
|     Create a new nursery if None provided. | ||||
| 
 | ||||
|     Blocks on exit as expected if no input nursery is provided. | ||||
| 
 | ||||
|     ''' | ||||
|     if nursery is not None: | ||||
|         yield nursery | ||||
|     else: | ||||
|         async with trio.open_nursery() as nursery: | ||||
|             nursery.cancel_scope.shield = shield | ||||
|             yield nursery | ||||
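The local helper is removed here in favour of the ``trionics`` version imported above; a short usage sketch (hypothetical caller code, assuming the helper is re-exported from ``tractor.trionics`` as that import suggests):

import trio
from tractor.trionics import maybe_open_nursery

async def demo(n: trio.Nursery | None = None):
    # reuse the caller's nursery when one is passed, else open a fresh one
    async with maybe_open_nursery(n) as nursery:
        nursery.start_soon(trio.sleep, 0)

trio.run(demo)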
| 
 | ||||
| 
 | ||||
| def _unwrap_msg( | ||||
| 
 | ||||
|     msg: dict[str, Any], | ||||
|     channel: Channel | ||||
| 
 | ||||
| ) -> Any: | ||||
|     __tracebackhide__ = True | ||||
|     try: | ||||
|         return msg['return'] | ||||
|     except KeyError: | ||||
|         # internal error should never get here | ||||
|         assert msg.get('cid'), "Received internal error at portal?" | ||||
|         raise unpack_error(msg, channel) | ||||
|         raise unpack_error(msg, channel) from None | ||||
| 
 | ||||
| 
 | ||||
| class MessagingError(Exception): | ||||
|  | @ -119,7 +104,7 @@ class Portal: | |||
|         # it is expected that ``result()`` will be awaited at some | ||||
|         # point. | ||||
|         self._expect_result: Optional[Context] = None | ||||
|         self._streams: set[ReceiveMsgStream] = set() | ||||
|         self._streams: set[MsgStream] = set() | ||||
|         self.actor = current_actor() | ||||
| 
 | ||||
|     async def _submit_for_result( | ||||
|  | @ -154,6 +139,7 @@ class Portal: | |||
|         Return the result(s) from the remote actor's "main" task. | ||||
| 
 | ||||
|         ''' | ||||
|         # __tracebackhide__ = True | ||||
|         # Check for non-rpc errors slapped on the | ||||
|         # channel for which we always raise | ||||
|         exc = self.channel._exc | ||||
|  | @ -203,7 +189,7 @@ class Portal: | |||
| 
 | ||||
|     async def cancel_actor( | ||||
|         self, | ||||
|         timeout: float = None, | ||||
|         timeout: float | None = None, | ||||
| 
 | ||||
|     ) -> bool: | ||||
|         ''' | ||||
|  | @ -333,7 +319,7 @@ class Portal: | |||
|         async_gen_func: Callable,  # typing: ignore | ||||
|         **kwargs, | ||||
| 
 | ||||
|     ) -> AsyncGenerator[ReceiveMsgStream, None]: | ||||
|     ) -> AsyncGenerator[MsgStream, None]: | ||||
| 
 | ||||
|         if not inspect.isasyncgenfunction(async_gen_func): | ||||
|             if not ( | ||||
|  | @ -358,7 +344,7 @@ class Portal: | |||
| 
 | ||||
|         try: | ||||
|             # deliver receive only stream | ||||
|             async with ReceiveMsgStream( | ||||
|             async with MsgStream( | ||||
|                 ctx, ctx._recv_chan, | ||||
|             ) as rchan: | ||||
|                 self._streams.add(rchan) | ||||
|  | @ -478,7 +464,6 @@ class Portal: | |||
|             # sure it's worth being pedantic: | ||||
|             # Exception, | ||||
|             # trio.Cancelled, | ||||
|             # trio.MultiError, | ||||
|             # KeyboardInterrupt, | ||||
| 
 | ||||
|         ) as err: | ||||
|  | @ -511,10 +496,14 @@ class Portal: | |||
|             if ctx.chan.connected(): | ||||
|                 log.info( | ||||
|                     'Waiting on final context-task result for\n' | ||||
|                     f'task:{cid}\n' | ||||
|                     f'actor:{uid}' | ||||
|                     f'task: {cid}\n' | ||||
|                     f'actor: {uid}' | ||||
|                 ) | ||||
|                 result = await ctx.result() | ||||
|                 log.runtime( | ||||
|                     f'Context {fn_name} returned ' | ||||
|                     f'value from callee `{result}`' | ||||
|                 ) | ||||
| 
 | ||||
|             # though it should be impossible for any tasks | ||||
|             # operating *in* this scope to have survived | ||||
|  | @ -536,14 +525,22 @@ class Portal: | |||
|                         f'task:{cid}\n' | ||||
|                         f'actor:{uid}' | ||||
|                     ) | ||||
|             else: | ||||
|                 log.runtime( | ||||
|                     f'Context {fn_name} returned ' | ||||
|                     f'value from callee `{result}`' | ||||
|                 ) | ||||
|             # XXX: (MEGA IMPORTANT) if this is a root opened process we | ||||
|             # wait for any immediate child in debug before popping the | ||||
|             # context from the runtime msg loop otherwise inside | ||||
|             # ``Actor._push_result()`` the msg will be discarded and in | ||||
|             # the case where that msg is global debugger unlock (via | ||||
|             # a "stop" msg for a stream), this can result in a deadlock | ||||
|             # where the root is waiting on the lock to clear but the | ||||
|             # child has already cleared it and clobbered IPC. | ||||
|             from ._debug import maybe_wait_for_debugger | ||||
|             await maybe_wait_for_debugger() | ||||
| 
 | ||||
|             # remove the context from runtime tracking | ||||
|             self.actor._contexts.pop((self.channel.uid, ctx.cid)) | ||||
|             self.actor._contexts.pop( | ||||
|                 (self.channel.uid, ctx.cid), | ||||
|                 None, | ||||
|             ) | ||||
| 
 | ||||
| 
 | ||||
| @dataclass | ||||
|  | @ -600,9 +597,11 @@ async def open_portal( | |||
| 
 | ||||
|         msg_loop_cs: Optional[trio.CancelScope] = None | ||||
|         if start_msg_loop: | ||||
|             from ._runtime import process_messages | ||||
|             msg_loop_cs = await nursery.start( | ||||
|                 partial( | ||||
|                     actor._process_messages, | ||||
|                     process_messages, | ||||
|                     actor, | ||||
|                     channel, | ||||
|                     # if the local task is cancelled we want to keep | ||||
|                     # the msg loop running until our block ends | ||||
|  |  | |||
							
								
								
									
tractor/_root.py (190 changed lines)
|  | @ -22,14 +22,21 @@ from contextlib import asynccontextmanager | |||
| from functools import partial | ||||
| import importlib | ||||
| import logging | ||||
| import signal | ||||
| import sys | ||||
| import os | ||||
| from typing import Tuple, Optional, List, Any | ||||
| import typing | ||||
| import warnings | ||||
| 
 | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import trio | ||||
| 
 | ||||
| from ._actor import Actor, Arbiter | ||||
| from ._runtime import ( | ||||
|     Actor, | ||||
|     Arbiter, | ||||
|     async_main, | ||||
| ) | ||||
| from . import _debug | ||||
| from . import _spawn | ||||
| from . import _state | ||||
|  | @ -49,37 +56,45 @@ logger = log.get_logger('tractor') | |||
| @asynccontextmanager | ||||
| async def open_root_actor( | ||||
| 
 | ||||
|     *, | ||||
|     # defaults are above | ||||
|     arbiter_addr: Optional[Tuple[str, int]] = ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port, | ||||
|     ), | ||||
|     arbiter_addr: tuple[str, int] | None = None, | ||||
| 
 | ||||
|     name: Optional[str] = 'root', | ||||
|     # defaults are above | ||||
|     registry_addr: tuple[str, int] | None = None, | ||||
| 
 | ||||
|     name: str | None = 'root', | ||||
| 
 | ||||
|     # either the `multiprocessing` start method: | ||||
|     # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods | ||||
|     # OR `trio` (the new default). | ||||
|     start_method: Optional[str] = None, | ||||
|     start_method: _spawn.SpawnMethodKey | None = None, | ||||
| 
 | ||||
|     # enables the multi-process debugger support | ||||
|     debug_mode: bool = False, | ||||
| 
 | ||||
|     # internal logging | ||||
|     loglevel: Optional[str] = None, | ||||
|     loglevel: str | None = None, | ||||
| 
 | ||||
|     enable_modules: Optional[List] = None, | ||||
|     rpc_module_paths: Optional[List] = None, | ||||
|     enable_modules: list | None = None, | ||||
|     rpc_module_paths: list | None = None, | ||||
| 
 | ||||
| ) -> typing.Any: | ||||
|     """Async entry point for ``tractor``. | ||||
|     ''' | ||||
|     Runtime init entry point for ``tractor``. | ||||
| 
 | ||||
|     """ | ||||
|     ''' | ||||
|     # Override the global debugger hook to make it play nice with | ||||
|     # ``trio``, see: | ||||
|     # ``trio``, see much discussion in: | ||||
|     # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018 | ||||
|     builtin_bp_handler = sys.breakpointhook | ||||
|     orig_bp_path: str | None = os.environ.get('PYTHONBREAKPOINT', None) | ||||
|     os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace' | ||||
| 
 | ||||
|     # attempt to retrieve ``trio``'s sigint handler and stash it | ||||
|     # on our debugger lock state. | ||||
|     _debug.Lock._trio_handler = signal.getsignal(signal.SIGINT) | ||||
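Background sketch (not part of the diff): the stdlib ``breakpoint()`` builtin dispatches through ``sys.breakpointhook``, whose default implementation imports whatever dotted path the PYTHONBREAKPOINT env var names, which is why pointing that var at ``tractor._debug._set_trace`` reroutes user ``breakpoint()`` calls into tractor's debugger.

import os
import sys

os.environ['PYTHONBREAKPOINT'] = 'pdb.set_trace'  # stand-in dotted path
sys.breakpointhook = sys.__breakpointhook__  # default hook honours the env var
# breakpoint()  # would now import and call ``pdb.set_trace``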
| 
 | ||||
|     # mark top most level process as root actor | ||||
|     _state._runtime_vars['_is_root'] = True | ||||
| 
 | ||||
|  | @ -98,18 +113,24 @@ async def open_root_actor( | |||
|     if start_method is not None: | ||||
|         _spawn.try_set_start_method(start_method) | ||||
| 
 | ||||
|     arbiter_addr = (host, port) = arbiter_addr or ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port, | ||||
|     if arbiter_addr is not None: | ||||
|         warnings.warn( | ||||
|             '`arbiter_addr` is now deprecated and has been renamed to ' | ||||
|             '`registry_addr`.\nUse that instead..', | ||||
|             DeprecationWarning, | ||||
|             stacklevel=2, | ||||
|         ) | ||||
| 
 | ||||
|     registry_addr = (host, port) = ( | ||||
|         registry_addr | ||||
|         or arbiter_addr | ||||
|         or ( | ||||
|             _default_arbiter_host, | ||||
|             _default_arbiter_port, | ||||
|         ) | ||||
|     ) | ||||
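A hypothetical caller-side sketch of the renamed parameter (the address tuple below is illustrative only); passing the old ``arbiter_addr`` kwarg still works but now emits a DeprecationWarning:

import trio
import tractor

async def main():
    async with tractor.open_root_actor(
        registry_addr=('127.0.0.1', 1616),
        name='root',
    ):
        await trio.sleep(0)

trio.run(main)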
| 
 | ||||
|     if loglevel is None: | ||||
|         loglevel = log.get_loglevel() | ||||
|     else: | ||||
|         log._default_loglevel = loglevel | ||||
|         log.get_console_log(loglevel) | ||||
| 
 | ||||
|     assert loglevel | ||||
|     loglevel = (loglevel or log._default_loglevel).upper() | ||||
| 
 | ||||
|     if debug_mode and _spawn._spawn_method == 'trio': | ||||
|         _state._runtime_vars['_debug_mode'] = True | ||||
|  | @ -124,7 +145,7 @@ async def open_root_actor( | |||
|             logging.getLevelName( | ||||
|                 # lul, need the upper case for the -> int map? | ||||
|                 # sweet "dynamic function behaviour" stdlib... | ||||
|                 loglevel.upper() | ||||
|                 loglevel, | ||||
|             ) > logging.getLevelName('PDB') | ||||
|         ): | ||||
|             loglevel = 'PDB' | ||||
|  | @ -134,20 +155,25 @@ async def open_root_actor( | |||
|             "Debug mode is only supported for the `trio` backend!" | ||||
|         ) | ||||
| 
 | ||||
|     # make a temporary connection to see if an arbiter exists | ||||
|     arbiter_found = False | ||||
|     log.get_console_log(loglevel) | ||||
| 
 | ||||
|     try: | ||||
|         # make a temporary connection to see if an arbiter exists, | ||||
|         # if one can't be made quickly we assume none exists. | ||||
|         arbiter_found = False | ||||
| 
 | ||||
|         # TODO: this connect-and-bail forces us to have to carefully | ||||
|         # rewrap TCP 104-connection-reset errors as EOF so as to avoid | ||||
|         # propagating cancel-causing errors to the channel-msg loop | ||||
|         # machinery.  Likely it would be better to eventually have | ||||
|         # a "discovery" protocol with basic handshake instead. | ||||
|         async with _connect_chan(host, port): | ||||
|             arbiter_found = True | ||||
|         with trio.move_on_after(1): | ||||
|             async with _connect_chan(host, port): | ||||
|                 arbiter_found = True | ||||
| 
 | ||||
|     except OSError: | ||||
|         logger.warning(f"No actor could be found @ {host}:{port}") | ||||
|         # TODO: make this a "discovery" log level? | ||||
|         logger.warning(f"No actor registry found @ {host}:{port}") | ||||
| 
 | ||||
|     # create a local actor and start up its main routine/task | ||||
|     if arbiter_found: | ||||
|  | @ -157,7 +183,7 @@ async def open_root_actor( | |||
| 
 | ||||
|         actor = Actor( | ||||
|             name or 'anonymous', | ||||
|             arbiter_addr=arbiter_addr, | ||||
|             arbiter_addr=registry_addr, | ||||
|             loglevel=loglevel, | ||||
|             enable_modules=enable_modules, | ||||
|         ) | ||||
|  | @ -173,7 +199,7 @@ async def open_root_actor( | |||
| 
 | ||||
|         actor = Arbiter( | ||||
|             name or 'arbiter', | ||||
|             arbiter_addr=arbiter_addr, | ||||
|             arbiter_addr=registry_addr, | ||||
|             loglevel=loglevel, | ||||
|             enable_modules=enable_modules, | ||||
|         ) | ||||
|  | @ -189,13 +215,14 @@ async def open_root_actor( | |||
|         # start the actor runtime in a new task | ||||
|         async with trio.open_nursery() as nursery: | ||||
| 
 | ||||
|             # ``Actor._async_main()`` creates an internal nursery and | ||||
|             # ``_runtime.async_main()`` creates an internal nursery and | ||||
|             # thus blocks here until the entire underlying actor tree has | ||||
|             # terminated thereby conducting structured concurrency. | ||||
| 
 | ||||
|             await nursery.start( | ||||
|                 partial( | ||||
|                     actor._async_main, | ||||
|                     async_main, | ||||
|                     actor, | ||||
|                     accept_addr=(host, port), | ||||
|                     parent_addr=None | ||||
|                 ) | ||||
|  | @ -203,7 +230,10 @@ async def open_root_actor( | |||
|             try: | ||||
|                 yield actor | ||||
| 
 | ||||
|             except (Exception, trio.MultiError) as err: | ||||
|             except ( | ||||
|                 Exception, | ||||
|                 BaseExceptionGroup, | ||||
|             ) as err: | ||||
| 
 | ||||
|                 entered = await _debug._maybe_enter_pm(err) | ||||
| 
 | ||||
|  | @ -216,7 +246,8 @@ async def open_root_actor( | |||
|             finally: | ||||
|                 # NOTE: not sure if we'll ever need this but it's | ||||
|                 # possibly better for even more determinism? | ||||
|                 # logger.cancel(f'Waiting on {len(nurseries)} nurseries in root..') | ||||
|                 # logger.cancel( | ||||
|                 #     f'Waiting on {len(nurseries)} nurseries in root..') | ||||
|                 # nurseries = actor._actoruid2nursery.values() | ||||
|                 # async with trio.open_nursery() as tempn: | ||||
|                 #     for an in nurseries: | ||||
|  | @ -226,64 +257,40 @@ async def open_root_actor( | |||
|                 await actor.cancel() | ||||
|     finally: | ||||
|         _state._current_actor = None | ||||
| 
 | ||||
|         # restore breakpoint hook state | ||||
|         sys.breakpointhook = builtin_bp_handler | ||||
|         if orig_bp_path is not None: | ||||
|             os.environ['PYTHONBREAKPOINT'] = orig_bp_path | ||||
|         else: | ||||
|             # clear env back to having no entry | ||||
|             os.environ.pop('PYTHONBREAKPOINT') | ||||
| 
 | ||||
|         logger.runtime("Root actor terminated") | ||||
| 
 | ||||
| 
 | ||||
| def run( | ||||
| 
 | ||||
|     # target | ||||
|     async_fn: typing.Callable[..., typing.Awaitable], | ||||
|     *args, | ||||
| 
 | ||||
|     # runtime kwargs | ||||
|     name: Optional[str] = 'root', | ||||
|     arbiter_addr: Tuple[str, int] = ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port, | ||||
|     ), | ||||
| 
 | ||||
|     start_method: Optional[str] = None, | ||||
|     debug_mode: bool = False, | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> Any: | ||||
|     """Run a trio-actor async function in process. | ||||
| 
 | ||||
|     This is tractor's main entry and the start point for any async actor. | ||||
|     """ | ||||
|     async def _main(): | ||||
| 
 | ||||
|         async with open_root_actor( | ||||
|             arbiter_addr=arbiter_addr, | ||||
|             name=name, | ||||
|             start_method=start_method, | ||||
|             debug_mode=debug_mode, | ||||
|             **kwargs, | ||||
|         ): | ||||
| 
 | ||||
|             return await async_fn(*args) | ||||
| 
 | ||||
|     warnings.warn( | ||||
|         "`tractor.run()` is now deprecated. `tractor` now" | ||||
|         " implicitly starts the root actor on first actor nursery" | ||||
|         " use. If you want to start the root actor manually, use" | ||||
|         " `tractor.open_root_actor()`.", | ||||
|         DeprecationWarning, | ||||
|         stacklevel=2, | ||||
|     ) | ||||
|     return trio.run(_main) | ||||
| 
 | ||||
| 
 | ||||
| def run_daemon( | ||||
|     enable_modules: list[str], | ||||
| 
 | ||||
|     # runtime kwargs | ||||
|     name: str | None = 'root', | ||||
|     registry_addr: tuple[str, int] = ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port, | ||||
|     ), | ||||
| 
 | ||||
|     start_method: str | None = None, | ||||
|     debug_mode: bool = False, | ||||
|     **kwargs | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Spawn daemon actor which will respond to RPC. | ||||
|     Spawn daemon actor which will respond to RPC; the main task simply | ||||
|     starts the runtime and then sleeps forever. | ||||
| 
 | ||||
|     This is a convenience wrapper around | ||||
|     ``tractor.run(trio.sleep(float('inf')))`` such that the first actor spawned | ||||
|     is meant to run forever responding to RPC requests. | ||||
|     This is a very minimal convenience wrapper around starting | ||||
|     a "run-until-cancelled" root actor which can be started with a set | ||||
|     of enabled modules for RPC request handling. | ||||
| 
 | ||||
|     ''' | ||||
|     kwargs['enable_modules'] = list(enable_modules) | ||||
|  | @ -291,4 +298,15 @@ def run_daemon( | |||
|     for path in enable_modules: | ||||
|         importlib.import_module(path) | ||||
| 
 | ||||
|     return run(partial(trio.sleep, float('inf')), **kwargs) | ||||
|     async def _main(): | ||||
| 
 | ||||
|         async with open_root_actor( | ||||
|             registry_addr=registry_addr, | ||||
|             name=name, | ||||
|             start_method=start_method, | ||||
|             debug_mode=debug_mode, | ||||
|             **kwargs, | ||||
|         ): | ||||
|             return await trio.sleep_forever() | ||||
| 
 | ||||
|     return trio.run(_main) | ||||
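A hypothetical usage sketch of the reworked ``run_daemon()`` (the module path and names are stand-ins): it blocks in ``trio.run()`` until the root actor is cancelled, serving RPC for the enabled modules.

import tractor

if __name__ == '__main__':
    tractor.run_daemon(
        enable_modules=['mypkg.rpc_endpoints'],
        name='rpc-daemon',
        loglevel='info',
    )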
|  |  | |||
										
											
File diff suppressed because it is too large
											
										
									
								
							|  | @ -22,10 +22,15 @@ from __future__ import annotations | |||
| import sys | ||||
| import platform | ||||
| from typing import ( | ||||
|     Any, Optional, Callable, TypeVar, TYPE_CHECKING | ||||
|     Any, | ||||
|     Awaitable, | ||||
|     Literal, | ||||
|     Callable, | ||||
|     TypeVar, | ||||
|     TYPE_CHECKING, | ||||
| ) | ||||
| from collections.abc import Awaitable | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| 
 | ||||
|  | @ -39,23 +44,28 @@ from ._state import ( | |||
|     is_root_process, | ||||
|     debug_mode, | ||||
| ) | ||||
| 
 | ||||
| from .log import get_logger | ||||
| from ._portal import Portal | ||||
| from ._actor import Actor | ||||
| from ._runtime import Actor | ||||
| from ._entry import _mp_main | ||||
| from ._exceptions import ActorFailure | ||||
| 
 | ||||
| 
 | ||||
| if TYPE_CHECKING: | ||||
|     from ._supervise import ActorNursery | ||||
|     import multiprocessing as mp | ||||
|     ProcessType = TypeVar('ProcessType', mp.Process, trio.Process) | ||||
| 
 | ||||
| log = get_logger('tractor') | ||||
| 
 | ||||
| # placeholder for an mp start context if so using that backend | ||||
| _ctx: Optional[mp.context.BaseContext] = None | ||||
| _spawn_method: str = "trio" | ||||
| _ctx: mp.context.BaseContext | None = None | ||||
| SpawnMethodKey = Literal[ | ||||
|     'trio',  # supported on all platforms | ||||
|     'mp_spawn', | ||||
|     'mp_forkserver',  # posix only | ||||
| ] | ||||
| _spawn_method: SpawnMethodKey = 'trio' | ||||
| 
 | ||||
| 
 | ||||
| if platform.system() == 'Windows': | ||||
|  | @ -72,7 +82,10 @@ else: | |||
|         await trio.lowlevel.wait_readable(proc.sentinel) | ||||
| 
 | ||||
| 
 | ||||
| def try_set_start_method(name: str) -> Optional[mp.context.BaseContext]: | ||||
| def try_set_start_method( | ||||
|     key: SpawnMethodKey | ||||
| 
 | ||||
| ) -> mp.context.BaseContext | None: | ||||
|     ''' | ||||
|     Attempt to set the method for process starting, aka the "actor | ||||
|     spawning backend". | ||||
|  | @ -87,28 +100,30 @@ def try_set_start_method(name: str) -> Optional[mp.context.BaseContext]: | |||
|     global _ctx | ||||
|     global _spawn_method | ||||
| 
 | ||||
|     methods = mp.get_all_start_methods() | ||||
|     if 'fork' in methods: | ||||
|     mp_methods = mp.get_all_start_methods() | ||||
|     if 'fork' in mp_methods: | ||||
|         # forking is incompatible with ``trio``s global task tree | ||||
|         methods.remove('fork') | ||||
|         mp_methods.remove('fork') | ||||
| 
 | ||||
|     # supported on all platforms | ||||
|     methods += ['trio'] | ||||
|     match key: | ||||
|         case 'mp_forkserver': | ||||
|             from . import _forkserver_override | ||||
|             _forkserver_override.override_stdlib() | ||||
|             _ctx = mp.get_context('forkserver') | ||||
| 
 | ||||
|     if name not in methods: | ||||
|         raise ValueError( | ||||
|             f"Spawn method `{name}` is invalid please choose one of {methods}" | ||||
|         ) | ||||
|     elif name == 'forkserver': | ||||
|         from . import _forkserver_override | ||||
|         _forkserver_override.override_stdlib() | ||||
|         _ctx = mp.get_context(name) | ||||
|     elif name == 'trio': | ||||
|         _ctx = None | ||||
|     else: | ||||
|         _ctx = mp.get_context(name) | ||||
|         case 'mp_spawn': | ||||
|             _ctx = mp.get_context('spawn') | ||||
| 
 | ||||
|     _spawn_method = name | ||||
|         case 'trio': | ||||
|             _ctx = None | ||||
| 
 | ||||
|         case _: | ||||
|             raise ValueError( | ||||
|                 f'Spawn method `{key}` is invalid!\n' | ||||
|                 f'Please choose one of {SpawnMethodKey}' | ||||
|             ) | ||||
| 
 | ||||
|     _spawn_method = key | ||||
|     return _ctx | ||||
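A hypothetical sketch of selecting a backend by its new string key (internal API, shown only to illustrate the ``match`` dispatch above); unknown keys fall through to ``case _`` and raise ``ValueError``:

from tractor import _spawn

mp_ctx = _spawn.try_set_start_method('mp_spawn')  # multiprocessing "spawn" context
assert _spawn._spawn_method == 'mp_spawn'
_spawn.try_set_start_method('trio')  # back to the default backend (returns None)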
| 
 | ||||
| 
 | ||||
|  | @ -124,6 +139,7 @@ async def exhaust_portal( | |||
|     If the main task is an async generator do our best to consume | ||||
|     what's left of it. | ||||
|     ''' | ||||
|     __tracebackhide__ = True | ||||
|     try: | ||||
|         log.debug(f"Waiting on final result from {actor.uid}") | ||||
| 
 | ||||
|  | @ -131,8 +147,11 @@ async def exhaust_portal( | |||
|         # always be established and shutdown using a context manager api | ||||
|         final = await portal.result() | ||||
| 
 | ||||
|     except (Exception, trio.MultiError) as err: | ||||
|         # we reraise in the parent task via a ``trio.MultiError`` | ||||
|     except ( | ||||
|         Exception, | ||||
|         BaseExceptionGroup, | ||||
|     ) as err: | ||||
|         # we reraise in the parent task via a ``BaseExceptionGroup`` | ||||
|         return err | ||||
|     except trio.Cancelled as err: | ||||
|         # lol, of course we need this too ;P | ||||
|  | @ -160,7 +179,7 @@ async def cancel_on_completion( | |||
|     ''' | ||||
|     # if this call errors we store the exception for later | ||||
|     # in ``errors`` which will be reraised inside | ||||
|     # a MultiError and we still send out a cancel request | ||||
|     # an exception group and we still send out a cancel request | ||||
|     result = await exhaust_portal(portal, actor) | ||||
|     if isinstance(result, Exception): | ||||
|         errors[actor.uid] = result | ||||
|  | @ -180,16 +199,37 @@ async def cancel_on_completion( | |||
| async def do_hard_kill( | ||||
|     proc: trio.Process, | ||||
|     terminate_after: int = 3, | ||||
| 
 | ||||
| ) -> None: | ||||
|     # NOTE: this timeout used to do nothing since we were shielding | ||||
|     # the ``.wait()`` inside ``new_proc()`` which will pretty much | ||||
|     # never release until the process exits, now it acts as | ||||
|     # a hard-kill time ultimatum. | ||||
|     log.debug(f"Terminating {proc}") | ||||
|     with trio.move_on_after(terminate_after) as cs: | ||||
| 
 | ||||
|         # NOTE: This ``__aexit__()`` shields internally. | ||||
|         async with proc:  # calls ``trio.Process.aclose()`` | ||||
|             log.debug(f"Terminating {proc}") | ||||
|         # NOTE: code below was copied verbatim from the now deprecated | ||||
|         # (in 0.20.0) ``trio._subprocess.Process.aclose()``, orig doc | ||||
|         # string: | ||||
|         # | ||||
|         # Close any pipes we have to the process (both input and output) | ||||
|         # and wait for it to exit. If cancelled, kills the process and | ||||
|         # waits for it to finish exiting before propagating the | ||||
|         # cancellation. | ||||
|         with trio.CancelScope(shield=True): | ||||
|             if proc.stdin is not None: | ||||
|                 await proc.stdin.aclose() | ||||
|             if proc.stdout is not None: | ||||
|                 await proc.stdout.aclose() | ||||
|             if proc.stderr is not None: | ||||
|                 await proc.stderr.aclose() | ||||
|         try: | ||||
|             await proc.wait() | ||||
|         finally: | ||||
|             if proc.returncode is None: | ||||
|                 proc.kill() | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     await proc.wait() | ||||
| 
 | ||||
|     if cs.cancelled_caught: | ||||
|         # XXX: should pretty much never get here unless we have | ||||
|  | @ -240,16 +280,17 @@ async def soft_wait( | |||
| 
 | ||||
|             if proc.poll() is None:  # type: ignore | ||||
|                 log.warning( | ||||
|                     f'Process still alive after cancel request:\n{uid}') | ||||
|                     'Actor still alive after cancel request:\n' | ||||
|                     f'{uid}' | ||||
|                 ) | ||||
| 
 | ||||
|                 n.cancel_scope.cancel() | ||||
|         raise | ||||
| 
 | ||||
| 
 | ||||
| async def new_proc( | ||||
| 
 | ||||
|     name: str, | ||||
|     actor_nursery: 'ActorNursery',  # type: ignore  # noqa | ||||
|     actor_nursery: ActorNursery, | ||||
|     subactor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
| 
 | ||||
|  | @ -263,6 +304,41 @@ async def new_proc( | |||
|     infect_asyncio: bool = False, | ||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     # lookup backend spawning target | ||||
|     target = _methods[_spawn_method] | ||||
| 
 | ||||
|     # mark the new actor with the global spawn method | ||||
|     subactor._spawn_method = _spawn_method | ||||
| 
 | ||||
|     await target( | ||||
|         name, | ||||
|         actor_nursery, | ||||
|         subactor, | ||||
|         errors, | ||||
|         bind_addr, | ||||
|         parent_addr, | ||||
|         _runtime_vars,  # run time vars | ||||
|         infect_asyncio=infect_asyncio, | ||||
|         task_status=task_status, | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| async def trio_proc( | ||||
|     name: str, | ||||
|     actor_nursery: ActorNursery, | ||||
|     subactor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
| 
 | ||||
|     # passed through to actor main | ||||
|     bind_addr: tuple[str, int], | ||||
|     parent_addr: tuple[str, int], | ||||
|     _runtime_vars: dict[str, Any],  # serialized and sent to _child | ||||
|     *, | ||||
|     infect_asyncio: bool = False, | ||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Create a new ``Process`` using a "spawn method" as (configured using | ||||
|  | @ -272,174 +348,165 @@ async def new_proc( | |||
|     here is to be considered the core supervision strategy. | ||||
| 
 | ||||
|     ''' | ||||
|     # mark the new actor with the global spawn method | ||||
|     subactor._spawn_method = _spawn_method | ||||
|     uid = subactor.uid | ||||
|     spawn_cmd = [ | ||||
|         sys.executable, | ||||
|         "-m", | ||||
|         # Hardcode this (instead of using ``_child.__name__`` to avoid a | ||||
|         # double import warning: https://stackoverflow.com/a/45070583 | ||||
|         "tractor._child", | ||||
|         # We provide the child's unique identifier on this exec/spawn | ||||
|         # line for debugging purposes when viewing the process tree from | ||||
|         # the OS; it otherwise can be passed via the parent channel if | ||||
|         # we prefer in the future (for privacy). | ||||
|         "--uid", | ||||
|         str(subactor.uid), | ||||
|         # Address the child must connect to on startup | ||||
|         "--parent_addr", | ||||
|         str(parent_addr) | ||||
|     ] | ||||
| 
 | ||||
|     if _spawn_method == 'trio': | ||||
|         spawn_cmd = [ | ||||
|             sys.executable, | ||||
|             "-m", | ||||
|             # Hardcode this (instead of using ``_child.__name__`` to avoid a | ||||
|             # double import warning: https://stackoverflow.com/a/45070583 | ||||
|             "tractor._child", | ||||
|             # We provide the child's unique identifier on this exec/spawn | ||||
|             # line for debugging purposes when viewing the process tree from | ||||
|             # the OS; it otherwise can be passed via the parent channel if | ||||
|             # we prefer in the future (for privacy). | ||||
|             "--uid", | ||||
|             str(subactor.uid), | ||||
|             # Address the child must connect to on startup | ||||
|             "--parent_addr", | ||||
|             str(parent_addr) | ||||
|     if subactor.loglevel: | ||||
|         spawn_cmd += [ | ||||
|             "--loglevel", | ||||
|             subactor.loglevel | ||||
|         ] | ||||
|     # Tell child to run in guest mode on top of ``asyncio`` loop | ||||
|     if infect_asyncio: | ||||
|         spawn_cmd.append("--asyncio") | ||||
| 
 | ||||
|         if subactor.loglevel: | ||||
|             spawn_cmd += [ | ||||
|                 "--loglevel", | ||||
|                 subactor.loglevel | ||||
|             ] | ||||
|         # Tell child to run in guest mode on top of ``asyncio`` loop | ||||
|         if infect_asyncio: | ||||
|             spawn_cmd.append("--asyncio") | ||||
| 
 | ||||
|         cancelled_during_spawn: bool = False | ||||
|         proc: Optional[trio.Process] = None | ||||
|     cancelled_during_spawn: bool = False | ||||
|     proc: trio.Process | None = None | ||||
|     try: | ||||
|         try: | ||||
|             try: | ||||
|                 proc = await trio.open_process(spawn_cmd) | ||||
|             # TODO: needs ``trio_typing`` patch? | ||||
|             proc = await trio.lowlevel.open_process(spawn_cmd) | ||||
| 
 | ||||
|                 log.runtime(f"Started {proc}") | ||||
|             log.runtime(f"Started {proc}") | ||||
| 
 | ||||
|                 # wait for actor to spawn and connect back to us | ||||
|                 # channel should have handshake completed by the | ||||
|                 # local actor by the time we get a ref to it | ||||
|                 event, chan = await actor_nursery._actor.wait_for_peer( | ||||
|                     subactor.uid) | ||||
|             # wait for actor to spawn and connect back to us | ||||
|             # channel should have handshake completed by the | ||||
|             # local actor by the time we get a ref to it | ||||
|             event, chan = await actor_nursery._actor.wait_for_peer( | ||||
|                 subactor.uid) | ||||
| 
 | ||||
|             except trio.Cancelled: | ||||
|                 cancelled_during_spawn = True | ||||
|                 # we may cancel before the child connects back in which | ||||
|                 # case avoid clobbering the pdb tty. | ||||
|                 if debug_mode(): | ||||
|                     with trio.CancelScope(shield=True): | ||||
|                         # don't clobber an ongoing pdb | ||||
|                         if is_root_process(): | ||||
|                             await maybe_wait_for_debugger() | ||||
| 
 | ||||
|                         elif proc is not None: | ||||
|                             async with acquire_debug_lock(uid): | ||||
|                                 # soft wait on the proc to terminate | ||||
|                                 with trio.move_on_after(0.5): | ||||
|                                     await proc.wait() | ||||
|                 raise | ||||
| 
 | ||||
|             portal = Portal(chan) | ||||
|             actor_nursery._children[subactor.uid] = ( | ||||
|                 subactor, proc, portal) | ||||
| 
 | ||||
|             # send additional init params | ||||
|             await chan.send({ | ||||
|                 "_parent_main_data": subactor._parent_main_data, | ||||
|                 "enable_modules": subactor.enable_modules, | ||||
|                 "_arb_addr": subactor._arb_addr, | ||||
|                 "bind_host": bind_addr[0], | ||||
|                 "bind_port": bind_addr[1], | ||||
|                 "_runtime_vars": _runtime_vars, | ||||
|             }) | ||||
| 
 | ||||
|             # track subactor in current nursery | ||||
|             curr_actor = current_actor() | ||||
|             curr_actor._actoruid2nursery[subactor.uid] = actor_nursery | ||||
| 
 | ||||
|             # resume caller at next checkpoint now that child is up | ||||
|             task_status.started(portal) | ||||
| 
 | ||||
|             # wait for ActorNursery.wait() to be called | ||||
|             with trio.CancelScope(shield=True): | ||||
|                 await actor_nursery._join_procs.wait() | ||||
| 
 | ||||
|             async with trio.open_nursery() as nursery: | ||||
|                 if portal in actor_nursery._cancel_after_result_on_exit: | ||||
|                     nursery.start_soon( | ||||
|                         cancel_on_completion, | ||||
|                         portal, | ||||
|                         subactor, | ||||
|                         errors | ||||
|                     ) | ||||
| 
 | ||||
|                 # This is a "soft" (cancellable) join/reap which | ||||
|                 # will remote cancel the actor on a ``trio.Cancelled`` | ||||
|                 # condition. | ||||
|                 await soft_wait( | ||||
|                     proc, | ||||
|                     trio.Process.wait, | ||||
|                     portal | ||||
|                 ) | ||||
| 
 | ||||
|                 # cancel result waiter that may have been spawned in | ||||
|                 # tandem if not done already | ||||
|                 log.warning( | ||||
|                     "Cancelling existing result waiter task for " | ||||
|                     f"{subactor.uid}") | ||||
|                 nursery.cancel_scope.cancel() | ||||
| 
 | ||||
|         finally: | ||||
|             # The "hard" reap since no actor zombies are allowed! | ||||
|             # XXX: do this **after** cancellation/tearfown to avoid | ||||
|             # killing the process too early. | ||||
|             if proc: | ||||
|                 log.cancel(f'Hard reap sequence starting for {uid}') | ||||
|         except trio.Cancelled: | ||||
|             cancelled_during_spawn = True | ||||
|             # we may cancel before the child connects back in which | ||||
|             # case avoid clobbering the pdb tty. | ||||
|             if debug_mode(): | ||||
|                 with trio.CancelScope(shield=True): | ||||
| 
 | ||||
|                     # don't clobber an ongoing pdb | ||||
|                     if cancelled_during_spawn: | ||||
|                         # Try again to avoid TTY clobbering. | ||||
|                         async with acquire_debug_lock(uid): | ||||
|                     if is_root_process(): | ||||
|                         await maybe_wait_for_debugger() | ||||
| 
 | ||||
|                     elif proc is not None: | ||||
|                         async with acquire_debug_lock(subactor.uid): | ||||
|                             # soft wait on the proc to terminate | ||||
|                             with trio.move_on_after(0.5): | ||||
|                                 await proc.wait() | ||||
|             raise | ||||
| 
 | ||||
|                     if is_root_process(): | ||||
|                         await maybe_wait_for_debugger( | ||||
|                             child_in_debug=_runtime_vars.get( | ||||
|                                 '_debug_mode', False), | ||||
|                         ) | ||||
|         # a sub-proc ref **must** exist now | ||||
|         assert proc | ||||
| 
 | ||||
|                     if proc.poll() is None: | ||||
|                         log.cancel(f"Attempting to hard kill {proc}") | ||||
|                         await do_hard_kill(proc) | ||||
| 
 | ||||
|                     log.debug(f"Joined {proc}") | ||||
|             else: | ||||
|                 log.warning('Nursery cancelled before sub-proc started') | ||||
| 
 | ||||
|             if not cancelled_during_spawn: | ||||
|                 # pop child entry to indicate we no longer managing this | ||||
|                 # subactor | ||||
|                 actor_nursery._children.pop(subactor.uid) | ||||
| 
 | ||||
|     else: | ||||
|         # `multiprocessing` | ||||
|         # async with trio.open_nursery() as nursery: | ||||
|         await mp_new_proc( | ||||
|             name=name, | ||||
|             actor_nursery=actor_nursery, | ||||
|             subactor=subactor, | ||||
|             errors=errors, | ||||
| 
 | ||||
|             # passed through to actor main | ||||
|             bind_addr=bind_addr, | ||||
|             parent_addr=parent_addr, | ||||
|             _runtime_vars=_runtime_vars, | ||||
|             infect_asyncio=infect_asyncio, | ||||
|             task_status=task_status, | ||||
|         portal = Portal(chan) | ||||
|         actor_nursery._children[subactor.uid] = ( | ||||
|             subactor, | ||||
|             proc, | ||||
|             portal, | ||||
|         ) | ||||
| 
 | ||||
|         # send additional init params | ||||
|         await chan.send({ | ||||
|             "_parent_main_data": subactor._parent_main_data, | ||||
|             "enable_modules": subactor.enable_modules, | ||||
|             "_arb_addr": subactor._arb_addr, | ||||
|             "bind_host": bind_addr[0], | ||||
|             "bind_port": bind_addr[1], | ||||
|             "_runtime_vars": _runtime_vars, | ||||
|         }) | ||||
| 
 | ||||
| async def mp_new_proc( | ||||
|         # track subactor in current nursery | ||||
|         curr_actor = current_actor() | ||||
|         curr_actor._actoruid2nursery[subactor.uid] = actor_nursery | ||||
| 
 | ||||
|         # resume caller at next checkpoint now that child is up | ||||
|         task_status.started(portal) | ||||
| 
 | ||||
|         # wait for ActorNursery.wait() to be called | ||||
|         with trio.CancelScope(shield=True): | ||||
|             await actor_nursery._join_procs.wait() | ||||
| 
 | ||||
|         async with trio.open_nursery() as nursery: | ||||
|             if portal in actor_nursery._cancel_after_result_on_exit: | ||||
|                 nursery.start_soon( | ||||
|                     cancel_on_completion, | ||||
|                     portal, | ||||
|                     subactor, | ||||
|                     errors | ||||
|                 ) | ||||
| 
 | ||||
|             # This is a "soft" (cancellable) join/reap which | ||||
|             # will remote cancel the actor on a ``trio.Cancelled`` | ||||
|             # condition. | ||||
|             await soft_wait( | ||||
|                 proc, | ||||
|                 trio.Process.wait, | ||||
|                 portal | ||||
|             ) | ||||
| 
 | ||||
|             # cancel result waiter that may have been spawned in | ||||
|             # tandem if not done already | ||||
|             log.warning( | ||||
|                 "Cancelling existing result waiter task for " | ||||
|                 f"{subactor.uid}") | ||||
|             nursery.cancel_scope.cancel() | ||||
| 
 | ||||
|     finally: | ||||
|         # XXX NOTE XXX: The "hard" reap since no actor zombies are | ||||
|         # allowed! Do this **after** cancellation/teardown to avoid | ||||
|         # killing the process too early. | ||||
|         if proc: | ||||
|             log.cancel(f'Hard reap sequence starting for {subactor.uid}') | ||||
|             with trio.CancelScope(shield=True): | ||||
| 
 | ||||
|                 # don't clobber an ongoing pdb | ||||
|                 if cancelled_during_spawn: | ||||
|                     # Try again to avoid TTY clobbering. | ||||
|                     async with acquire_debug_lock(subactor.uid): | ||||
|                         with trio.move_on_after(0.5): | ||||
|                             await proc.wait() | ||||
| 
 | ||||
|                 if is_root_process(): | ||||
|                     # TODO: solve the following issue where we need | ||||
|                     # to do a similar wait like this but in an | ||||
|                     # "intermediary" parent actor that itself isn't | ||||
|                     # in debug but has a child that is, and we need | ||||
|                     # to hold off on relaying SIGINT until that child | ||||
|                     # is complete. | ||||
|                     # https://github.com/goodboy/tractor/issues/320 | ||||
|                     await maybe_wait_for_debugger( | ||||
|                         child_in_debug=_runtime_vars.get( | ||||
|                             '_debug_mode', False), | ||||
|                     ) | ||||
| 
 | ||||
|                 if proc.poll() is None: | ||||
|                     log.cancel(f"Attempting to hard kill {proc}") | ||||
|                     await do_hard_kill(proc) | ||||
| 
 | ||||
|                 log.debug(f"Joined {proc}") | ||||
|         else: | ||||
|             log.warning('Nursery cancelled before sub-proc started') | ||||
| 
 | ||||
|         if not cancelled_during_spawn: | ||||
|             # pop child entry to indicate we no longer managing this | ||||
|             # subactor | ||||
|             actor_nursery._children.pop(subactor.uid) | ||||
| 
 | ||||
| 
 | ||||
| async def mp_proc( | ||||
|     name: str, | ||||
|     actor_nursery: 'ActorNursery',  # type: ignore  # noqa | ||||
|     actor_nursery: ActorNursery,  # type: ignore  # noqa | ||||
|     subactor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
|     # passed through to actor main | ||||
|  | @ -464,6 +531,7 @@ async def mp_new_proc( | |||
|     assert _ctx | ||||
|     start_method = _ctx.get_start_method() | ||||
|     if start_method == 'forkserver': | ||||
| 
 | ||||
|         from multiprocessing import forkserver  # type: ignore | ||||
|         # XXX do our hackery on the stdlib to avoid multiple | ||||
|         # forkservers (one at each subproc layer). | ||||
|  | @ -476,23 +544,24 @@ async def mp_new_proc( | |||
|             # forkserver.set_forkserver_preload(enable_modules) | ||||
|             forkserver.ensure_running() | ||||
|             fs_info = ( | ||||
|                 fs._forkserver_address, | ||||
|                 fs._forkserver_alive_fd, | ||||
|                 fs._forkserver_address,  # type: ignore  # noqa | ||||
|                 fs._forkserver_alive_fd,  # type: ignore  # noqa | ||||
|                 getattr(fs, '_forkserver_pid', None), | ||||
|                 getattr( | ||||
|                     resource_tracker._resource_tracker, '_pid', None), | ||||
|                 resource_tracker._resource_tracker._fd, | ||||
|             ) | ||||
|         else: | ||||
|         else:  # request to forkserver to fork a new child | ||||
|             assert curr_actor._forkserver_info | ||||
|             fs_info = ( | ||||
|                 fs._forkserver_address, | ||||
|                 fs._forkserver_alive_fd, | ||||
|                 fs._forkserver_pid, | ||||
|                 fs._forkserver_address,  # type: ignore  # noqa | ||||
|                 fs._forkserver_alive_fd,  # type: ignore  # noqa | ||||
|                 fs._forkserver_pid,  # type: ignore  # noqa | ||||
|                 resource_tracker._resource_tracker._pid, | ||||
|                 resource_tracker._resource_tracker._fd, | ||||
|              ) = curr_actor._forkserver_info | ||||
|     else: | ||||
|         # spawn method | ||||
|         fs_info = (None, None, None, None, None) | ||||
| 
 | ||||
|     proc: mp.Process = _ctx.Process(  # type: ignore | ||||
|  | @ -501,7 +570,7 @@ async def mp_new_proc( | |||
|             subactor, | ||||
|             bind_addr, | ||||
|             fs_info, | ||||
|             start_method, | ||||
|             _spawn_method, | ||||
|             parent_addr, | ||||
|             infect_asyncio, | ||||
|         ), | ||||
|  | @ -595,4 +664,16 @@ async def mp_new_proc( | |||
|         log.debug(f"Joined {proc}") | ||||
| 
 | ||||
|         # pop child entry to indicate we are no longer managing subactor | ||||
|         subactor, proc, portal = actor_nursery._children.pop(subactor.uid) | ||||
|         actor_nursery._children.pop(subactor.uid) | ||||
| 
 | ||||
|         # TODO: prolly report to ``mypy`` how this causes all sorts of | ||||
|         # false errors.. | ||||
|         # subactor, proc, portal = actor_nursery._children.pop(subactor.uid) | ||||
| 
 | ||||
| 
 | ||||
| # proc spawning backend target map | ||||
| _methods: dict[SpawnMethodKey, Callable] = { | ||||
|     'trio': trio_proc, | ||||
|     'mp_spawn': mp_proc, | ||||
|     'mp_forkserver': mp_proc, | ||||
| } | ||||
|  |  | |||
|  | @ -18,8 +18,10 @@ | |||
| Per process state | ||||
| 
 | ||||
| """ | ||||
| from typing import Optional, Dict, Any | ||||
| from collections.abc import Mapping | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     Any, | ||||
| ) | ||||
| 
 | ||||
| import trio | ||||
| 
 | ||||
|  | @ -27,7 +29,7 @@ from ._exceptions import NoRuntime | |||
| 
 | ||||
| 
 | ||||
| _current_actor: Optional['Actor'] = None  # type: ignore # noqa | ||||
| _runtime_vars: Dict[str, Any] = { | ||||
| _runtime_vars: dict[str, Any] = { | ||||
|     '_debug_mode': False, | ||||
|     '_is_root': False, | ||||
|     '_root_mailbox': (None, None) | ||||
|  | @ -43,30 +45,6 @@ def current_actor(err_on_no_runtime: bool = True) -> 'Actor':  # type: ignore # | |||
|     return _current_actor | ||||
| 
 | ||||
| 
 | ||||
| _conc_name_getters = { | ||||
|     'task': trio.lowlevel.current_task, | ||||
|     'actor': current_actor | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| class ActorContextInfo(Mapping): | ||||
|     "Dyanmic lookup for local actor and task names" | ||||
|     _context_keys = ('task', 'actor') | ||||
| 
 | ||||
|     def __len__(self): | ||||
|         return len(self._context_keys) | ||||
| 
 | ||||
|     def __iter__(self): | ||||
|         return iter(self._context_keys) | ||||
| 
 | ||||
|     def __getitem__(self, key: str) -> str: | ||||
|         try: | ||||
|             return _conc_name_getters[key]().name  # type: ignore | ||||
|         except RuntimeError: | ||||
|             # no local actor/task context initialized yet | ||||
|             return f'no {key} context' | ||||
| 
 | ||||
| 
 | ||||
| def is_main_process() -> bool: | ||||
|     """Bool determining if this actor is running in the top-most process. | ||||
|     """ | ||||
|  |  | |||
|  | @ -23,8 +23,10 @@ import inspect | |||
| from contextlib import asynccontextmanager | ||||
| from dataclasses import dataclass | ||||
| from typing import ( | ||||
|     Any, Optional, Callable, | ||||
|     AsyncGenerator, Dict, | ||||
|     Any, | ||||
|     Optional, | ||||
|     Callable, | ||||
|     AsyncGenerator, | ||||
|     AsyncIterator | ||||
| ) | ||||
| 
 | ||||
|  | @ -48,12 +50,13 @@ log = get_logger(__name__) | |||
| # - use __slots__ on ``Context``? | ||||
| 
 | ||||
| 
 | ||||
| class ReceiveMsgStream(trio.abc.ReceiveChannel): | ||||
| class MsgStream(trio.abc.Channel): | ||||
|     ''' | ||||
|     A IPC message stream for receiving logically sequenced values over | ||||
|     an inter-actor ``Channel``. This is the type returned to a local | ||||
|     task which entered either ``Portal.open_stream_from()`` or | ||||
|     ``Context.open_stream()``. | ||||
|     A bidirectional message stream for receiving logically sequenced | ||||
|     values over an inter-actor IPC ``Channel``. | ||||
| 
 | ||||
|     This is the type returned to a local task which entered either | ||||
|     ``Portal.open_stream_from()`` or ``Context.open_stream()``. | ||||
| 
 | ||||
|     Termination rules: | ||||
| 
 | ||||
|  | @ -95,6 +98,9 @@ class ReceiveMsgStream(trio.abc.ReceiveChannel): | |||
|         if self._eoc: | ||||
|             raise trio.EndOfChannel | ||||
| 
 | ||||
|         if self._closed: | ||||
|             raise trio.ClosedResourceError('This stream was closed') | ||||
| 
 | ||||
|         try: | ||||
|             msg = await self._rx_chan.receive() | ||||
|             return msg['yield'] | ||||
|  | @ -108,6 +114,9 @@ class ReceiveMsgStream(trio.abc.ReceiveChannel): | |||
|             # - 'error' | ||||
|             # possibly just handle msg['stop'] here! | ||||
| 
 | ||||
|             if self._closed: | ||||
|                 raise trio.ClosedResourceError('This stream was closed') | ||||
| 
 | ||||
|             if msg.get('stop') or self._eoc: | ||||
|                 log.debug(f"{self} was stopped at remote end") | ||||
| 
 | ||||
|  | @ -187,7 +196,6 @@ class ReceiveMsgStream(trio.abc.ReceiveChannel): | |||
|             return | ||||
| 
 | ||||
|         self._eoc = True | ||||
|         self._closed = True | ||||
| 
 | ||||
|         # NOTE: this is super subtle IPC messaging stuff: | ||||
|         # Relay stop iteration to far end **iff** we're | ||||
|  | @ -204,29 +212,32 @@ class ReceiveMsgStream(trio.abc.ReceiveChannel): | |||
| 
 | ||||
|         # In the bidirectional case, `Context.open_stream()` will create | ||||
|         # the `Actor._cids2qs` entry from a call to | ||||
|         # `Actor.get_context()` and will send the stop message in | ||||
|         # ``__aexit__()`` on teardown so it **does not** need to be | ||||
|         # called here. | ||||
|         if not self._ctx._portal: | ||||
|             # Only for 2 way streams can we can send stop from the | ||||
|             # caller side. | ||||
|             try: | ||||
|                 # NOTE: if this call is cancelled we expect this end to | ||||
|                 # handle as though the stop was never sent (though if it | ||||
|                 # was it shouldn't matter since it's unlikely a user | ||||
|                 # will try to re-use a stream after attemping to close | ||||
|                 # it). | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     await self._ctx.send_stop() | ||||
|         # `Actor.get_context()` and will call us here to send the stop | ||||
|         # msg in ``__aexit__()`` on teardown. | ||||
|         try: | ||||
|             # NOTE: if this call is cancelled we expect this end to | ||||
|             # handle as though the stop was never sent (though if it | ||||
|             # was it shouldn't matter since it's unlikely a user | ||||
|             # will try to re-use a stream after attempting to close | ||||
|             # it). | ||||
|             with trio.CancelScope(shield=True): | ||||
|                 await self._ctx.send_stop() | ||||
| 
 | ||||
|             except ( | ||||
|                 trio.BrokenResourceError, | ||||
|                 trio.ClosedResourceError | ||||
|             ): | ||||
|                 # the underlying channel may already have been pulled | ||||
|                 # in which case our stop message is meaningless since | ||||
|                 # it can't traverse the transport. | ||||
|                 log.debug(f'Channel for {self} was already closed') | ||||
|         except ( | ||||
|             trio.BrokenResourceError, | ||||
|             trio.ClosedResourceError | ||||
|         ): | ||||
|             # the underlying channel may already have been pulled | ||||
|             # in which case our stop message is meaningless since | ||||
|             # it can't traverse the transport. | ||||
|             ctx = self._ctx | ||||
|             log.warning( | ||||
|                 f'Stream was already destroyed?\n' | ||||
|                 f'actor: {ctx.chan.uid}\n' | ||||
|                 f'ctx id: {ctx.cid}' | ||||
|             ) | ||||
| 
 | ||||
|         self._closed = True | ||||
| 
 | ||||
|         # Do we close the local mem chan ``self._rx_chan`` ??!? | ||||
| 
 | ||||
|  | @ -269,7 +280,8 @@ class ReceiveMsgStream(trio.abc.ReceiveChannel): | |||
|         self, | ||||
| 
 | ||||
|     ) -> AsyncIterator[BroadcastReceiver]: | ||||
|         '''Allocate and return a ``BroadcastReceiver`` which delegates | ||||
|         ''' | ||||
|         Allocate and return a ``BroadcastReceiver`` which delegates | ||||
|         to this message stream. | ||||
| 
 | ||||
|         This allows multiple local tasks to receive each their own copy | ||||
|  | @ -306,15 +318,15 @@ class ReceiveMsgStream(trio.abc.ReceiveChannel): | |||
|         async with self._broadcaster.subscribe() as bstream: | ||||
|             assert bstream.key != self._broadcaster.key | ||||
|             assert bstream._recv == self._broadcaster._recv | ||||
| 
 | ||||
|             # NOTE: we patch on a `.send()` to the bcaster so that the | ||||
|             # caller can still conduct 2-way streaming using this | ||||
|             # ``bstream`` handle transparently as though it was the msg | ||||
|             # stream instance. | ||||
|             bstream.send = self.send  # type: ignore | ||||
| 
 | ||||
|             yield bstream | ||||
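A hypothetical consumer-side sketch of the fan-out API above (``portal`` and ``target_fn`` are stand-ins): each local task subscribes for its own copy of every received msg while ``.send()`` keeps working through the patched-on handle.

import tractor

async def consume(portal: tractor.Portal, target_fn) -> None:
    # ``target_fn`` must be an async-gen func exposed by the remote actor
    async with portal.open_stream_from(target_fn) as stream:
        async with stream.subscribe() as bstream:
            async for msg in bstream:
                print(msg)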
| 
 | ||||
| 
 | ||||
| class MsgStream(ReceiveMsgStream, trio.abc.Channel): | ||||
|     ''' | ||||
|     Bidirectional message stream for use within an inter-actor | ||||
|     ``Context``. | ||||
| 
 | ||||
|     ''' | ||||
|     async def send( | ||||
|         self, | ||||
|         data: Any | ||||
|  | @ -369,6 +381,8 @@ class Context: | |||
| 
 | ||||
|     # status flags | ||||
|     _cancel_called: bool = False | ||||
|     _cancel_msg: Optional[str] = None | ||||
|     _enter_debugger_on_cancel: bool = True | ||||
|     _started_called: bool = False | ||||
|     _started_received: bool = False | ||||
|     _stream_opened: bool = False | ||||
|  | @ -393,7 +407,7 @@ class Context: | |||
| 
 | ||||
|     async def _maybe_raise_from_remote_msg( | ||||
|         self, | ||||
|         msg: Dict[str, Any], | ||||
|         msg: dict[str, Any], | ||||
| 
 | ||||
|     ) -> None: | ||||
|         ''' | ||||
|  | @ -450,7 +464,11 @@ class Context: | |||
|                 if not self._scope_nursery._closed:  # type: ignore | ||||
|                     self._scope_nursery.start_soon(raiser) | ||||
| 
 | ||||
|     async def cancel(self) -> None: | ||||
|     async def cancel( | ||||
|         self, | ||||
|         msg: Optional[str] = None, | ||||
| 
 | ||||
|     ) -> None: | ||||
|         ''' | ||||
|         Cancel this inter-actor-task context. | ||||
| 
 | ||||
|  | @ -459,6 +477,8 @@ class Context: | |||
| 
 | ||||
|         ''' | ||||
|         side = 'caller' if self._portal else 'callee' | ||||
|         if msg: | ||||
|             assert side == 'callee', 'Only callee side can provide cancel msg' | ||||
| 
 | ||||
|         log.cancel(f'Cancelling {side} side of context to {self.chan.uid}') | ||||
| 
 | ||||
|  | @ -495,8 +515,10 @@ class Context: | |||
|                     log.cancel( | ||||
|                         "Timed out on cancelling remote task " | ||||
|                         f"{cid} for {self._portal.channel.uid}") | ||||
| 
 | ||||
|         # callee side remote task | ||||
|         else: | ||||
|             # callee side remote task | ||||
|             self._cancel_msg = msg | ||||
| 
 | ||||
|             # TODO: should we have an explicit cancel message | ||||
|             # or is relaying the local `trio.Cancelled` as an | ||||
|  | @ -581,23 +603,23 @@ class Context: | |||
|         async with MsgStream( | ||||
|             ctx=self, | ||||
|             rx_chan=ctx._recv_chan, | ||||
|         ) as rchan: | ||||
|         ) as stream: | ||||
| 
 | ||||
|             if self._portal: | ||||
|                 self._portal._streams.add(rchan) | ||||
|                 self._portal._streams.add(stream) | ||||
| 
 | ||||
|             try: | ||||
|                 self._stream_opened = True | ||||
| 
 | ||||
|                 # ensure we aren't cancelled before delivering | ||||
|                 # the stream | ||||
|                 # XXX: do we need this? | ||||
|                 # ensure we aren't cancelled before yielding the stream | ||||
|                 # await trio.lowlevel.checkpoint() | ||||
|                 yield rchan | ||||
|                 yield stream | ||||
| 
 | ||||
|                 # XXX: Make the stream "one-shot use".  On exit, signal | ||||
|                 # NOTE: Make the stream "one-shot use".  On exit, signal | ||||
|                 # ``trio.EndOfChannel``/``StopAsyncIteration`` to the | ||||
|                 # far end. | ||||
|                 await self.send_stop() | ||||
|                 await stream.aclose() | ||||
| 
 | ||||
|             finally: | ||||
|                 if self._portal: | ||||
|  |  | |||
|  | @ -18,19 +18,23 @@ | |||
| ``trio`` inspired apis and helpers | ||||
| 
 | ||||
| """ | ||||
| from contextlib import asynccontextmanager as acm | ||||
| from functools import partial | ||||
| import inspect | ||||
| from typing import Tuple, List, Dict, Optional, TYPE_CHECKING | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     TYPE_CHECKING, | ||||
| ) | ||||
| import typing | ||||
| import warnings | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import trio | ||||
| from async_generator import asynccontextmanager | ||||
| 
 | ||||
| from ._debug import maybe_wait_for_debugger | ||||
| from ._state import current_actor, is_main_process | ||||
| from .log import get_logger, get_loglevel | ||||
| from ._actor import Actor | ||||
| from ._runtime import Actor | ||||
| from ._portal import Portal | ||||
| from ._exceptions import is_multi_cancelled | ||||
| from ._root import open_root_actor | ||||
|  | @ -43,7 +47,7 @@ if TYPE_CHECKING: | |||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| _default_bind_addr: Tuple[str, int] = ('127.0.0.1', 0) | ||||
| _default_bind_addr: tuple[str, int] = ('127.0.0.1', 0) | ||||
| 
 | ||||
| 
 | ||||
| class ActorNursery: | ||||
|  | @ -79,15 +83,19 @@ class ActorNursery: | |||
|         actor: Actor, | ||||
|         ria_nursery: trio.Nursery, | ||||
|         da_nursery: trio.Nursery, | ||||
|         errors: Dict[Tuple[str, str], Exception], | ||||
|         errors: dict[tuple[str, str], BaseException], | ||||
|     ) -> None: | ||||
|         # self.supervisor = supervisor  # TODO | ||||
|         self._actor: Actor = actor | ||||
|         self._ria_nursery = ria_nursery | ||||
|         self._da_nursery = da_nursery | ||||
|         self._children: Dict[ | ||||
|             Tuple[str, str], | ||||
|             Tuple[Actor, mp.Process, Optional[Portal]] | ||||
|         self._children: dict[ | ||||
|             tuple[str, str], | ||||
|             tuple[ | ||||
|                 Actor, | ||||
|                 trio.Process | mp.Process, | ||||
|                 Optional[Portal], | ||||
|             ] | ||||
|         ] = {} | ||||
|         # portals spawned with ``run_in_actor()`` are | ||||
|         # cancelled when their "main" result arrives | ||||
|  | @ -102,12 +110,12 @@ class ActorNursery: | |||
|         self, | ||||
|         name: str, | ||||
|         *, | ||||
|         bind_addr: Tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: List[str] = None, | ||||
|         enable_modules: List[str] = None, | ||||
|         loglevel: str = None,  # set log level per subactor | ||||
|         nursery: trio.Nursery = None, | ||||
|         debug_mode: Optional[bool] = None, | ||||
|         bind_addr: tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: list[str] | None = None, | ||||
|         enable_modules: list[str] | None = None, | ||||
|         loglevel: str | None = None,  # set log level per subactor | ||||
|         nursery: trio.Nursery | None = None, | ||||
|         debug_mode: bool | None = None, | ||||
|         infect_asyncio: bool = False, | ||||
|     ) -> Portal: | ||||
|         ''' | ||||
|  | @ -173,10 +181,10 @@ class ActorNursery: | |||
|         *, | ||||
| 
 | ||||
|         name: Optional[str] = None, | ||||
|         bind_addr: Tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: Optional[List[str]] = None, | ||||
|         enable_modules: List[str] = None, | ||||
|         loglevel: str = None,  # set log level per subactor | ||||
|         bind_addr: tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: list[str] | None = None, | ||||
|         enable_modules: list[str] | None = None, | ||||
|         loglevel: str | None = None,  # set log level per subactor | ||||
|         infect_asyncio: bool = False, | ||||
| 
 | ||||
|         **kwargs,  # explicit args to ``fn`` | ||||
|  | @ -287,13 +295,17 @@ class ActorNursery: | |||
|         self._join_procs.set() | ||||
| 
 | ||||
| 
 | ||||
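A minimal, hedged sketch of the annotated spawn APIs above, assuming the usual ``run_in_actor()`` flow where a one-off result is collected through the returned ``Portal``; the ``add`` function is a placeholder.

    import tractor
    import trio

    async def add(x: int, y: int) -> int:
        return x + y

    async def main():
        async with tractor.open_nursery() as an:
            # spawn a subactor, run ``add`` to completion, then collect
            # the return value through the portal
            portal = await an.run_in_actor(add, x=1, y=2)
            assert await portal.result() == 3

    if __name__ == '__main__':
        trio.run(main)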
| @asynccontextmanager | ||||
| @acm | ||||
| async def _open_and_supervise_one_cancels_all_nursery( | ||||
|     actor: Actor, | ||||
| 
 | ||||
| ) -> typing.AsyncGenerator[ActorNursery, None]: | ||||
| 
 | ||||
|     # TODO: yay or nay? | ||||
|     __tracebackhide__ = True | ||||
| 
 | ||||
|     # the collection of errors retrieved from spawned sub-actors | ||||
|     errors: Dict[Tuple[str, str], Exception] = {} | ||||
|     errors: dict[tuple[str, str], BaseException] = {} | ||||
| 
 | ||||
|     # This is the outermost level "daemon actor" nursery. It is awaited | ||||
|     # **after** the below inner "run in actor nursery". This allows for | ||||
|  | @ -326,19 +338,17 @@ async def _open_and_supervise_one_cancels_all_nursery( | |||
|                     # after we yield upwards | ||||
|                     yield anursery | ||||
| 
 | ||||
|                     # When we didn't error in the caller's scope, | ||||
|                     # signal all process-monitor-tasks to conduct | ||||
|                     # the "hard join phase". | ||||
|                     log.runtime( | ||||
|                         f"Waiting on subactors {anursery._children} " | ||||
|                         "to complete" | ||||
|                     ) | ||||
| 
 | ||||
|                     # Last bit before first nursery block ends in the case | ||||
|                     # where we didn't error in the caller's scope | ||||
| 
 | ||||
|                     # signal all process monitor tasks to conduct | ||||
|                     # hard join phase. | ||||
|                     anursery._join_procs.set() | ||||
| 
 | ||||
|                 except BaseException as err: | ||||
|                 except BaseException as inner_err: | ||||
|                     errors[actor.uid] = inner_err | ||||
| 
 | ||||
|                     # If we error in the root but the debugger is | ||||
|                     # engaged we don't want to prematurely kill (and | ||||
|  | @ -355,49 +365,42 @@ async def _open_and_supervise_one_cancels_all_nursery( | |||
|                     # worry more are coming). | ||||
|                     anursery._join_procs.set() | ||||
| 
 | ||||
|                     try: | ||||
|                         # XXX: hypothetically an error could be | ||||
|                         # raised and then a cancel signal shows up | ||||
|                         # slightly after in which case the `else:` | ||||
|                         # block here might not complete?  For now, | ||||
|                         # shield both. | ||||
|                         with trio.CancelScope(shield=True): | ||||
|                             etype = type(err) | ||||
|                             if etype in ( | ||||
|                                 trio.Cancelled, | ||||
|                                 KeyboardInterrupt | ||||
|                             ) or ( | ||||
|                                 is_multi_cancelled(err) | ||||
|                             ): | ||||
|                                 log.cancel( | ||||
|                                     f"Nursery for {current_actor().uid} " | ||||
|                                     f"was cancelled with {etype}") | ||||
|                             else: | ||||
|                                 log.exception( | ||||
|                                     f"Nursery for {current_actor().uid} " | ||||
|                                     f"errored with {err}, ") | ||||
|                     # XXX: hypothetically an error could be | ||||
|                     # raised and then a cancel signal shows up | ||||
|                     # slightly after in which case the `else:` | ||||
|                     # block here might not complete?  For now, | ||||
|                     # shield both. | ||||
|                     with trio.CancelScope(shield=True): | ||||
|                         etype = type(inner_err) | ||||
|                         if etype in ( | ||||
|                             trio.Cancelled, | ||||
|                             KeyboardInterrupt | ||||
|                         ) or ( | ||||
|                             is_multi_cancelled(inner_err) | ||||
|                         ): | ||||
|                             log.cancel( | ||||
|                                 f"Nursery for {current_actor().uid} " | ||||
|                                 f"was cancelled with {etype}") | ||||
|                         else: | ||||
|                             log.exception( | ||||
|                                 f"Nursery for {current_actor().uid} " | ||||
|                                 f"errored with") | ||||
| 
 | ||||
|                             # cancel all subactors | ||||
|                             await anursery.cancel() | ||||
|                         # cancel all subactors | ||||
|                         await anursery.cancel() | ||||
| 
 | ||||
|                     except trio.MultiError as merr: | ||||
|                         # If we receive additional errors while waiting on | ||||
|                         # remaining subactors that were cancelled, | ||||
|                         # aggregate those errors with the original error | ||||
|                         # that triggered this teardown. | ||||
|                         if err not in merr.exceptions: | ||||
|                             raise trio.MultiError(merr.exceptions + [err]) | ||||
|                     else: | ||||
|                         raise | ||||
|             # ria_nursery scope end | ||||
| 
 | ||||
|                 # ria_nursery scope end | ||||
| 
 | ||||
|         # XXX: do we need a `trio.Cancelled` catch here as well? | ||||
|         # this is the catch around the ``.run_in_actor()`` nursery | ||||
|         # TODO: this is the handler around the ``.run_in_actor()`` | ||||
|         # nursery. Ideally we can drop this entirely in the future as | ||||
|         # the whole ``.run_in_actor()`` API should be built "on top of" | ||||
|         # this lower level spawn-request-cancel "daemon actor" API where | ||||
|         # a local in-actor task nursery is used with one-to-one task | ||||
|     # + `await Portal.run()` calls, with results/errors handled | ||||
|     # directly (inline) by the local nursery. | ||||
|         except ( | ||||
| 
 | ||||
|             Exception, | ||||
|             trio.MultiError, | ||||
|             BaseExceptionGroup, | ||||
|             trio.Cancelled | ||||
| 
 | ||||
|         ) as err: | ||||
|  | @ -429,18 +432,20 @@ async def _open_and_supervise_one_cancels_all_nursery( | |||
|                     with trio.CancelScope(shield=True): | ||||
|                         await anursery.cancel() | ||||
| 
 | ||||
|                 # use `MultiError` as needed | ||||
|                 # use `BaseExceptionGroup` as needed | ||||
|                 if len(errors) > 1: | ||||
|                     raise trio.MultiError(tuple(errors.values())) | ||||
|                     raise BaseExceptionGroup( | ||||
|                         'tractor.ActorNursery errored with', | ||||
|                         tuple(errors.values()), | ||||
|                     ) | ||||
|                 else: | ||||
|                     raise list(errors.values())[0] | ||||
| 
 | ||||
|         # ria_nursery scope end - nursery checkpoint | ||||
| 
 | ||||
|     # after nursery exit | ||||
|         # da_nursery scope end - nursery checkpoint | ||||
|     # final exit | ||||
| 
 | ||||
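A hedged sketch of what a caller may now see from the supervision logic above: when more than one subactor error is collected, a ``BaseExceptionGroup`` (builtin on Python 3.11+, otherwise from the ``exceptiongroup`` backport imported above) is re-raised; the ``flaky`` function is a placeholder.

    import tractor
    import trio

    async def flaky() -> None:
        raise RuntimeError('boom')

    async def main():
        try:
            async with tractor.open_nursery() as an:
                for i in range(2):
                    await an.run_in_actor(flaky, name=f'flaky_{i}')
        except BaseExceptionGroup as beg:
            # each collected (remote) subactor error is preserved
            for err in beg.exceptions:
                print(type(err), err)

    if __name__ == '__main__':
        trio.run(main)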
| 
 | ||||
| @asynccontextmanager | ||||
| @acm | ||||
| async def open_nursery( | ||||
|     **kwargs, | ||||
| 
 | ||||
|  |  | |||
|  | @ -26,7 +26,10 @@ support provided by ``tractor.Context.open_stream()`` and friends. | |||
| from __future__ import annotations | ||||
| import inspect | ||||
| import typing | ||||
| from typing import Dict, Any, Set, Callable, List, Tuple | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Callable, | ||||
| ) | ||||
| from functools import partial | ||||
| from async_generator import aclosing | ||||
| 
 | ||||
|  | @ -44,8 +47,8 @@ log = get_logger('messaging') | |||
| 
 | ||||
| async def fan_out_to_ctxs( | ||||
|     pub_async_gen_func: typing.Callable,  # it's an async gen ... gd mypy | ||||
|     topics2ctxs: Dict[str, list], | ||||
|     packetizer: typing.Callable = None, | ||||
|     topics2ctxs: dict[str, list], | ||||
|     packetizer: typing.Callable | None = None, | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Request and fan out quotes to each subscribed actor channel. | ||||
|  | @ -61,7 +64,7 @@ async def fan_out_to_ctxs( | |||
| 
 | ||||
|         async for published in pub_gen: | ||||
| 
 | ||||
|             ctx_payloads: List[Tuple[Context, Any]] = [] | ||||
|             ctx_payloads: list[tuple[Context, Any]] = [] | ||||
| 
 | ||||
|             for topic, data in published.items(): | ||||
|                 log.debug(f"publishing {topic, data}") | ||||
|  | @ -103,8 +106,8 @@ async def fan_out_to_ctxs( | |||
| 
 | ||||
| def modify_subs( | ||||
| 
 | ||||
|     topics2ctxs: Dict[str, List[Context]], | ||||
|     topics: Set[str], | ||||
|     topics2ctxs: dict[str, list[Context]], | ||||
|     topics: set[str], | ||||
|     ctx: Context, | ||||
| 
 | ||||
| ) -> None: | ||||
|  | @ -136,20 +139,20 @@ def modify_subs( | |||
|             topics2ctxs.pop(topic) | ||||
| 
 | ||||
| 
 | ||||
| _pub_state: Dict[str, dict] = {} | ||||
| _pubtask2lock: Dict[str, trio.StrictFIFOLock] = {} | ||||
| _pub_state: dict[str, dict] = {} | ||||
| _pubtask2lock: dict[str, trio.StrictFIFOLock] = {} | ||||
| 
 | ||||
| 
 | ||||
| def pub( | ||||
|     wrapped: typing.Callable = None, | ||||
|     wrapped: typing.Callable | None = None, | ||||
|     *, | ||||
|     tasks: Set[str] = set(), | ||||
|     tasks: set[str] = set(), | ||||
| ): | ||||
|     """Publisher async generator decorator. | ||||
| 
 | ||||
|     A publisher can be called multiple times from different actors but | ||||
|     will only spawn a finite set of internal tasks to stream values to | ||||
|     each caller. The ``tasks: Set[str]`` argument to the decorator | ||||
|     each caller. The ``tasks: set[str]`` argument to the decorator | ||||
|     specifies the names of the mutex set of publisher tasks.  When the | ||||
|     publisher function is called, an argument ``task_name`` must be | ||||
|     passed to specify which task (of the set named in ``tasks``) should | ||||
|  | @ -158,9 +161,9 @@ def pub( | |||
|     necessary. | ||||
| 
 | ||||
|     Values yielded from the decorated async generator must be | ||||
|     ``Dict[str, Dict[str, Any]]`` where the first level key is the topic | ||||
|     ``dict[str, dict[str, Any]]`` where the first level key is the topic | ||||
|     string and determines which subscription the packet will be | ||||
|     delivered to and the value is a packet ``Dict[str, Any]`` by default | ||||
|     delivered to and the value is a packet ``dict[str, Any]`` by default | ||||
|     of the form: | ||||
| 
 | ||||
|     .. ::python | ||||
|  | @ -186,7 +189,7 @@ def pub( | |||
| 
 | ||||
| 
 | ||||
|     The publisher must be called passing in the following arguments: | ||||
|     - ``topics: Set[str]`` the topic sequence or "subscriptions" | ||||
|     - ``topics: set[str]`` the topic sequence or "subscriptions" | ||||
|     - ``task_name: str`` the task to use (if ``tasks`` was passed) | ||||
|     - ``ctx: Context`` the tractor context (only needed if calling the | ||||
|       pub func without a nursery, otherwise this is provided implicitly) | ||||
|  | @ -231,7 +234,7 @@ def pub( | |||
|     if wrapped is None: | ||||
|         return partial(pub, tasks=tasks) | ||||
| 
 | ||||
|     task2lock: Dict[str, trio.StrictFIFOLock] = {} | ||||
|     task2lock: dict[str, trio.StrictFIFOLock] = {} | ||||
| 
 | ||||
|     for name in tasks: | ||||
|         task2lock[name] = trio.StrictFIFOLock() | ||||
|  | @ -243,11 +246,11 @@ def pub( | |||
|         # `wrapt` docs | ||||
|         async def _execute( | ||||
|             ctx: Context, | ||||
|             topics: Set[str], | ||||
|             topics: set[str], | ||||
|             *args, | ||||
|             # *, | ||||
|             task_name: str = None,  # default: only one task allocated | ||||
|             packetizer: Callable = None, | ||||
|             task_name: str | None = None,  # default: only one task allocated | ||||
|             packetizer: Callable | None = None, | ||||
|             **kwargs, | ||||
|         ): | ||||
|             if task_name is None: | ||||
|  |  | |||
|  | @ -18,12 +18,14 @@ | |||
| Log like a forester! | ||||
| 
 | ||||
| """ | ||||
| from collections.abc import Mapping | ||||
| import sys | ||||
| import logging | ||||
| import colorlog  # type: ignore | ||||
| from typing import Optional | ||||
| 
 | ||||
| from ._state import ActorContextInfo | ||||
| import trio | ||||
| 
 | ||||
| from ._state import current_actor | ||||
| 
 | ||||
| 
 | ||||
| _proj_name: str = 'tractor' | ||||
|  | @ -36,7 +38,8 @@ LOG_FORMAT = ( | |||
|     # "{bold_white}{log_color}{asctime}{reset}" | ||||
|     "{log_color}{asctime}{reset}" | ||||
|     " {bold_white}{thin_white}({reset}" | ||||
|     "{thin_white}{actor}, {process}, {task}){reset}{bold_white}{thin_white})" | ||||
|     "{thin_white}{actor_name}[{actor_uid}], " | ||||
|     "{process}, {task}){reset}{bold_white}{thin_white})" | ||||
|     " {reset}{log_color}[{reset}{bold_log_color}{levelname}{reset}{log_color}]" | ||||
|     " {log_color}{name}" | ||||
|     " {thin_white}{filename}{log_color}:{reset}{thin_white}{lineno}{log_color}" | ||||
|  | @ -136,9 +139,40 @@ class StackLevelAdapter(logging.LoggerAdapter): | |||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| _conc_name_getters = { | ||||
|     'task': lambda: trio.lowlevel.current_task().name, | ||||
|     'actor': lambda: current_actor(), | ||||
|     'actor_name': lambda: current_actor().name, | ||||
|     'actor_uid': lambda: current_actor().uid[1][:6], | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| class ActorContextInfo(Mapping): | ||||
|     "Dynamic lookup for local actor and task names" | ||||
|     _context_keys = ( | ||||
|         'task', | ||||
|         'actor', | ||||
|         'actor_name', | ||||
|         'actor_uid', | ||||
|     ) | ||||
| 
 | ||||
|     def __len__(self): | ||||
|         return len(self._context_keys) | ||||
| 
 | ||||
|     def __iter__(self): | ||||
|         return iter(self._context_keys) | ||||
| 
 | ||||
|     def __getitem__(self, key: str) -> str: | ||||
|         try: | ||||
|             return _conc_name_getters[key]() | ||||
|         except RuntimeError: | ||||
|             # no local actor/task context initialized yet | ||||
|             return f'no {key} context' | ||||
| 
 | ||||
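A small sketch of the mapping defined above (assuming it remains importable from ``tractor.log``): it lazily resolves the ``{actor_name}``/``{actor_uid}``/``{task}`` fields used by the new ``LOG_FORMAT`` and falls back to placeholder strings outside of any actor/task context.

    from tractor.log import ActorContextInfo

    info = ActorContextInfo()
    # behaves like a read-only dict whose values are computed on access;
    # outside any running actor/trio task this prints e.g.
    # {'task': 'no task context', 'actor': 'no actor context', ...}
    print(dict(info))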
| 
 | ||||
| def get_logger( | ||||
| 
 | ||||
|     name: str = None, | ||||
|     name: str | None = None, | ||||
|     _root_name: str = _proj_name, | ||||
| 
 | ||||
| ) -> StackLevelAdapter: | ||||
|  | @ -173,7 +207,7 @@ def get_logger( | |||
| 
 | ||||
| 
 | ||||
| def get_console_log( | ||||
|     level: str = None, | ||||
|     level: str | None = None, | ||||
|     **kwargs, | ||||
| ) -> logging.LoggerAdapter: | ||||
|     '''Get the package logger and enable a handler which writes to stderr. | ||||
|  |  | |||
|  | @ -24,7 +24,7 @@ Built-in messaging patterns, types, APIs and helpers. | |||
| # ``pkgutil.resolve_name()`` internally uses | ||||
| # ``importlib.import_module()`` which can be filtered by inserting | ||||
| # a ``MetaPathFinder`` into ``sys.meta_path`` (which we could do before | ||||
| # entering the ``Actor._process_messages()`` loop). | ||||
| # entering the ``_runtime.process_messages()`` loop). | ||||
| # - https://github.com/python/cpython/blob/main/Lib/pkgutil.py#L645 | ||||
| # - https://stackoverflow.com/questions/1350466/preventing-python-code-from-importing-certain-modules | ||||
| #   - https://stackoverflow.com/a/63320902 | ||||
|  |  | |||
|  | @ -1,17 +0,0 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| from ._tractor_test import tractor_test | ||||
|  | @ -1,104 +0,0 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| import inspect | ||||
| import platform | ||||
| from functools import partial, wraps | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| __all__ = ['tractor_test'] | ||||
| 
 | ||||
| 
 | ||||
| def tractor_test(fn): | ||||
|     """ | ||||
|     Use: | ||||
| 
 | ||||
|     @tractor_test | ||||
|     async def test_whatever(): | ||||
|         await ... | ||||
| 
 | ||||
|     If fixtures: | ||||
| 
 | ||||
|         - ``arb_addr`` (a socket addr tuple where arbiter is listening) | ||||
|         - ``loglevel`` (logging level passed to tractor internals) | ||||
|         - ``start_method`` (subprocess spawning backend) | ||||
| 
 | ||||
|     are defined in the `pytest` fixture space they will be automatically | ||||
|     injected to tests declaring these funcargs. | ||||
|     """ | ||||
|     @wraps(fn) | ||||
|     def wrapper( | ||||
|         *args, | ||||
|         loglevel=None, | ||||
|         arb_addr=None, | ||||
|         start_method=None, | ||||
|         **kwargs | ||||
|     ): | ||||
|         # __tracebackhide__ = True | ||||
| 
 | ||||
|         if 'arb_addr' in inspect.signature(fn).parameters: | ||||
|             # injects test suite fixture value to test as well | ||||
|             # as `run()` | ||||
|             kwargs['arb_addr'] = arb_addr | ||||
| 
 | ||||
|         if 'loglevel' in inspect.signature(fn).parameters: | ||||
|             # allows test suites to define a 'loglevel' fixture | ||||
|             # that activates the internal logging | ||||
|             kwargs['loglevel'] = loglevel | ||||
| 
 | ||||
|         if start_method is None: | ||||
|             if platform.system() == "Windows": | ||||
|                 start_method = 'spawn' | ||||
|             else: | ||||
|                 start_method = 'trio' | ||||
| 
 | ||||
|         if 'start_method' in inspect.signature(fn).parameters: | ||||
|             # set of subprocess spawning backends | ||||
|             kwargs['start_method'] = start_method | ||||
| 
 | ||||
|         if kwargs: | ||||
| 
 | ||||
|             # use explicit root actor start | ||||
| 
 | ||||
|             async def _main(): | ||||
|                 async with tractor.open_root_actor( | ||||
|                     # **kwargs, | ||||
|                     arbiter_addr=arb_addr, | ||||
|                     loglevel=loglevel, | ||||
|                     start_method=start_method, | ||||
| 
 | ||||
|                     # TODO: only enable when pytest is passed --pdb | ||||
|                     # debug_mode=True, | ||||
| 
 | ||||
|                 ) as actor: | ||||
|                     await fn(*args, **kwargs) | ||||
| 
 | ||||
|             main = _main | ||||
| 
 | ||||
|         else: | ||||
|             # use implicit root actor start | ||||
|             main = partial(fn, *args, **kwargs) | ||||
| 
 | ||||
|         return trio.run(main) | ||||
|             # arbiter_addr=arb_addr, | ||||
|             # loglevel=loglevel, | ||||
|             # start_method=start_method, | ||||
|         # ) | ||||
| 
 | ||||
|     return wrapper | ||||
|  | @ -466,11 +466,11 @@ async def open_channel_from( | |||
|         ): | ||||
|             # sync to a "started()"-like first delivered value from the | ||||
|             # ``asyncio`` task. | ||||
|             first = await chan.receive() | ||||
| 
 | ||||
|             # deliver stream handle upward | ||||
|             try: | ||||
|                 with chan._trio_cs: | ||||
|                     first = await chan.receive() | ||||
| 
 | ||||
|                     # deliver stream handle upward | ||||
|                     yield first, chan | ||||
|             finally: | ||||
|                 chan._trio_exited = True | ||||
|  | @ -491,16 +491,18 @@ def run_as_asyncio_guest( | |||
|     SC semantics. | ||||
| 
 | ||||
|     ''' | ||||
|     # Uh, oh. :o | ||||
|     # Uh, oh. | ||||
|     # | ||||
|     # :o | ||||
| 
 | ||||
|     # It looks like your event loop has caught a case of the ``trio``s. | ||||
| 
 | ||||
|     # :() | ||||
| 
 | ||||
|     # Don't worry, we've heard you'll barely notice. You might hallucinate | ||||
|     # a few more propagating errors and feel like your digestion has | ||||
|     # slowed but if anything gets too bad your parents will know about | ||||
|     # it. | ||||
|     # Don't worry, we've heard you'll barely notice. You might | ||||
|     # hallucinate a few more propagating errors and feel like your | ||||
|     # digestion has slowed but if anything gets too bad your parents | ||||
|     # will know about it. | ||||
| 
 | ||||
|     # :) | ||||
| 
 | ||||
|  |  | |||
|  | @ -21,6 +21,7 @@ Sugary patterns for trio + tractor designs. | |||
| from ._mngrs import ( | ||||
|     gather_contexts, | ||||
|     maybe_open_context, | ||||
|     maybe_open_nursery, | ||||
| ) | ||||
| from ._broadcast import ( | ||||
|     broadcast_receiver, | ||||
|  | @ -35,4 +36,5 @@ __all__ = [ | |||
|     'BroadcastReceiver', | ||||
|     'Lagged', | ||||
|     'maybe_open_context', | ||||
|     'maybe_open_nursery', | ||||
| ] | ||||
|  |  | |||
|  | @ -23,7 +23,6 @@ from __future__ import annotations | |||
| from abc import abstractmethod | ||||
| from collections import deque | ||||
| from contextlib import asynccontextmanager | ||||
| from dataclasses import dataclass | ||||
| from functools import partial | ||||
| from operator import ne | ||||
| from typing import Optional, Callable, Awaitable, Any, AsyncIterator, Protocol | ||||
|  | @ -33,7 +32,10 @@ import trio | |||
| from trio._core._run import Task | ||||
| from trio.abc import ReceiveChannel | ||||
| from trio.lowlevel import current_task | ||||
| from msgspec import Struct | ||||
| from tractor.log import get_logger | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| # A regular invariant generic type | ||||
| T = TypeVar("T") | ||||
|  | @ -86,8 +88,7 @@ class Lagged(trio.TooSlowError): | |||
|     ''' | ||||
| 
 | ||||
| 
 | ||||
| @dataclass | ||||
| class BroadcastState: | ||||
| class BroadcastState(Struct): | ||||
|     ''' | ||||
|     Common state to all receivers of a broadcast. | ||||
| 
 | ||||
|  | @ -110,7 +111,35 @@ class BroadcastState: | |||
|     eoc: bool = False | ||||
| 
 | ||||
|     # If the broadcaster was cancelled, we might as well track it | ||||
|     cancelled: bool = False | ||||
|     cancelled: dict[int, Task] = {} | ||||
| 
 | ||||
|     def statistics(self) -> dict[str, Any]: | ||||
|         ''' | ||||
|         Return broadcast receiver group "statistics" like many of | ||||
|         ``trio``'s internal task-sync primitives. | ||||
| 
 | ||||
|         ''' | ||||
|         key: int | None | ||||
|         ev: trio.Event | None | ||||
| 
 | ||||
|         subs = self.subs | ||||
|         if self.recv_ready is not None: | ||||
|             key, ev = self.recv_ready | ||||
|         else: | ||||
|             key = ev = None | ||||
| 
 | ||||
|         qlens: dict[int, int] = {} | ||||
|         for tid, sz in subs.items(): | ||||
|             qlens[tid] = sz if sz != -1 else 0 | ||||
| 
 | ||||
|         return { | ||||
|             'open_consumers': len(subs), | ||||
|             'queued_len_by_task': qlens, | ||||
|             'max_buffer_size': self.maxlen, | ||||
|             'tasks_waiting': ev.statistics().tasks_waiting if ev else 0, | ||||
|             'tasks_cancelled': self.cancelled, | ||||
|             'next_value_receiver_id': key, | ||||
|         } | ||||
| 
 | ||||
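A hedged peek at the new introspection helper from consumer code, where ``bstream`` stands for any subscribed ``BroadcastReceiver`` (the ``_state`` attribute is an internal detail):

    stats = bstream._state.statistics()
    print(
        f"consumers: {stats['open_consumers']} "
        f"queued: {stats['queued_len_by_task']} "
        f"waiting: {stats['tasks_waiting']}"
    )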
| 
 | ||||
| class BroadcastReceiver(ReceiveChannel): | ||||
|  | @ -128,23 +157,40 @@ class BroadcastReceiver(ReceiveChannel): | |||
|         rx_chan: AsyncReceiver, | ||||
|         state: BroadcastState, | ||||
|         receive_afunc: Optional[Callable[[], Awaitable[Any]]] = None, | ||||
|         raise_on_lag: bool = True, | ||||
| 
 | ||||
|     ) -> None: | ||||
| 
 | ||||
|         # register the original underlying (clone) | ||||
|         self.key = id(self) | ||||
|         self._state = state | ||||
| 
 | ||||
|         # each consumer has an int count which indicates | ||||
|         # which index contains the next value that the task has not yet | ||||
|         # consumed and thus should read. In the "up-to-date" case the | ||||
|         # consumer task must wait for a new value from the underlying | ||||
|         # receiver and we use ``-1`` as the sentinel for this state. | ||||
|         state.subs[self.key] = -1 | ||||
| 
 | ||||
|         # underlying for this receiver | ||||
|         self._rx = rx_chan | ||||
|         self._recv = receive_afunc or rx_chan.receive | ||||
|         self._closed: bool = False | ||||
|         self._raise_on_lag = raise_on_lag | ||||
| 
 | ||||
|     async def receive(self) -> ReceiveType: | ||||
|     def receive_nowait( | ||||
|         self, | ||||
|         _key: int | None = None, | ||||
|         _state: BroadcastState | None = None, | ||||
| 
 | ||||
|         key = self.key | ||||
|         state = self._state | ||||
|     ) -> Any: | ||||
|         ''' | ||||
|         Sync version of `.receive()` which does all the low level work | ||||
|         of receiving from the underlying/wrapped receive channel. | ||||
| 
 | ||||
|         ''' | ||||
|         key = _key or self.key | ||||
|         state = _state or self._state | ||||
| 
 | ||||
|         # TODO: ideally we can make some way to "lock out" the | ||||
|         # underlying receive channel in some way such that if some task | ||||
|  | @ -177,128 +223,173 @@ class BroadcastReceiver(ReceiveChannel): | |||
|                 # return this value." | ||||
|                 # https://docs.rs/tokio/1.11.0/tokio/sync/broadcast/index.html#lagging | ||||
| 
 | ||||
|                 mxln = state.maxlen | ||||
|                 lost = seq - mxln | ||||
| 
 | ||||
|                 # decrement to the last value and expect | ||||
|                 # consumer to either handle the ``Lagged`` and come back | ||||
|                 # or bail out on its own (thus un-subscribing) | ||||
|                 state.subs[key] = state.maxlen - 1 | ||||
|                 state.subs[key] = mxln - 1 | ||||
| 
 | ||||
|                 # this task was overrun by the producer side | ||||
|                 task: Task = current_task() | ||||
|                 raise Lagged(f'Task {task.name} was overrun') | ||||
|                 msg = f'Task `{task.name}` overrun and dropped `{lost}` values' | ||||
| 
 | ||||
|                 if self._raise_on_lag: | ||||
|                     raise Lagged(msg) | ||||
|                 else: | ||||
|                     log.warning(msg) | ||||
|                     return self.receive_nowait(_key, _state) | ||||
| 
 | ||||
|             state.subs[key] -= 1 | ||||
|             return value | ||||
| 
 | ||||
|         # current task already has the latest value **and** is the | ||||
|         # first task to begin waiting for a new one | ||||
|         if state.recv_ready is None: | ||||
|         raise trio.WouldBlock | ||||
| 
 | ||||
|             if self._closed: | ||||
|                 raise trio.ClosedResourceError | ||||
|     async def _receive_from_underlying( | ||||
|         self, | ||||
|         key: int, | ||||
|         state: BroadcastState, | ||||
| 
 | ||||
|             event = trio.Event() | ||||
|             state.recv_ready = key, event | ||||
|     ) -> ReceiveType: | ||||
| 
 | ||||
|         if self._closed: | ||||
|             raise trio.ClosedResourceError | ||||
| 
 | ||||
|         event = trio.Event() | ||||
|         assert state.recv_ready is None | ||||
|         state.recv_ready = key, event | ||||
| 
 | ||||
|         try: | ||||
|             # if we're cancelled here it should be | ||||
|             # fine to bail without affecting any other consumers | ||||
|             # right? | ||||
|             try: | ||||
|                 value = await self._recv() | ||||
|             value = await self._recv() | ||||
| 
 | ||||
|                 # items with lower indices are "newer" | ||||
|                 # NOTE: ``collections.deque`` implicitly takes care of | ||||
|                 # truncating values outside our ``state.maxlen``. In the | ||||
|                 # alt-backend-array-case we'll need to make sure this is | ||||
|                 # implemented in similar ring-buffer-ish style. | ||||
|                 state.queue.appendleft(value) | ||||
|             # items with lower indices are "newer" | ||||
|             # NOTE: ``collections.deque`` implicitly takes care of | ||||
|             # truncating values outside our ``state.maxlen``. In the | ||||
|             # alt-backend-array-case we'll need to make sure this is | ||||
|             # implemented in similar ring-buffer-ish style. | ||||
|             state.queue.appendleft(value) | ||||
| 
 | ||||
|                 # broadcast new value to all subscribers by increasing | ||||
|                 # all sequence numbers that will point in the queue to | ||||
|                 # their latest available value. | ||||
|             # broadcast new value to all subscribers by increasing | ||||
|             # all sequence numbers that will point in the queue to | ||||
|             # their latest available value. | ||||
| 
 | ||||
|                 # don't decrement the sequence for this task since we | ||||
|                 # already retrieved the last value | ||||
|             # don't decrement the sequence for this task since we | ||||
|             # already retrieved the last value | ||||
| 
 | ||||
|                 # XXX: which of these impls is fastest? | ||||
|             # XXX: which of these impls is fastest? | ||||
|             # subs = state.subs.copy() | ||||
|             # subs.pop(key) | ||||
| 
 | ||||
|                 # subs = state.subs.copy() | ||||
|                 # subs.pop(key) | ||||
| 
 | ||||
|                 for sub_key in filter( | ||||
|                     # lambda k: k != key, state.subs, | ||||
|                     partial(ne, key), state.subs, | ||||
|                 ): | ||||
|                     state.subs[sub_key] += 1 | ||||
| 
 | ||||
|                 # NOTE: this should ONLY be set if the above task was *NOT* | ||||
|                 # cancelled on the `._recv()` call. | ||||
|                 event.set() | ||||
|                 return value | ||||
| 
 | ||||
|             except trio.EndOfChannel: | ||||
|                 # if any one consumer gets an EOC from the underlying | ||||
|                 # receiver we need to unblock and send that signal to | ||||
|                 # all other consumers. | ||||
|                 self._state.eoc = True | ||||
|                 if event.statistics().tasks_waiting: | ||||
|                     event.set() | ||||
|                 raise | ||||
| 
 | ||||
|             except ( | ||||
|                 trio.Cancelled, | ||||
|             for sub_key in filter( | ||||
|                 # lambda k: k != key, state.subs, | ||||
|                 partial(ne, key), state.subs, | ||||
|             ): | ||||
|                 # handle cancelled specially otherwise sibling | ||||
|                 # consumers will be awoken with a sequence of -1 | ||||
|                 # and will potentially try to rewait the underlying | ||||
|                 # receiver instead of just cancelling immediately. | ||||
|                 self._state.cancelled = True | ||||
|                 if event.statistics().tasks_waiting: | ||||
|                     event.set() | ||||
|                 raise | ||||
|                 state.subs[sub_key] += 1 | ||||
| 
 | ||||
|             finally: | ||||
|             # NOTE: this should ONLY be set if the above task was *NOT* | ||||
|             # cancelled on the `._recv()` call. | ||||
|             event.set() | ||||
|             return value | ||||
| 
 | ||||
|                 # Reset receiver waiter task event for next blocking condition. | ||||
|                 # this MUST be reset even if the above ``.recv()`` call | ||||
|                 # was cancelled to avoid the next consumer from blocking on | ||||
|                 # an event that won't be set! | ||||
|                 state.recv_ready = None | ||||
|         except trio.EndOfChannel: | ||||
|             # if any one consumer gets an EOC from the underlying | ||||
|             # receiver we need to unblock and send that signal to | ||||
|             # all other consumers. | ||||
|             self._state.eoc = True | ||||
|             if event.statistics().tasks_waiting: | ||||
|                 event.set() | ||||
|             raise | ||||
| 
 | ||||
|         except ( | ||||
|             trio.Cancelled, | ||||
|         ): | ||||
|             # handle cancelled specially otherwise sibling | ||||
|             # consumers will be awoken with a sequence of -1 | ||||
|             # and will potentially try to rewait the underlying | ||||
|             # receiver instead of just cancelling immediately. | ||||
|             self._state.cancelled[key] = current_task() | ||||
|             if event.statistics().tasks_waiting: | ||||
|                 event.set() | ||||
|             raise | ||||
| 
 | ||||
|         finally: | ||||
|             # Reset receiver waiter task event for next blocking condition. | ||||
|             # This MUST be reset even if the above ``.recv()`` call | ||||
|             # was cancelled, to avoid the next consumer blocking on | ||||
|             # an event that won't be set! | ||||
|             state.recv_ready = None | ||||
| 
 | ||||
|     async def receive(self) -> ReceiveType: | ||||
|         key = self.key | ||||
|         state = self._state | ||||
| 
 | ||||
|         try: | ||||
|             return self.receive_nowait( | ||||
|                 _key=key, | ||||
|                 _state=state, | ||||
|             ) | ||||
|         except trio.WouldBlock: | ||||
|             pass | ||||
| 
 | ||||
|         # current task already has the latest value **and** is the | ||||
|         # first task to begin waiting for a new one so we begin blocking | ||||
|         # until rescheduled with a new value from the underlying. | ||||
|         if state.recv_ready is None: | ||||
|             return await self._receive_from_underlying(key, state) | ||||
| 
 | ||||
|         # This task is all caught up and ready to receive the latest | ||||
|         # value, so queue sched it on the internal event. | ||||
|         # value, so queue/schedule it to be woken on the next internal | ||||
|         # event. | ||||
|         else: | ||||
|             seq = state.subs[key] | ||||
|             assert seq == -1  # sanity | ||||
|             _, ev = state.recv_ready | ||||
|             await ev.wait() | ||||
|             while state.recv_ready is not None: | ||||
|                 # seq = state.subs[key] | ||||
|                 # assert seq == -1  # sanity | ||||
|                 _, ev = state.recv_ready | ||||
|                 await ev.wait() | ||||
|                 try: | ||||
|                     return self.receive_nowait( | ||||
|                         _key=key, | ||||
|                         _state=state, | ||||
|                     ) | ||||
|                 except trio.WouldBlock: | ||||
|                     if self._closed: | ||||
|                         raise trio.ClosedResourceError | ||||
| 
 | ||||
|             # NOTE: if we ever would like the behaviour where if the | ||||
|             # first task to recv on the underlying is cancelled but it | ||||
|             # still DOES trigger the ``.recv_ready`` event, we'll likely need | ||||
|             # this logic: | ||||
|                     subs = state.subs | ||||
|                     if ( | ||||
|                         len(subs) == 1 | ||||
|                         and key in subs | ||||
|                         # or cancelled | ||||
|                     ): | ||||
|                         # XXX: we are the last and only user of this BR so | ||||
|                         # likely it makes sense to unwind back to the | ||||
|                         # underlying? | ||||
|                         # import tractor | ||||
|                         # await tractor.breakpoint() | ||||
|                         log.warning( | ||||
|                             f'Only one sub left for {self}?\n' | ||||
|                             'We can probably unwind from breceiver?' | ||||
|                         ) | ||||
| 
 | ||||
|             if seq > -1: | ||||
|                 # stuff from above.. | ||||
|                 seq = state.subs[key] | ||||
|                     # XXX: In the case where the first task to allocate the | ||||
|                     # ``.recv_ready`` event is cancelled we will be woken | ||||
|                     # with a non-incremented sequence number (the ``-1`` | ||||
|                     # sentinel) and thus will read the oldest value if we | ||||
|                     # use that. Instead we need to detect if we have not | ||||
|                     # been incremented and then receive again. | ||||
|                     # return await self.receive() | ||||
| 
 | ||||
|                 value = state.queue[seq] | ||||
|                 state.subs[key] -= 1 | ||||
|                 return value | ||||
| 
 | ||||
|             elif seq == -1: | ||||
|                 # XXX: In the case where the first task to allocate the | ||||
|                 # ``.recv_ready`` event is cancelled we will be woken with | ||||
|                 # a non-incremented sequence number and thus will read the | ||||
|                 # oldest value if we use that. Instead we need to detect if | ||||
|                 # we have not been incremented and then receive again. | ||||
|                 return await self.receive() | ||||
| 
 | ||||
|             else: | ||||
|                 raise ValueError(f'Invalid sequence {seq}!?') | ||||
|             return await self._receive_from_underlying(key, state) | ||||
| 
 | ||||
|     @asynccontextmanager | ||||
|     async def subscribe( | ||||
|         self, | ||||
|         raise_on_lag: bool = True, | ||||
| 
 | ||||
|     ) -> AsyncIterator[BroadcastReceiver]: | ||||
|         ''' | ||||
|         Subscribe for values from this broadcast receiver. | ||||
|  | @ -316,6 +407,7 @@ class BroadcastReceiver(ReceiveChannel): | |||
|             rx_chan=self._rx, | ||||
|             state=state, | ||||
|             receive_afunc=self._recv, | ||||
|             raise_on_lag=raise_on_lag, | ||||
|         ) | ||||
|         # assert clone in state.subs | ||||
|         assert br.key in state.subs | ||||
|  | @ -352,7 +444,8 @@ def broadcast_receiver( | |||
| 
 | ||||
|     recv_chan: AsyncReceiver, | ||||
|     max_buffer_size: int, | ||||
|     **kwargs, | ||||
|     receive_afunc: Optional[Callable[[], Awaitable[Any]]] = None, | ||||
|     raise_on_lag: bool = True, | ||||
| 
 | ||||
| ) -> BroadcastReceiver: | ||||
| 
 | ||||
|  | @ -363,5 +456,6 @@ def broadcast_receiver( | |||
|             maxlen=max_buffer_size, | ||||
|             subs={}, | ||||
|         ), | ||||
|         **kwargs, | ||||
|         receive_afunc=receive_afunc, | ||||
|         raise_on_lag=raise_on_lag, | ||||
|     ) | ||||
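A usage sketch of the new ``raise_on_lag`` flag (channel contents and task names are made up): with it disabled, a slow subscriber only logs a warning on overruns and keeps reading from the oldest value still in the buffer instead of seeing ``Lagged``.

    import trio
    from tractor.trionics import broadcast_receiver

    async def main():
        tx, rx = trio.open_memory_channel(1)
        # wrap the underlying receiver; overruns warn instead of raising
        brx = broadcast_receiver(rx, max_buffer_size=8, raise_on_lag=False)

        async def consume(name: str):
            async with brx.subscribe() as sub:
                async for value in sub:
                    print(name, value)

        async with trio.open_nursery() as n:
            n.start_soon(consume, 'fast')
            n.start_soon(consume, 'slow')
            for i in range(5):
                await tx.send(i)
            await tx.aclose()

    if __name__ == '__main__':
        trio.run(main)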
|  |  | |||
|  | @ -19,6 +19,7 @@ Async context manager primitives with hard ``trio``-aware semantics | |||
| 
 | ||||
| ''' | ||||
| from contextlib import asynccontextmanager as acm | ||||
| import inspect | ||||
| from typing import ( | ||||
|     Any, | ||||
|     AsyncContextManager, | ||||
|  | @ -34,8 +35,8 @@ from typing import ( | |||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| 
 | ||||
| from ..log import get_logger | ||||
| from .._state import current_actor | ||||
| from ..log import get_logger | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
|  | @ -44,6 +45,25 @@ log = get_logger(__name__) | |||
| T = TypeVar("T") | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def maybe_open_nursery( | ||||
|     nursery: trio.Nursery | None = None, | ||||
|     shield: bool = False, | ||||
| ) -> AsyncGenerator[trio.Nursery, Any]: | ||||
|     ''' | ||||
|     Create a new nursery if None provided. | ||||
| 
 | ||||
|     Blocks on exit as expected if no input nursery is provided. | ||||
| 
 | ||||
|     ''' | ||||
|     if nursery is not None: | ||||
|         yield nursery | ||||
|     else: | ||||
|         async with trio.open_nursery() as nursery: | ||||
|             nursery.cancel_scope.shield = shield | ||||
|             yield nursery | ||||
| 
 | ||||
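A tiny sketch of the helper above: pass a caller's nursery straight through, or open (and block on) a private one when ``None`` is given.

    import trio
    from tractor.trionics import maybe_open_nursery

    async def serve(task_nursery: trio.Nursery | None = None) -> None:
        async with maybe_open_nursery(task_nursery) as n:
            # runs in the caller's nursery if one was passed, otherwise
            # in a freshly opened one that is awaited on exit
            n.start_soon(trio.sleep, 0.1)

    if __name__ == '__main__':
        trio.run(serve)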
| 
 | ||||
| async def _enter_and_wait( | ||||
| 
 | ||||
|     mngr: AsyncContextManager[T], | ||||
|  | @ -81,7 +101,7 @@ async def gather_contexts( | |||
|     This function is somewhat similar to common usage of | ||||
|     ``contextlib.AsyncExitStack.enter_async_context()`` (in a loop) in | ||||
|     combo with ``asyncio.gather()`` except the managers are concurrently | ||||
|     entered and exited cancellation just works. | ||||
|     entered and exited, and cancellation just works. | ||||
| 
 | ||||
|     ''' | ||||
|     unwrapped: dict[int, Optional[T]] = {}.fromkeys(id(mngr) for mngr in mngrs) | ||||
|  | @ -89,6 +109,17 @@ async def gather_contexts( | |||
|     all_entered = trio.Event() | ||||
|     parent_exit = trio.Event() | ||||
| 
 | ||||
|     # XXX: ensure greedy sequence of manager instances | ||||
|     # since a lazy inline generator doesn't seem to work | ||||
|     # with `async with` syntax. | ||||
|     mngrs = list(mngrs) | ||||
| 
 | ||||
|     if not mngrs: | ||||
|         raise ValueError( | ||||
|             'input mngrs is empty?\n' | ||||
|             'Did you try to use inline generator syntax?' | ||||
|         ) | ||||
| 
 | ||||
|     async with trio.open_nursery() as n: | ||||
|         for mngr in mngrs: | ||||
|             n.start_soon( | ||||
|  | @ -102,11 +133,13 @@ async def gather_contexts( | |||
|         # deliver control once all managers have started up | ||||
|         await all_entered.wait() | ||||
| 
 | ||||
|         yield tuple(unwrapped.values()) | ||||
| 
 | ||||
|         # we don't need a try/finally since cancellation will be triggered | ||||
|         # by the surrounding nursery on error. | ||||
|         parent_exit.set() | ||||
|         try: | ||||
|             yield tuple(unwrapped.values()) | ||||
|         finally: | ||||
|             # NOTE: this is ABSOLUTELY REQUIRED to avoid | ||||
|             # the following wacky bug: | ||||
|             # <tractorbugurlhere> | ||||
|             parent_exit.set() | ||||
| 
 | ||||
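A usage sketch for ``gather_contexts()`` (the ``open_conn`` manager and host names are made up); note the helper now materializes its input and rejects an empty sequence, per the check added above.

    from contextlib import asynccontextmanager as acm

    import trio
    from tractor.trionics import gather_contexts

    @acm
    async def open_conn(host: str):
        # stand-in for real connection setup/teardown
        yield f'conn-to-{host}'

    async def main():
        hosts = ['a.example.com', 'b.example.com']
        async with gather_contexts([open_conn(h) for h in hosts]) as conns:
            # all managers were entered concurrently
            print(conns)

    if __name__ == '__main__':
        trio.run(main)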
| 
 | ||||
| # Per actor task caching helpers. | ||||
|  | @ -119,13 +152,15 @@ class _Cache: | |||
|     a kept-alive-while-in-use async resource. | ||||
| 
 | ||||
|     ''' | ||||
|     lock = trio.Lock() | ||||
|     service_n: Optional[trio.Nursery] = None | ||||
|     locks: dict[Hashable, trio.Lock] = {} | ||||
|     users: int = 0 | ||||
|     values: dict[Any,  Any] = {} | ||||
|     resources: dict[ | ||||
|         Hashable, | ||||
|         tuple[trio.Nursery, trio.Event] | ||||
|     ] = {} | ||||
|     # nurseries: dict[int, trio.Nursery] = {} | ||||
|     no_more_users: Optional[trio.Event] = None | ||||
| 
 | ||||
|     @classmethod | ||||
|  | @ -156,7 +191,7 @@ async def maybe_open_context( | |||
|     # XXX: used as cache key after conversion to tuple | ||||
|     # and all embedded values must also be hashable | ||||
|     kwargs: dict = {}, | ||||
|     key: Hashable = None, | ||||
|     key: Hashable | Callable[..., Hashable] = None, | ||||
| 
 | ||||
| ) -> AsyncIterator[tuple[bool, T]]: | ||||
|     ''' | ||||
|  | @ -165,51 +200,69 @@ async def maybe_open_context( | |||
|     _Cached instance on a _Cache hit. | ||||
| 
 | ||||
|     ''' | ||||
|     # lock resource acquisition around task racing  / ``trio``'s | ||||
|     # scheduler protocol | ||||
|     await _Cache.lock.acquire() | ||||
|     fid = id(acm_func) | ||||
| 
 | ||||
|     ctx_key = (id(acm_func), key or tuple(kwargs.items())) | ||||
|     value = None | ||||
|     if inspect.isfunction(key): | ||||
|         ctx_key = (fid, key(**kwargs)) | ||||
|     else: | ||||
|         ctx_key = (fid, key or tuple(kwargs.items())) | ||||
| 
 | ||||
|     # yielded output | ||||
|     yielded: Any = None | ||||
| 
 | ||||
|     # Lock resource acquisition around task racing  / ``trio``'s | ||||
|     # scheduler protocol. | ||||
|     # NOTE: the lock is target context manager func specific in order | ||||
|     # to allow re-entrant use cases where one `maybe_open_context()` | ||||
|     # wrapped factory may want to call into another. | ||||
|     lock = _Cache.locks.setdefault(fid, trio.Lock()) | ||||
|     await lock.acquire() | ||||
| 
 | ||||
|     # XXX: one singleton nursery per actor and we want to | ||||
|     # have it not be closed until all consumers have exited (which is | ||||
|     # currently difficult to implement any other way besides using our | ||||
|     # pre-allocated runtime instance..) | ||||
|     service_n: trio.Nursery = current_actor()._service_n | ||||
| 
 | ||||
|     # TODO: is there any way to allocate | ||||
|     # a 'stays-open-till-last-task-finished' nursery? | ||||
|     # service_n: trio.Nursery | ||||
|     # async with maybe_open_nursery(_Cache.service_n) as service_n: | ||||
|     #     _Cache.service_n = service_n | ||||
| 
 | ||||
|     try: | ||||
|         # **critical section** that should prevent other tasks from | ||||
|         # checking the _Cache until complete otherwise the scheduler | ||||
|         # may switch and by accident we create more than one resource. | ||||
|         value = _Cache.values[ctx_key] | ||||
|         yielded = _Cache.values[ctx_key] | ||||
| 
 | ||||
|     except KeyError: | ||||
|         log.info(f'Allocating new {acm_func} for {ctx_key}') | ||||
| 
 | ||||
|         mngr = acm_func(**kwargs) | ||||
|         # TODO: avoid pulling from ``tractor`` internals and | ||||
|         # instead offer a "root nursery" in piker actors? | ||||
|         service_n = current_actor()._service_n | ||||
| 
 | ||||
|         # TODO: does this need to be a tractor "root nursery"? | ||||
|         resources = _Cache.resources | ||||
|         assert not resources.get(ctx_key), f'Resource exists? {ctx_key}' | ||||
|         ln, _ = resources[ctx_key] = (service_n, trio.Event()) | ||||
|         resources[ctx_key] = (service_n, trio.Event()) | ||||
| 
 | ||||
|         value = await ln.start( | ||||
|         # sync up to the mngr's yielded value | ||||
|         yielded = await service_n.start( | ||||
|             _Cache.run_ctx, | ||||
|             mngr, | ||||
|             ctx_key, | ||||
|         ) | ||||
|         _Cache.users += 1 | ||||
|         _Cache.lock.release() | ||||
|         yield False, value | ||||
|         lock.release() | ||||
|         yield False, yielded | ||||
| 
 | ||||
|     else: | ||||
|         log.info(f'Reusing _Cached resource for {ctx_key}') | ||||
|         _Cache.users += 1 | ||||
|         _Cache.lock.release() | ||||
|         yield True, value | ||||
|         lock.release() | ||||
|         yield True, yielded | ||||
| 
 | ||||
|     finally: | ||||
|         _Cache.users -= 1 | ||||
| 
 | ||||
|         if value is not None: | ||||
|         if yielded is not None: | ||||
|             # if no more consumers, teardown the client | ||||
|             if _Cache.users <= 0: | ||||
|                 log.info(f'De-allocating resource for {ctx_key}') | ||||
|  | @ -221,3 +274,5 @@ async def maybe_open_context( | |||
|                 if entry: | ||||
|                     _, no_more_users = entry | ||||
|                     no_more_users.set() | ||||
| 
 | ||||
|                 _Cache.locks.pop(fid) | ||||
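A hedged sketch of the new callable ``key`` support (``open_feed`` and the symbol value are placeholders). This helper runs the target manager in the actor's service nursery, so it must be called from a task inside a running ``tractor`` actor.

    from contextlib import asynccontextmanager as acm

    from tractor.trionics import maybe_open_context

    @acm
    async def open_feed(symbol: str):
        yield f'feed-for-{symbol}'

    async def use_feed() -> None:
        async with maybe_open_context(
            acm_func=open_feed,
            kwargs={'symbol': 'btcusdt'},
            # the cache key is now computed from the kwargs
            key=lambda symbol: symbol,
        ) as (cache_hit, feed):
            if cache_hit:
                print('reusing already allocated feed:', feed)
            else:
                print('allocated a new feed:', feed)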
|  |  | |||