Compare commits

60 Commits

master ... sigintsavi
	| Author | SHA1 | Date | 
|---|---|---|
|  | fec2ba004c | |
|  | ba9c914221 | |
|  | 30ee3f2dcc | |
|  | 0b4fc4fc47 | |
|  | 6b8c193221 | |
|  | 05167bdc70 | |
|  | fa21083b51 | |
|  | e6ad7a117b | |
|  | 4366873582 | |
|  | 9e6a22e52e | |
|  | 0ab49cd244 | |
|  | 3fafa87ea9 | |
|  | a6f5b9396a | |
|  | 61af2dc5aa | |
|  | ba857fe85c | |
|  | cb221b9e7c | |
|  | 3bc4778950 | |
|  | 5ae21e4753 | |
|  | d4a36e57d1 | |
|  | 58956ae950 | |
|  | a864f1e729 | |
|  | a4bc5f79ad | |
|  | c132b7f624 | |
|  | b659326d5b | |
|  | d971e9bc9d | |
|  | 611120c67c | |
|  | 7f6cace40b | |
|  | fe4adbf6f0 | |
|  | 6ccfeb17d5 | |
|  | 9bed332a94 | |
|  | 13df959d90 | |
|  | d0074291a1 | |
|  | 8559ad69f3 | |
|  | e519df1bd2 | |
|  | 24fd87d969 | |
|  | 91054a8a42 | |
|  | cdc7bf6549 | |
|  | c865d01e85 | |
|  | e1caeeb8de | |
|  | 7c25aa176f | |
|  | 3b7985292f | |
|  | e8fc820b92 | |
|  | b2fdbc44d1 | |
|  | f7823a46b8 | |
|  | f76c809c39 | |
|  | 9e56881163 | |
|  | 8291ee09b3 | |
|  | 4a441f0988 | |
|  | df0108a0bb | |
|  | 8537e17251 | |
|  | 20acb50d94 | |
|  | eab895864f | |
|  | 65a9f69d6c | |
|  | 24b6cc0209 | |
|  | f488db6d8d | |
|  | c5d335c057 | |
|  | 4594fe3501 | |
|  | 5f0262fd98 | |
|  | 59e7f29eed | |
|  | e2dfd6e99d | 
|  | @ -26,10 +26,8 @@ jobs: | |||
|         run: pip install -U . --upgrade-strategy eager -r requirements-test.txt | ||||
| 
 | ||||
|       - name: Run MyPy check | ||||
|         run: mypy tractor/ --ignore-missing-imports --show-traceback | ||||
|         run: mypy tractor/ --ignore-missing-imports | ||||
| 
 | ||||
|   # test that we can generate a software distribution and install it | ||||
|   # thus avoid missing file issues after packaging. | ||||
|   sdist-linux: | ||||
|     name: 'sdist' | ||||
|     runs-on: ubuntu-latest | ||||
|  | @ -59,12 +57,8 @@ jobs: | |||
|       fail-fast: false | ||||
|       matrix: | ||||
|         os: [ubuntu-latest] | ||||
|         python: ['3.10'] | ||||
|         spawn_backend: [ | ||||
|           'trio', | ||||
|           'mp_spawn', | ||||
|           'mp_forkserver', | ||||
|         ] | ||||
|         python: ['3.9', '3.10'] | ||||
|         spawn_backend: ['trio', 'mp'] | ||||
| 
 | ||||
|     steps: | ||||
| 
 | ||||
|  | @ -79,53 +73,42 @@ jobs: | |||
|       - name: Install dependencies | ||||
|         run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager | ||||
| 
 | ||||
|       - name: List dependencies | ||||
|         run: pip list | ||||
| 
 | ||||
|       - name: Run tests | ||||
|         run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx | ||||
|         run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs -v | ||||
| 
 | ||||
|   # We skip 3.10 on windows for now due to not having any collabs to | ||||
|   # debug the CI failures. Anyone wanting to hack and solve them is very | ||||
|   # welcome, but our primary user base is not using that OS. | ||||
|   # We skip 3.10 on windows for now due to | ||||
|   # https://github.com/pytest-dev/pytest/issues/8733 | ||||
|   # some kinda weird `pyreadline` issue.. | ||||
| 
 | ||||
|   # TODO: use job filtering to accomplish instead of repeated | ||||
|   # boilerplate as is above XD: | ||||
|   # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows | ||||
|   # - https://docs.github.com/en/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix | ||||
|   # - https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#jobsjob_idif | ||||
|   # testing-windows: | ||||
|   #   name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}' | ||||
|   #   timeout-minutes: 12 | ||||
|   #   runs-on: ${{ matrix.os }} | ||||
|   testing-windows: | ||||
|     name: '${{ matrix.os }} Python ${{ matrix.python }} - ${{ matrix.spawn_backend }}' | ||||
|     timeout-minutes: 12 | ||||
|     runs-on: ${{ matrix.os }} | ||||
| 
 | ||||
|   #   strategy: | ||||
|   #     fail-fast: false | ||||
|   #     matrix: | ||||
|   #       os: [windows-latest] | ||||
|   #       python: ['3.10'] | ||||
|   #       spawn_backend: ['trio', 'mp'] | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         os: [windows-latest] | ||||
|         python: ['3.9', '3.10'] | ||||
|         spawn_backend: ['trio', 'mp'] | ||||
| 
 | ||||
|   #   steps: | ||||
|     steps: | ||||
| 
 | ||||
|   #     - name: Checkout | ||||
|   #       uses: actions/checkout@v2 | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@v2 | ||||
| 
 | ||||
|   #     - name: Setup python | ||||
|   #       uses: actions/setup-python@v2 | ||||
|   #       with: | ||||
|   #         python-version: '${{ matrix.python }}' | ||||
|       - name: Setup python | ||||
|         uses: actions/setup-python@v2 | ||||
|         with: | ||||
|           python-version: '${{ matrix.python }}' | ||||
| 
 | ||||
|   #     - name: Install dependencies | ||||
|   #       run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager | ||||
|       - name: Install dependencies | ||||
|         run: pip install -U . -r requirements-test.txt -r requirements-docs.txt --upgrade-strategy eager | ||||
| 
 | ||||
|   #     # TODO: pretty sure this solves debugger deps-issues on windows, but it needs to | ||||
|   #     # be verified by someone with a native setup. | ||||
|   #     # - name: Force pyreadline3 | ||||
|   #     #   run: pip uninstall pyreadline; pip install -U pyreadline3 | ||||
| 
 | ||||
|   #     - name: List dependencies | ||||
|   #       run: pip list | ||||
| 
 | ||||
|   #     - name: Run tests | ||||
|   #       run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rsx | ||||
|       - name: Run tests | ||||
|         run: pytest tests/ --spawn-backend=${{ matrix.spawn_backend }} -rs --full-trace | ||||
|  |  | |||
|  | @ -0,0 +1,7 @@ | |||
| Add ``tractor.query_actor()`` an addr looker-upper which doesn't deliver | ||||
| a ``Portal`` instance and instead just a socket address ``tuple``. | ||||
| 
 | ||||
| Sometimes it's handy to just have a simple way to figure out if | ||||
| a "service" actor is up, so add this discovery helper for that. We'll | ||||
| prolly just leave it undocumented for now until we figure out | ||||
| a longer-term/better discovery system. | ||||
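
A minimal usage sketch of the idea (assuming ``query_actor()`` is an async context manager yielding either the registered socket address or ``None``; the service name and the explicit ``open_root_actor()`` wrapper here are illustrative only):

```python
import trio
import tractor


async def main():
    async with tractor.open_root_actor():
        # probe a "service" actor by name; we only get back its socket
        # address, never a full `Portal`.
        async with tractor.query_actor('data_feed') as sockaddr:
            if sockaddr:
                host, port = sockaddr
                print(f'data_feed is up at {host}:{port}')
            else:
                print('data_feed is not registered')


if __name__ == '__main__':
    trio.run(main)
```
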
							
								
								
									
NEWS.rst (142 lines changed)
							|  | @ -4,148 +4,6 @@ Changelog | |||
| 
 | ||||
| .. towncrier release notes start | ||||
| 
 | ||||
| tractor 0.1.0a5 (2022-08-03) | ||||
| ============================ | ||||
| 
 | ||||
| This is our final release supporting Python 3.9 since we will be moving | ||||
| internals to the new `match:` syntax from 3.10 going forward and | ||||
| further, we have officially dropped usage of the `msgpack` library and | ||||
| happily adopted `msgspec`. | ||||
| 
 | ||||
| Features | ||||
| -------- | ||||
| 
 | ||||
| - `#165 <https://github.com/goodboy/tractor/issues/165>`_: Add SIGINT | ||||
|   protection to our `pdbpp` based debugger subsystem such that for | ||||
|   (single-depth) actor trees in debug mode we ignore interrupts in any | ||||
|   actor currently holding the TTY lock thus avoiding clobbering IPC | ||||
|   connections and/or task and process state when working in the REPL. | ||||
| 
 | ||||
|   As a big note currently so called "nested" actor trees (trees with | ||||
|   actors having more than one parent/ancestor) are not fully supported | ||||
|   since we don't yet have a mechanism to relay the debug mode knowledge | ||||
|   "up" the actor tree (for eg. when handling a crash in a leaf actor). | ||||
|   As such currently there is a set of tests and known scenarios which will | ||||
|   result in process clobbering by the zombie reaping machinery and these | ||||
|   have been documented in https://github.com/goodboy/tractor/issues/320. | ||||
| 
 | ||||
|   The implementation details include: | ||||
| 
 | ||||
|   - utilizing a custom SIGINT handler which we apply whenever an actor's | ||||
|     runtime enters the debug machinery, which we also make sure the | ||||
|     stdlib's `pdb` configuration doesn't override (which it does by | ||||
|     default without special instance config). | ||||
|   - litter the runtime with `maybe_wait_for_debugger()` mostly in spots | ||||
|     where the root actor should block before doing embedded nursery | ||||
|     teardown ops which both cancel potential-children-in-debug as well | ||||
|     as eventually trigger zombie reaping machinery. | ||||
|   - hardening of the TTY locking semantics/API both in terms of IPC | ||||
|     terminations and cancellation and lock release determinism from | ||||
|     sync debugger instance methods. | ||||
|   - factoring of locking infrastructure into a new `._debug.Lock` global | ||||
|     which encapsulates all details of the ``trio`` sync primitives and | ||||
|     task/actor uid management and tracking. | ||||
| 
 | ||||
|   We also add `ctrl-c` cases throughout the test suite though these are | ||||
|   disabled for py3.9 (`pdbpp` UX differences that don't seem worth | ||||
|   compensating for, especially since this will be our last 3.9 supported | ||||
|   release) and there are a slew of marked cases that aren't expected to | ||||
|   work in CI more generally (as mentioned in the "nested" tree note | ||||
|   above) despite seemingly working  when run manually on linux. | ||||
| 
 | ||||
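
For readers unfamiliar with the handler-swap trick in the first bullet above, the general shape (stdlib only, a sketch rather than ``tractor``'s actual handler) looks like:

```python
import signal


def _ignore_sigint(signum, frame) -> None:
    # swallow ctrl-c while this process holds the TTY/debugger lock so the
    # REPL session and its IPC channels aren't clobbered mid-interaction
    print('SIGINT ignored while the debugger REPL is active')


def run_repl_protected(repl) -> None:
    prev = signal.signal(signal.SIGINT, _ignore_sigint)
    try:
        repl()  # e.g. drop into a pdb-style prompt
    finally:
        # always restore the prior handler when leaving debug mode
        signal.signal(signal.SIGINT, prev)
```
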
| - `#304 <https://github.com/goodboy/tractor/issues/304>`_: Add a new | ||||
|   ``to_asyncio.LinkedTaskChannel.subscribe()`` which gives task-oriented | ||||
|   broadcast functionality semantically equivalent to | ||||
|   ``tractor.MsgStream.subscribe()`` this makes it possible for multiple | ||||
|   ``trio``-side tasks to consume ``asyncio``-side task msgs in tandem. | ||||
| 
 | ||||
|   Further Improvements to the test suite were added in this patch set | ||||
|   including a new scenario test for a sub-actor managed "service nursery" | ||||
|   (implementing the basics of a "service manager") including use of | ||||
|   *infected asyncio* mode. Further we added a lower level | ||||
|   ``test_trioisms.py`` to start to track issues we need to work around in | ||||
|   ``trio`` itself which in this case included a bug we were trying to | ||||
|   solve related to https://github.com/python-trio/trio/issues/2258. | ||||
| 
 | ||||
| 
 | ||||
| Bug Fixes | ||||
| --------- | ||||
| 
 | ||||
| - `#318 <https://github.com/goodboy/tractor/issues/318>`_: Fix | ||||
|   a previously undetected ``trio``-``asyncio`` task lifetime linking | ||||
|   issue with the ``to_asyncio.open_channel_from()`` api where both sides | ||||
|   where not properly waiting/signalling termination and it was possible | ||||
|   for ``asyncio``-side errors to not propagate due to a race condition. | ||||
| 
 | ||||
|   The implementation fix summary is: | ||||
|   - add state to signal the end of the ``trio`` side task to be | ||||
|     read by the ``asyncio`` side and always cancel any ongoing | ||||
|     task in such cases. | ||||
|   - always wait on the ``asyncio`` task termination from the ``trio`` | ||||
|     side on error before maybe raising said error. | ||||
|   - always close the ``trio`` mem chan on exit to ensure the other | ||||
|     side can detect it and follow. | ||||
| 
 | ||||
| 
 | ||||
| Trivial/Internal Changes | ||||
| ------------------------ | ||||
| 
 | ||||
| - `#248 <https://github.com/goodboy/tractor/issues/248>`_: Adjust the | ||||
|   `tractor._spawn.soft_wait()` strategy to avoid sending an actor cancel | ||||
|   request (via `Portal.cancel_actor()`) if either the child process is | ||||
|   detected as having terminated or the IPC channel is detected to be | ||||
|   closed. | ||||
| 
 | ||||
|   This ensures (even) more deterministic inter-actor cancellation by | ||||
|   avoiding the timeout condition where possible when a child never | ||||
|   successfully spawned, crashed, or became un-contactable over IPC. | ||||
| 
 | ||||
| - `#295 <https://github.com/goodboy/tractor/issues/295>`_: Add an | ||||
|   experimental ``tractor.msg.NamespacePath`` type for passing Python | ||||
|   objects by "reference" through a ``str``-subtype message and using the | ||||
|   new ``pkgutil.resolve_name()`` for reference loading. | ||||
| 
 | ||||
| - `#298 <https://github.com/goodboy/tractor/issues/298>`_: Add a new | ||||
|   `tractor.experimental` subpackage for staging new high level APIs and | ||||
|   subsystems that we might eventually make built-ins. | ||||
| 
 | ||||
| - `#300 <https://github.com/goodboy/tractor/issues/300>`_: Update to and | ||||
|   pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0) both of which | ||||
|   required adjustments for backwards incompatible API tweaks. | ||||
| 
 | ||||
| - `#303 <https://github.com/goodboy/tractor/issues/303>`_: Fence off | ||||
|   ``multiprocessing`` imports until absolutely necessary in an effort to | ||||
|   avoid "resource tracker" spawning side effects that seem to have | ||||
|   varying degrees of unreliability per Python release. Port to new | ||||
|   ``msgspec.DecodeError``. | ||||
| 
 | ||||
| - `#305 <https://github.com/goodboy/tractor/issues/305>`_: Add | ||||
|   ``tractor.query_actor()`` an addr looker-upper which doesn't deliver | ||||
|   a ``Portal`` instance and instead just a socket address ``tuple``. | ||||
| 
 | ||||
|   Sometimes it's handy to just have a simple way to figure out if | ||||
|   a "service" actor is up, so add this discovery helper for that. We'll | ||||
|   prolly just leave it undocumented for now until we figure out | ||||
|   a longer-term/better discovery system. | ||||
| 
 | ||||
| - `#316 <https://github.com/goodboy/tractor/issues/316>`_: Run windows | ||||
|   CI jobs on python 3.10 after some hacks for ``pdbpp`` dependency | ||||
|   issues. | ||||
| 
 | ||||
|   The issue was to do with the now deprecated `pyreadline` project which | ||||
|   should be changed over to `pyreadline3`. | ||||
| 
 | ||||
| - `#317 <https://github.com/goodboy/tractor/issues/317>`_: Drop use of | ||||
|   the ``msgpack`` package and instead move fully to the ``msgspec`` | ||||
|   codec library. | ||||
| 
 | ||||
|   We've now used ``msgspec`` extensively in production and there's no | ||||
|   reason to not use it as default. Further this change preps us for the up | ||||
|   and coming typed messaging semantics (#196), dialog-unprotocol system | ||||
|   (#297), and caps-based messaging-protocols (#299) planned before our | ||||
|   first beta. | ||||
| 
 | ||||
| 
 | ||||
| tractor 0.1.0a4 (2021-12-18) | ||||
| ============================ | ||||
| 
 | ||||
|  |  | |||
|  | @ -6,14 +6,8 @@ | |||
| ``tractor`` is a `structured concurrent`_, multi-processing_ runtime | ||||
| built on trio_. | ||||
| 
 | ||||
| Fundamentally, ``tractor`` gives you parallelism via | ||||
| ``trio``-"*actors*": independent Python processes (aka | ||||
| non-shared-memory threads) which maintain structured | ||||
| concurrency (SC) *end-to-end* inside a *supervision tree*. | ||||
| 
 | ||||
| Cross-process (and thus cross-host) SC is accomplished through the | ||||
| combined use of our "actor nurseries_" and an "SC-transitive IPC | ||||
| protocol" constructed on top of multiple Pythons each running a ``trio`` | ||||
| Fundamentally ``tractor`` gives you parallelism via ``trio``-"*actors*": | ||||
| our nurseries_ let you spawn new Python processes which each run a ``trio`` | ||||
| scheduled runtime - a call to ``trio.run()``. | ||||
| 
 | ||||
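
To make that concrete, here is a minimal spawn-and-await sketch built from the same nursery APIs that appear elsewhere in this diff (``open_nursery()``, ``run_in_actor()``, ``Portal.result()``); it assumes function kwargs are forwarded to the target as shown:

```python
import trio
import tractor


async def cube(x: int) -> int:
    # runs in its own process, inside its own `trio.run()`
    return x ** 3


async def main():
    async with tractor.open_nursery() as n:
        portal = await n.run_in_actor(cube, x=3)
        assert await portal.result() == 27


if __name__ == '__main__':
    trio.run(main)
```
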
| We believe the system adheres to the `3 axioms`_ of an "`actor model`_" | ||||
|  | @ -29,8 +23,7 @@ Features | |||
| - **It's just** a ``trio`` API | ||||
| - *Infinitely nestable* process trees | ||||
| - Builtin IPC streaming APIs with task fan-out broadcasting | ||||
| - A "native" multi-core debugger REPL using `pdbp`_ (a fork & fix of | ||||
|   `pdb++`_ thanks to @mdmintz!) | ||||
| - A (first ever?) "native" multi-core debugger UX for Python using `pdb++`_ | ||||
| - Support for a swappable, OS specific, process spawning layer | ||||
| - A modular transport stack, allowing for custom serialization (eg. with | ||||
|   `msgspec`_), communications protocols, and environment specific IPC | ||||
|  | @ -125,7 +118,7 @@ Zombie safe: self-destruct a process tree | |||
|             f"running in pid {os.getpid()}" | ||||
|         ) | ||||
| 
 | ||||
|         await trio.sleep_forever() | ||||
|        await trio.sleep_forever() | ||||
| 
 | ||||
| 
 | ||||
|     async def main(): | ||||
|  | @ -156,7 +149,7 @@ it **is a bug**. | |||
| 
 | ||||
| "Native" multi-process debugging | ||||
| -------------------------------- | ||||
| Using the magic of `pdbp`_ and our internal IPC, we've | ||||
| Using the magic of `pdb++`_ and our internal IPC, we've | ||||
| been able to create a native feeling debugging experience for | ||||
| any (sub-)process in your ``tractor`` tree. | ||||
| 
 | ||||
|  | @ -574,13 +567,6 @@ Help us push toward the future of distributed `Python`. | |||
| - Typed capability-based (dialog) protocols ( see `#196 | ||||
|   <https://github.com/goodboy/tractor/issues/196>`_ with draft work | ||||
|   started in `#311 <https://github.com/goodboy/tractor/pull/311>`_) | ||||
| - We **recently disabled CI-testing on windows** and need help getting | ||||
|   it running again! (see `#327 | ||||
|   <https://github.com/goodboy/tractor/pull/327>`_). **We do have windows | ||||
|   support** (and have for quite a while) but since no active hacker | ||||
|   exists in the user-base to help test on that OS, for now we're not | ||||
|   actively maintaining testing due to the added hassle and general | ||||
|   latency.. | ||||
| 
 | ||||
| 
 | ||||
| Feel like saying hi? | ||||
|  | @ -604,7 +590,6 @@ channel`_! | |||
| .. _adherance to: https://www.youtube.com/watch?v=7erJ1DV_Tlo&t=1821s | ||||
| .. _trio gitter channel: https://gitter.im/python-trio/general | ||||
| .. _matrix channel: https://matrix.to/#/!tractor:matrix.org | ||||
| .. _pdbp: https://github.com/mdmintz/pdbp | ||||
| .. _pdb++: https://github.com/pdbpp/pdbpp | ||||
| .. _guest mode: https://trio.readthedocs.io/en/stable/reference-lowlevel.html?highlight=guest%20mode#using-guest-mode-to-run-trio-on-top-of-other-event-loops | ||||
| .. _messages: https://en.wikipedia.org/wiki/Message_passing | ||||
|  |  | |||
|  | @ -396,7 +396,7 @@ tasks spawned via multiple RPC calls to an actor can modify | |||
| 
 | ||||
| 
 | ||||
|         # a per process cache | ||||
|         _actor_cache: dict[str, bool] = {} | ||||
|         _actor_cache: Dict[str, bool] = {} | ||||
| 
 | ||||
| 
 | ||||
|         def ping_endpoints(endpoints: List[str]): | ||||
|  |  | |||
|  | @ -1,151 +0,0 @@ | |||
| ''' | ||||
| Complex edge case where during real-time streaming the IPC transport | ||||
| channels are wiped out (purposely in this example though it could have | ||||
| been an outage) and we want to ensure that despite being in debug mode | ||||
| (or not) the user can send SIGINT once they notice the hang and the | ||||
| actor tree will eventually be cancelled without leaving any zombies. | ||||
| 
 | ||||
| ''' | ||||
| import trio | ||||
| from tractor import ( | ||||
|     open_nursery, | ||||
|     context, | ||||
|     Context, | ||||
|     MsgStream, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| async def break_channel_silently_then_error( | ||||
|     stream: MsgStream, | ||||
| ): | ||||
|     async for msg in stream: | ||||
|         await stream.send(msg) | ||||
| 
 | ||||
|         # XXX: close the channel right after an error is raised | ||||
|         # purposely breaking the IPC transport to make sure the parent | ||||
|         # doesn't get stuck in debug or hang on the connection join. | ||||
|         # this more or less simulates an infinite msg-receive hang on | ||||
|         # the other end. | ||||
|         await stream._ctx.chan.send(None) | ||||
|         assert 0 | ||||
| 
 | ||||
| 
 | ||||
| async def close_stream_and_error( | ||||
|     stream: MsgStream, | ||||
| ): | ||||
|     async for msg in stream: | ||||
|         await stream.send(msg) | ||||
| 
 | ||||
|         # wipe out channel right before raising | ||||
|         await stream._ctx.chan.send(None) | ||||
|         await stream.aclose() | ||||
|         assert 0 | ||||
| 
 | ||||
| 
 | ||||
| @context | ||||
| async def recv_and_spawn_net_killers( | ||||
| 
 | ||||
|     ctx: Context, | ||||
|     break_ipc_after: bool | int = False, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Receive stream msgs and spawn some IPC killers mid-stream. | ||||
| 
 | ||||
|     ''' | ||||
|     await ctx.started() | ||||
|     async with ( | ||||
|         ctx.open_stream() as stream, | ||||
|         trio.open_nursery() as n, | ||||
|     ): | ||||
|         async for i in stream: | ||||
|             print(f'child echoing {i}') | ||||
|             await stream.send(i) | ||||
|             if ( | ||||
|                 break_ipc_after | ||||
|                 and i > break_ipc_after | ||||
|             ): | ||||
|                 print( | ||||
|                     '#################################\n' | ||||
|                     'Simulating child-side IPC BREAK!\n' | ||||
|                     '#################################' | ||||
|                 ) | ||||
|                 n.start_soon(break_channel_silently_then_error, stream) | ||||
|                 n.start_soon(close_stream_and_error, stream) | ||||
| 
 | ||||
| 
 | ||||
| async def main( | ||||
|     debug_mode: bool = False, | ||||
|     start_method: str = 'trio', | ||||
| 
 | ||||
|     # by default we break the parent IPC first (if configured to break | ||||
|     # at all), but this can be changed so the child does first (even if | ||||
|     # both are set to break). | ||||
|     break_parent_ipc_after: int | bool = False, | ||||
|     break_child_ipc_after: int | bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     async with ( | ||||
|         open_nursery( | ||||
|             start_method=start_method, | ||||
| 
 | ||||
|             # NOTE: even debugger is used we shouldn't get | ||||
|             # a hang since it never engages due to broken IPC | ||||
|             debug_mode=debug_mode, | ||||
|             loglevel='warning', | ||||
| 
 | ||||
|         ) as an, | ||||
|     ): | ||||
|         portal = await an.start_actor( | ||||
|             'chitty_hijo', | ||||
|             enable_modules=[__name__], | ||||
|         ) | ||||
| 
 | ||||
|         async with portal.open_context( | ||||
|             recv_and_spawn_net_killers, | ||||
|             break_ipc_after=break_child_ipc_after, | ||||
| 
 | ||||
|         ) as (ctx, sent): | ||||
|             async with ctx.open_stream() as stream: | ||||
|                 for i in range(1000): | ||||
| 
 | ||||
|                     if ( | ||||
|                         break_parent_ipc_after | ||||
|                         and i > break_parent_ipc_after | ||||
|                     ): | ||||
|                         print( | ||||
|                             '#################################\n' | ||||
|                             'Simulating parent-side IPC BREAK!\n' | ||||
|                             '#################################' | ||||
|                         ) | ||||
|                         await stream._ctx.chan.send(None) | ||||
| 
 | ||||
|                     # it actually breaks right here in the | ||||
|                     # mp_spawn/forkserver backends and thus the zombie | ||||
|                     # reaper never even kicks in? | ||||
|                     print(f'parent sending {i}') | ||||
|                     await stream.send(i) | ||||
| 
 | ||||
|                     with trio.move_on_after(2) as cs: | ||||
| 
 | ||||
|                         # NOTE: in the parent side IPC failure case this | ||||
|                         # will raise an ``EndOfChannel`` after the child | ||||
|                         # is killed and sends a stop msg back to it's | ||||
|                         # caller/this-parent. | ||||
|                         rx = await stream.receive() | ||||
| 
 | ||||
|                         print(f"I'm a happy user and echoed to me is {rx}") | ||||
| 
 | ||||
|                     if cs.cancelled_caught: | ||||
|                         # pretend to be a user seeing no streaming action | ||||
|                         # thinking it's a hang, and then hitting ctl-c.. | ||||
|                         print("YOO i'm a user anddd thingz hangin..") | ||||
| 
 | ||||
|                 print( | ||||
|                     "YOO i'm mad send side dun but thingz hangin..\n" | ||||
|                     'MASHING CTlR-C Ctl-c..' | ||||
|                 ) | ||||
|                 raise KeyboardInterrupt | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -27,18 +27,7 @@ async def main(): | |||
| 
 | ||||
|         # retrieve results | ||||
|         async with p0.open_stream_from(breakpoint_forever) as stream: | ||||
| 
 | ||||
|             # triggers the first name error | ||||
|             try: | ||||
|                 await p1.run(name_error) | ||||
|             except tractor.RemoteActorError as rae: | ||||
|                 assert rae.type is NameError | ||||
| 
 | ||||
|             async for i in stream: | ||||
| 
 | ||||
|                 # a second time try the failing subactor and this tie | ||||
|                 # let error propagate up to the parent/nursery. | ||||
|                 await p1.run(name_error) | ||||
|             await p1.run(name_error) | ||||
| 
 | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|  |  | |||
|  | @ -12,31 +12,18 @@ async def breakpoint_forever(): | |||
|     while True: | ||||
|         await tractor.breakpoint() | ||||
| 
 | ||||
|         # NOTE: if the test never sent 'q'/'quit' commands | ||||
|         # on the pdb repl, without this checkpoint line the | ||||
|         # repl would spin in this actor forever. | ||||
|         # await trio.sleep(0) | ||||
| 
 | ||||
| 
 | ||||
| async def spawn_until(depth=0): | ||||
|     """A nested nursery that triggers another ``NameError``. | ||||
|     """ | ||||
|     async with tractor.open_nursery() as n: | ||||
|         if depth < 1: | ||||
| 
 | ||||
|             await n.run_in_actor(breakpoint_forever) | ||||
| 
 | ||||
|             p = await n.run_in_actor( | ||||
|             # await n.run_in_actor('breakpoint_forever', breakpoint_forever) | ||||
|             await n.run_in_actor( | ||||
|                 name_error, | ||||
|                 name='name_error' | ||||
|             ) | ||||
|             await trio.sleep(0.5) | ||||
|             # rx and propagate error from child | ||||
|             await p.result() | ||||
| 
 | ||||
|         else: | ||||
|             # recursive call to spawn another process branching layer of | ||||
|             # the tree | ||||
|             depth -= 1 | ||||
|             await n.run_in_actor( | ||||
|                 spawn_until, | ||||
|  | @ -66,7 +53,6 @@ async def main(): | |||
|     """ | ||||
|     async with tractor.open_nursery( | ||||
|         debug_mode=True, | ||||
|         # loglevel='cancel', | ||||
|     ) as n: | ||||
| 
 | ||||
|         # spawn both actors | ||||
|  | @ -81,16 +67,8 @@ async def main(): | |||
|             name='spawner1', | ||||
|         ) | ||||
| 
 | ||||
|         # TODO: test this case as well where the parent don't see | ||||
|         # the sub-actor errors by default and instead expect a user | ||||
|         # ctrl-c to kill the root. | ||||
|         with trio.move_on_after(3): | ||||
|             await trio.sleep_forever() | ||||
| 
 | ||||
|         # gah still an issue here. | ||||
|         await portal.result() | ||||
| 
 | ||||
|         # should never get here | ||||
|         await portal1.result() | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,24 +0,0 @@ | |||
| import os | ||||
| import sys | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| async def main() -> None: | ||||
|     async with tractor.open_nursery(debug_mode=True) as an: | ||||
| 
 | ||||
|         assert os.environ['PYTHONBREAKPOINT'] == 'tractor._debug._set_trace' | ||||
| 
 | ||||
|         # TODO: an assert that verifies the hook has indeed been, hooked | ||||
|         # XD | ||||
|         assert sys.breakpointhook is not tractor._debug._set_trace | ||||
| 
 | ||||
|         breakpoint() | ||||
| 
 | ||||
|     # TODO: an assert that verifies the hook is unhooked.. | ||||
|     assert sys.breakpointhook | ||||
|     breakpoint() | ||||
| 
 | ||||
| if __name__ == '__main__': | ||||
|     trio.run(main) | ||||
|  | @ -9,7 +9,7 @@ is ``tractor``'s channels. | |||
| 
 | ||||
| """ | ||||
| from contextlib import asynccontextmanager | ||||
| from typing import Callable | ||||
| from typing import List, Callable | ||||
| import itertools | ||||
| import math | ||||
| import time | ||||
|  | @ -71,8 +71,8 @@ async def worker_pool(workers=4): | |||
| 
 | ||||
|         async def _map( | ||||
|             worker_func: Callable[[int], bool], | ||||
|             sequence: list[int] | ||||
|         ) -> list[bool]: | ||||
|             sequence: List[int] | ||||
|         ) -> List[bool]: | ||||
| 
 | ||||
|             # define an async (local) task to collect results from workers | ||||
|             async def send_result(func, value, portal): | ||||
|  |  | |||
|  | @ -0,0 +1,8 @@ | |||
| Adjust the `tractor._spawn.soft_wait()` strategy to avoid sending an | ||||
| actor cancel request (via `Portal.cancel_actor()`) if either the child | ||||
| process is detected as having terminated or the IPC channel is detected | ||||
| to be closed. | ||||
| 
 | ||||
| This ensures (even) more deterministic inter-actor cancellation by | ||||
| avoiding the timeout condition where possible when a child never | ||||
| successfully spawned, crashed, or became un-contactable over IPC. | ||||
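
Stripped down, the guard described above reads roughly like this (all names are placeholders for the real internals):

```python
async def maybe_request_cancel(proc, chan, portal) -> None:
    # skip the remote cancel request when it can't possibly be received:
    # the child process already exited, or its IPC channel is gone.
    if proc.returncode is not None:
        return
    if not chan.connected():
        return
    await portal.cancel_actor()
```
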
|  | @ -0,0 +1,3 @@ | |||
| Add an experimental ``tractor.msg.NamespacePath`` type for passing Python | ||||
| objects by "reference" through a ``str``-subtype message and using the | ||||
| new ``pkgutil.resolve_name()`` for reference loading. | ||||
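
The reference-loading half of this is plain stdlib behaviour, e.g.:

```python
import pkgutil

# a "<module>:<qualified name>" string resolves back to the live object,
# which is exactly what a str-subtype message can carry over IPC
sqrt = pkgutil.resolve_name('math:sqrt')
assert sqrt(9) == 3.0
```
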
|  | @ -0,0 +1,2 @@ | |||
| Add a new `tractor.experimental` subpackage for staging new high level | ||||
| APIs and subsystems that we might eventually make built-ins. | ||||
|  | @ -0,0 +1,3 @@ | |||
| Update to and pin latest ``msgpack`` (1.0.3) and ``msgspec`` (0.4.0) | ||||
| both of which required adjustments for backwards incompatible API | ||||
| tweaks. | ||||
|  | @ -0,0 +1,4 @@ | |||
| Fence off ``multiprocessing`` imports until absolutely necessary in an | ||||
| effort to avoid "resource tracker" spawning side effects that seem to | ||||
| have varying degrees of unreliability per Python release. Port to new | ||||
| ``msgspec.DecodeError``. | ||||
|  | @ -0,0 +1,12 @@ | |||
| Add a new ``to_asyncio.LinkedTaskChannel.subscribe()`` which gives | ||||
| task-oriented broadcast functionality semantically equivalent to | ||||
| ``tractor.MsgStream.subscribe()`` this makes it possible for multiple | ||||
| ``trio``-side tasks to consume ``asyncio``-side task msgs in tandem. | ||||
| 
 | ||||
| Further Improvements to the test suite were added in this patch set | ||||
| including a new scenario test for a sub-actor managed "service nursery" | ||||
| (implementing the basics of a "service manager") including use of | ||||
| *infected asyncio* mode. Further we added a lower level | ||||
| ``test_trioisms.py`` to start to track issues we need to work around in | ||||
| ``trio`` itself which in this case included a bug we were trying to | ||||
| solve related to https://github.com/python-trio/trio/issues/2258. | ||||
|  | @ -0,0 +1,5 @@ | |||
| Run windows CI jobs on python 3.10 after some | ||||
| hacks for ``pdbpp`` dependency issues. | ||||
| 
 | ||||
| The issue was to do with the now deprecated `pyreadline` project which | ||||
| should be changed over to `pyreadline3`. | ||||
|  | @ -0,0 +1,8 @@ | |||
| Drop use of the ``msgpack`` package and instead move fully to the | ||||
| ``msgspec`` codec library. | ||||
| 
 | ||||
| We've now used ``msgspec`` extensively in production and there's no | ||||
| reason to not use it as default. Further this change preps us for the up | ||||
| and coming typed messaging semantics (#196), dialog-unprotocol system | ||||
| (#297), and caps-based messaging-protocols (#299) planned before our | ||||
| first beta. | ||||
|  | @ -0,0 +1,13 @@ | |||
| Fix a previously undetected ``trio``-``asyncio`` task lifetime linking | ||||
| issue with the ``to_asyncio.open_channel_from()`` api where both sides | ||||
| where not properly waiting/signalling termination and it was possible | ||||
| for ``asyncio``-side errors to not propagate due to a race condition. | ||||
| 
 | ||||
| The implementation fix summary is: | ||||
| - add state to signal the end of the ``trio`` side task to be | ||||
|   read by the ``asyncio`` side and always cancel any ongoing | ||||
|   task in such cases. | ||||
| - always wait on the ``asyncio`` task termination from the ``trio`` | ||||
|   side on error before maybe raising said error. | ||||
| - always close the ``trio`` mem chan on exit to ensure the other | ||||
|   side can detect it and follow. | ||||
|  | @ -1,16 +0,0 @@ | |||
| Strictly support Python 3.10+, start runtime machinery reorg | ||||
| 
 | ||||
| Since we want to push forward using the new `match:` syntax for our | ||||
| internal RPC-msg loops, we officially drop 3.9 support for the next | ||||
| release which should coincide well with the first release of 3.11. | ||||
| 
 | ||||
| This patch set also officially removes the ``tractor.run()`` API (which | ||||
| has been deprecated for some time) as well as starts an initial re-org | ||||
| of the internal runtime core by: | ||||
| - renaming ``tractor._actor`` -> ``._runtime`` | ||||
| - moving the ``._runtime.Actor._process_messages()`` and | ||||
|   ``._async_main()`` to be module level singleton-task-functions since | ||||
|   they are only started once for each connection and actor spawn | ||||
|   respectively; this internal API thus looks more similar to (at the | ||||
|   time of writing) the ``trio``-internals in ``trio._core._run``. | ||||
| - officially remove ``tractor.run()``, now deprecated for some time. | ||||
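
As a tiny illustration of why ``match:`` is attractive for an RPC-msg loop (the msg shapes below are invented, not the runtime's actual wire format):

```python
def dispatch(msg: dict) -> str:
    match msg:
        case {'cmd': 'run', 'func': func, 'kwargs': kwargs}:
            return f'run {func} with {kwargs}'
        case {'cmd': 'cancel', 'cid': cid}:
            return f'cancel task {cid}'
        case _:
            return 'unrecognized msg'


assert dispatch({'cmd': 'cancel', 'cid': 7}) == 'cancel task 7'
```
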
|  | @ -1,4 +0,0 @@ | |||
| Only set `._debug.Lock.local_pdb_complete` if has been created. | ||||
| 
 | ||||
| This can be triggered by a very rare race condition (and thus we have no | ||||
| working test yet) but it is known to exist in (a) consumer project(s). | ||||
|  | @ -1,25 +0,0 @@ | |||
| Add support for ``trio >= 0.22`` and support for the new Python 3.11 | ||||
| ``[Base]ExceptionGroup`` from `pep 654`_ via the backported | ||||
| `exceptiongroup`_ package and some final fixes to the debug mode | ||||
| subsystem. | ||||
| 
 | ||||
| This port ended up driving some (hopefully) final fixes to our debugger | ||||
| subsystem including the solution to all lingering stdstreams locking | ||||
| race-conditions and deadlock scenarios. This includes extending the | ||||
| debugger test suite as well as cancellation and ``asyncio`` mode cases. | ||||
| Some of the notable details: | ||||
| 
 | ||||
| - always reverting to the ``trio`` SIGINT handler when leaving debug | ||||
|   mode. | ||||
| - bypassing child attempts to acquire the debug lock when detected | ||||
|   to be amidst actor-runtime-cancellation. | ||||
| - allowing the root actor to cancel local but IPC-stale subactor | ||||
|   requests-tasks for the debug lock when in a "no IPC peers" state. | ||||
| 
 | ||||
| Further we refined our ``ActorNursery`` semantics to be more similar to | ||||
| ``trio`` in the sense that parent task errors are always packed into the | ||||
| actor-nursery emitted exception group and adjusted all tests and | ||||
| examples accordingly. | ||||
| 
 | ||||
| .. _pep 654: https://peps.python.org/pep-0654/#handling-exception-groups | ||||
| .. _exceptiongroup: https://github.com/python-trio/exceptiongroup | ||||
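
On the consuming side this mostly means catching an exception *group* rather than a single error; a minimal sketch (assuming the ``exceptiongroup`` backport re-exports the builtin names on 3.11+):

```python
import trio
from exceptiongroup import BaseExceptionGroup  # builtin from 3.11 onward


async def main():
    # stand-in for an app whose actor nursery collects multiple sub-errors
    raise BaseExceptionGroup('faults', [ValueError('boom'), KeyError('k')])


try:
    trio.run(main)
except BaseExceptionGroup as beg:
    for exc in beg.exceptions:
        print(f'sub-exception: {exc!r}')
```
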
|  | @ -1,5 +0,0 @@ | |||
| Establish an explicit "backend spawning" method table; use it from CI | ||||
| 
 | ||||
| More clearly lays out the current set of (3) backends: ``['trio', | ||||
| 'mp_spawn', 'mp_forkserver']`` and adjusts the ``._spawn.py`` internals | ||||
| as well as the test suite to accommodate. | ||||
|  | @ -1,4 +0,0 @@ | |||
| Add ``key: Callable[..., Hashable]`` support to ``.trionics.maybe_open_context()`` | ||||
| 
 | ||||
| Gives users finer grained control over cache hit behaviour using | ||||
| a callable which receives the input ``kwargs: dict``. | ||||
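
Conceptually the ``key`` callable just replaces hashing the raw ``kwargs`` when deciding whether two opens refer to the same cached resource; a generic (non-``tractor``) illustration:

```python
from typing import Any, Callable, Hashable

_cache: dict[Hashable, Any] = {}


def open_cached(
    factory: Callable[..., Any],
    kwargs: dict,
    key: Callable[..., Hashable],
) -> Any:
    # the user-supplied `key` decides what counts as "the same" resource
    k = key(**kwargs)
    if k not in _cache:
        _cache[k] = factory(**kwargs)
    return _cache[k]


# e.g. cache one feed per symbol while ignoring a volatile `timeout` kwarg
feed = open_cached(
    dict,
    {'symbol': 'btcusdt', 'timeout': 3},
    key=lambda symbol, **_: symbol,
)
```
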
|  | @ -1,41 +0,0 @@ | |||
| Add support for debug-lock blocking using a ``._debug.Lock._blocked: | ||||
| set[tuple]`` and add ids when no-more IPC connections with the | ||||
| root actor are detected. | ||||
| 
 | ||||
| This is an enhancement which (mostly) solves a lingering debugger | ||||
| locking race case we needed to handle: | ||||
| 
 | ||||
| - child crashes acquires TTY lock in root and attaches to ``pdb`` | ||||
| - child IPC goes down such that all channels to the root are broken | ||||
|   / non-functional. | ||||
| - root is stuck thinking the child is still in debug even though it | ||||
|   can't be contacted and the child actor machinery hasn't been | ||||
|   cancelled by its parent. | ||||
| - root gets stuck in deadlock with child since it won't send a cancel | ||||
|   request until the child is finished debugging (to avoid clobbering | ||||
|   a child that is actually using the debugger), but the child can't | ||||
|   unlock the debugger bc IPC is down and it can't contact the root. | ||||
| 
 | ||||
| To avoid this scenario add debug lock blocking list via | ||||
| `._debug.Lock._blocked: set[tuple]` which holds actor uids for any actor | ||||
| that is detected by the root as having no transport channel connections | ||||
| (of which at least one should exist if this sub-actor at some point | ||||
| acquired the debug lock). The root consequently checks this list for any | ||||
| actor that tries to (re)acquire the lock and blocks with | ||||
| a ``ContextCancelled``. Further, when a debug condition is tested in | ||||
| ``._runtime._invoke``, the context's ``._enter_debugger_on_cancel`` is | ||||
| set to `False` if the actor was put on the block list, in which case all | ||||
| post-mortem / crash handling will be bypassed for that task. | ||||
| 
 | ||||
| In theory this approach to block list management may cause problems | ||||
| where some nested child actor acquires and releases the lock multiple | ||||
| times and it gets stuck on the block list after the first use? If this | ||||
| turns out to be an issue we can try changing the strategy so blocks are | ||||
| only added when the root has zero IPC peers left? | ||||
| 
 | ||||
| Further, this adds a root-locking-task side cancel scope, | ||||
| ``Lock._root_local_task_cs_in_debug``, which can be ``.cancel()``-ed by the root | ||||
| runtime when a stale lock is detected during the IPC channel testing. | ||||
| However, right now we're NOT using this since it seems to cause test | ||||
| failures likely due to causing pre-mature cancellation and maybe needs | ||||
| a bit more experimenting? | ||||
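
Stripped of runtime details the gate behaves roughly like this (names and the error type are stand-ins, not the actual internals):

```python
_blocked: set[tuple[str, str]] = set()


def mark_ipc_dead(uid: tuple[str, str]) -> None:
    # called by the root when an actor has no remaining transport channels
    _blocked.add(uid)


def check_debug_lock_request(uid: tuple[str, str]) -> None:
    # refuse any (re)acquire attempt from an actor parked on the block list
    if uid in _blocked:
        raise PermissionError(f'{uid} is blocked from the debug TTY lock')
```
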
|  | @ -1,19 +0,0 @@ | |||
| Rework our ``.trionics.BroadcastReceiver`` internals to avoid method | ||||
| recursion and approach a design and interface closer to ``trio``'s | ||||
| ``MemoryReceiveChannel``. | ||||
| 
 | ||||
| The details of the internal changes include: | ||||
| 
 | ||||
| - implementing a ``BroadcastReceiver.receive_nowait()`` and using it | ||||
|   within the async ``.receive()`` thus avoiding recursion from | ||||
|   ``.receive()``. | ||||
| - failing over to an internal ``._receive_from_underlying()`` when the | ||||
|   ``_nowait()`` call raises ``trio.WouldBlock`` | ||||
| - adding ``BroadcastState.statistics()`` for debugging and testing both | ||||
|   internals and by users. | ||||
| - add an internal ``BroadcastReceiver._raise_on_lag: bool`` which can be | ||||
|   set to avoid ``Lagged`` raising for possible use cases where a user | ||||
|   wants to choose between a [cheap or nasty | ||||
|   pattern](https://zguide.zeromq.org/docs/chapter7/#The-Cheap-or-Nasty-Pattern) | ||||
|   for the particular stream (we use this in ``piker``'s dark clearing | ||||
|   engine to avoid fast feeds breaking during HFT periods). | ||||
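
The non-recursive receive flow from the first bullet is the classic try-nowait-then-wait pattern; schematically (not the real method bodies):

```python
import trio


class _SketchReceiver:
    def receive_nowait(self):
        # return a buffered value or raise `trio.WouldBlock`
        raise trio.WouldBlock

    async def _receive_from_underlying(self):
        # block until the wrapped channel produces the next value
        await trio.sleep(0)
        return None

    async def receive(self):
        try:
            return self.receive_nowait()
        except trio.WouldBlock:
            # fail over to the slow path instead of re-entering `.receive()`
            # (which is what previously caused method recursion)
            return await self._receive_from_underlying()
```
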
|  | @ -1,11 +0,0 @@ | |||
| Always ``list``-cast the ``mngrs`` input to | ||||
| ``.trionics.gather_contexts()`` and ensure its size otherwise raise | ||||
| a ``ValueError``. | ||||
| 
 | ||||
| Turns out that trying to pass an inline-style generator comprehension | ||||
| doesn't seem to work inside the ``async with`` expression? Further, in | ||||
| such a case we can get a hang waiting on the all-entered event | ||||
| completion when the internal mngrs iteration is a noop. Instead we | ||||
| always greedily check a size and error on empty input; the lazy | ||||
| iteration of a generator input is not beneficial anyway since we're | ||||
| entering all manager instances in concurrent tasks. | ||||
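
In short, the input handling now does roughly the equivalent of:

```python
def check_mngrs(mngrs) -> list:
    # greedily materialize generators/comprehensions and reject empty input
    mngrs = list(mngrs)
    if not mngrs:
        raise ValueError('`mngrs` input is empty?')
    return mngrs
```
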
|  | @ -1,15 +0,0 @@ | |||
| Fixes to ensure IPC (channel) breakage doesn't result in hung actor | ||||
| trees; the zombie reaping and general supervision machinery will always | ||||
| clean up and terminate. | ||||
| 
 | ||||
| This includes not only the (mostly minor) fixes to solve these cases but | ||||
| also a new extensive test suite in `test_advanced_faults.py` with an | ||||
| accompanying highly configurable example module-script in | ||||
| `examples/advanced_faults/ipc_failure_during_stream.py`. Tests ensure we | ||||
| never get hangs or zombies despite operating in debug mode and attempt to | ||||
| simulate all possible IPC transport failure cases for a local-host actor | ||||
| tree. | ||||
| 
 | ||||
| Further we simplify `Context.open_stream.__aexit__()` to just call | ||||
| `MsgStream.aclose()` directly more or less avoiding a pure duplicate | ||||
| code path. | ||||
|  | @ -1,10 +0,0 @@ | |||
| Always redraw the `pdbpp` prompt on `SIGINT` during REPL use. | ||||
| 
 | ||||
| There were recent changes to do with Python 3.10 that required us to pin | ||||
| to a specific commit in `pdbpp` which have recently been fixed minus | ||||
| this last issue with `SIGINT` shielding: not clobbering or not | ||||
| showing the `(Pdb++)` prompt on ctrl-c by the user. This repairs all | ||||
| that by firstly removing the standard KBI intercepting of the std lib's | ||||
| `pdb.Pdb._cmdloop()` as well as ensuring that only the actor with REPL | ||||
| control ever reports `SIGINT` handler log msgs and prompt redraws. With | ||||
| this we move back to using pypi `pdbpp` release. | ||||
|  | @ -1,7 +0,0 @@ | |||
| Drop `trio.Process.aclose()` usage, copy into our spawning code. | ||||
| 
 | ||||
| The details are laid out in https://github.com/goodboy/tractor/issues/330. | ||||
| `trio` changed its process running quite some time ago; this just copies | ||||
| out the small bit we needed (from the old `.aclose()`) for hard kills | ||||
| where a soft runtime cancel request fails and our "zombie killer" | ||||
| implementation kicks in. | ||||
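
The copied-out behaviour is essentially a timeout-guarded wait followed by a hard kill; a minimal sketch using public ``trio`` process APIs (the grace period is made up):

```python
import trio


async def reap(proc: trio.Process, grace: float = 3) -> None:
    # give the soft/graceful exit a chance first
    with trio.move_on_after(grace) as cs:
        await proc.wait()

    if cs.cancelled_caught:
        # "zombie killer": the child never exited, so hard kill it
        proc.kill()
        await proc.wait()
```
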
|  | @ -1,15 +0,0 @@ | |||
| Switch to using the fork & fix of `pdb++`, `pdbp`: | ||||
| https://github.com/mdmintz/pdbp | ||||
| 
 | ||||
| Allows us to sidestep a variety of issues that aren't being addressed | ||||
| in the upstream project thanks to the hard work of @mdmintz! | ||||
| 
 | ||||
| We also include some default settings adjustments as per recent | ||||
| development on the fork: | ||||
| 
 | ||||
| - sticky mode is still turned on by default but now activates when | ||||
|   using the `ll` repl command. | ||||
| - turn off line truncation by default to avoid inter-line gaps when | ||||
|   resizing the terminal during use. | ||||
| - when using the backtrace cmd either by `w` or `bt`, the config | ||||
|   automatically switches to non-sticky mode. | ||||
|  | @ -1,28 +0,0 @@ | |||
| [tool.towncrier] | ||||
| package = "tractor" | ||||
| filename = "NEWS.rst" | ||||
| directory = "nooz/" | ||||
| version = "0.1.0a6" | ||||
| title_format = "tractor {version} ({project_date})" | ||||
| template = "nooz/_template.rst" | ||||
| all_bullets = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "feature" | ||||
|   name = "Features" | ||||
|   showcontent = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "bugfix" | ||||
|   name = "Bug Fixes" | ||||
|   showcontent = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "doc" | ||||
|   name = "Improved Documentation" | ||||
|   showcontent = true | ||||
| 
 | ||||
|   [[tool.towncrier.type]] | ||||
|   directory = "trivial" | ||||
|   name = "Trivial/Internal Changes" | ||||
|   showcontent = true | ||||
|  | @ -1,8 +1,8 @@ | |||
| pytest | ||||
| pytest-trio | ||||
| pytest-timeout | ||||
| pdbp | ||||
| mypy | ||||
| trio_typing | ||||
| pdbpp | ||||
| mypy<0.920 | ||||
| trio_typing<0.7.0 | ||||
| pexpect | ||||
| towncrier | ||||
|  |  | |||
							
								
								
									
setup.py (32 lines changed)

|  | @ -25,55 +25,58 @@ with open('docs/README.rst', encoding='utf-8') as f: | |||
| 
 | ||||
| setup( | ||||
|     name="tractor", | ||||
|     version='0.1.0a6dev0',  # alpha zone | ||||
|     description='structured concurrrent `trio`-"actors"', | ||||
|     version='0.1.0a5.dev',  # alpha zone | ||||
|     description='structured concurrrent "actors"', | ||||
|     long_description=readme, | ||||
|     license='AGPLv3', | ||||
|     author='Tyler Goodlet', | ||||
|     maintainer='Tyler Goodlet', | ||||
|     maintainer_email='goodboy_foss@protonmail.com', | ||||
|     maintainer_email='jgbt@protonmail.com', | ||||
|     url='https://github.com/goodboy/tractor', | ||||
|     platforms=['linux', 'windows'], | ||||
|     packages=[ | ||||
|         'tractor', | ||||
|         'tractor.experimental', | ||||
|         'tractor.trionics', | ||||
|         'tractor.testing', | ||||
|     ], | ||||
|     install_requires=[ | ||||
| 
 | ||||
|         # trio related | ||||
|         # proper range spec: | ||||
|         # https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#id5 | ||||
|         'trio >= 0.22', | ||||
|         'trio >= 0.20', | ||||
|         'async_generator', | ||||
|         'trio_typing', | ||||
|         'exceptiongroup', | ||||
| 
 | ||||
|         # tooling | ||||
|         'tricycle', | ||||
|         'trio_typing', | ||||
| 
 | ||||
|         # tooling | ||||
|         'colorlog', | ||||
|         'wrapt', | ||||
| 
 | ||||
|         # IPC serialization | ||||
|         'msgspec', | ||||
| 
 | ||||
|         # debug mode REPL | ||||
|         'pdbp', | ||||
| 
 | ||||
|         # pip ref docs on these specs: | ||||
|         # https://pip.pypa.io/en/stable/reference/requirement-specifiers/#examples | ||||
|         # and pep: | ||||
|         # https://peps.python.org/pep-0440/#version-specifiers | ||||
|         'pdbpp <= 0.10.1; python_version < "3.10"', | ||||
| 
 | ||||
|         # windows deps workaround for ``pdbpp`` | ||||
|         # https://github.com/pdbpp/pdbpp/issues/498 | ||||
|         # https://github.com/pdbpp/fancycompleter/issues/37 | ||||
|         'pyreadline3 ; platform_system == "Windows"', | ||||
| 
 | ||||
|         # 3.10 has an outstanding unreleased issue and `pdbpp` itself | ||||
|         #   pins to patched forks of its own dependencies as well..and | ||||
|         #   we need a specific patch on master atm. | ||||
|         'pdbpp @ git+https://github.com/pdbpp/pdbpp@76c4be5#egg=pdbpp ; python_version > "3.9"',  # noqa: E501 | ||||
| 
 | ||||
|         # serialization | ||||
|         'msgspec >= "0.4.0"' | ||||
| 
 | ||||
|     ], | ||||
|     tests_require=['pytest'], | ||||
|     python_requires=">=3.10", | ||||
|     python_requires=">=3.9", | ||||
|     keywords=[ | ||||
|         'trio', | ||||
|         'async', | ||||
|  | @ -91,6 +94,7 @@ setup( | |||
|         "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", | ||||
|         "Programming Language :: Python :: Implementation :: CPython", | ||||
|         "Programming Language :: Python :: 3 :: Only", | ||||
|         "Programming Language :: Python :: 3.9", | ||||
|         "Programming Language :: Python :: 3.10", | ||||
|         "Intended Audience :: Science/Research", | ||||
|         "Intended Audience :: Developers", | ||||
|  |  | |||
|  | @ -7,91 +7,16 @@ import os | |||
| import random | ||||
| import signal | ||||
| import platform | ||||
| import pathlib | ||||
| import time | ||||
| import inspect | ||||
| from functools import partial, wraps | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| # export for tests | ||||
| from tractor.testing import tractor_test  # noqa | ||||
| 
 | ||||
| 
 | ||||
| pytest_plugins = ['pytester'] | ||||
| 
 | ||||
| 
 | ||||
| def tractor_test(fn): | ||||
|     """ | ||||
|     Use: | ||||
| 
 | ||||
|     @tractor_test | ||||
|     async def test_whatever(): | ||||
|         await ... | ||||
| 
 | ||||
|     If fixtures: | ||||
| 
 | ||||
|         - ``arb_addr`` (a socket addr tuple where arbiter is listening) | ||||
|         - ``loglevel`` (logging level passed to tractor internals) | ||||
|         - ``start_method`` (subprocess spawning backend) | ||||
| 
 | ||||
|     are defined in the `pytest` fixture space they will be automatically | ||||
|     injected to tests declaring these funcargs. | ||||
|     """ | ||||
|     @wraps(fn) | ||||
|     def wrapper( | ||||
|         *args, | ||||
|         loglevel=None, | ||||
|         arb_addr=None, | ||||
|         start_method=None, | ||||
|         **kwargs | ||||
|     ): | ||||
|         # __tracebackhide__ = True | ||||
| 
 | ||||
|         if 'arb_addr' in inspect.signature(fn).parameters: | ||||
|             # injects test suite fixture value to test as well | ||||
|             # as `run()` | ||||
|             kwargs['arb_addr'] = arb_addr | ||||
| 
 | ||||
|         if 'loglevel' in inspect.signature(fn).parameters: | ||||
|             # allows test suites to define a 'loglevel' fixture | ||||
|             # that activates the internal logging | ||||
|             kwargs['loglevel'] = loglevel | ||||
| 
 | ||||
|         if start_method is None: | ||||
|             if platform.system() == "Windows": | ||||
|                 start_method = 'trio' | ||||
| 
 | ||||
|         if 'start_method' in inspect.signature(fn).parameters: | ||||
|             # set of subprocess spawning backends | ||||
|             kwargs['start_method'] = start_method | ||||
| 
 | ||||
|         if kwargs: | ||||
| 
 | ||||
|             # use explicit root actor start | ||||
| 
 | ||||
|             async def _main(): | ||||
|                 async with tractor.open_root_actor( | ||||
|                     # **kwargs, | ||||
|                     arbiter_addr=arb_addr, | ||||
|                     loglevel=loglevel, | ||||
|                     start_method=start_method, | ||||
| 
 | ||||
|                     # TODO: only enable when pytest is passed --pdb | ||||
|                     # debug_mode=True, | ||||
| 
 | ||||
|                 ): | ||||
|                     await fn(*args, **kwargs) | ||||
| 
 | ||||
|             main = _main | ||||
| 
 | ||||
|         else: | ||||
|             # use implicit root actor start | ||||
|             main = partial(fn, *args, **kwargs) | ||||
| 
 | ||||
|         return trio.run(main) | ||||
| 
 | ||||
|     return wrapper | ||||
| 
 | ||||
| 
 | ||||
| _arb_addr = '127.0.0.1', random.randint(1000, 9999) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -114,21 +39,14 @@ no_windows = pytest.mark.skipif( | |||
| ) | ||||
| 
 | ||||
| 
 | ||||
| def repodir() -> pathlib.Path: | ||||
|     ''' | ||||
|     Return the abspath to the repo directory. | ||||
| 
 | ||||
|     ''' | ||||
|     # 2 parents up to step up through tests/<repo_dir> | ||||
|     return pathlib.Path(__file__).parent.parent.absolute() | ||||
| 
 | ||||
| 
 | ||||
| def examples_dir() -> pathlib.Path: | ||||
|     ''' | ||||
|     Return the abspath to the examples directory as `pathlib.Path`. | ||||
| 
 | ||||
|     ''' | ||||
|     return repodir() / 'examples' | ||||
| def repodir(): | ||||
|     """Return the abspath to the repo directory. | ||||
|     """ | ||||
|     dirname = os.path.dirname | ||||
|     dirpath = os.path.abspath( | ||||
|         dirname(dirname(os.path.realpath(__file__))) | ||||
|         ) | ||||
|     return dirpath | ||||
| 
 | ||||
| 
 | ||||
| def pytest_addoption(parser): | ||||
|  | @ -146,7 +64,11 @@ def pytest_addoption(parser): | |||
| 
 | ||||
| def pytest_configure(config): | ||||
|     backend = config.option.spawn_backend | ||||
|     tractor._spawn.try_set_start_method(backend) | ||||
| 
 | ||||
|     if backend == 'mp': | ||||
|         tractor._spawn.try_set_start_method('spawn') | ||||
|     elif backend == 'trio': | ||||
|         tractor._spawn.try_set_start_method(backend) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope='session', autouse=True) | ||||
|  | @ -159,7 +81,7 @@ def loglevel(request): | |||
| 
 | ||||
| 
 | ||||
| @pytest.fixture(scope='session') | ||||
| def spawn_backend(request) -> str: | ||||
| def spawn_backend(request): | ||||
|     return request.config.option.spawn_backend | ||||
| 
 | ||||
| 
 | ||||
|  | @ -180,24 +102,24 @@ def arb_addr(): | |||
| 
 | ||||
| def pytest_generate_tests(metafunc): | ||||
|     spawn_backend = metafunc.config.option.spawn_backend | ||||
| 
 | ||||
|     if not spawn_backend: | ||||
|         # XXX some weird windows bug with `pytest`? | ||||
|         spawn_backend = 'trio' | ||||
|         spawn_backend = 'mp' | ||||
|     assert spawn_backend in ('mp', 'trio') | ||||
| 
 | ||||
|     # TODO: maybe just use the literal `._spawn.SpawnMethodKey`? | ||||
|     assert spawn_backend in ( | ||||
|         'mp_spawn', | ||||
|         'mp_forkserver', | ||||
|         'trio', | ||||
|     ) | ||||
| 
 | ||||
|     # NOTE: used to be used to dynamically parametrize tests for when | ||||
|     # you just passed --spawn-backend=`mp` on the cli, but now we expect | ||||
|     # that cli input to be manually specified, BUT, maybe we'll do | ||||
|     # something like this again in the future? | ||||
|     if 'start_method' in metafunc.fixturenames: | ||||
|         metafunc.parametrize("start_method", [spawn_backend], scope='module') | ||||
|         if spawn_backend == 'mp': | ||||
|             from multiprocessing import get_all_start_methods | ||||
|             methods = get_all_start_methods() | ||||
|             if 'fork' in methods: | ||||
|                 # fork not available on windows, so check before | ||||
|                 # removing XXX: the fork method is in general | ||||
|                 # incompatible with trio's global scheduler state | ||||
|                 methods.remove('fork') | ||||
|         elif spawn_backend == 'trio': | ||||
|             methods = ['trio'] | ||||
| 
 | ||||
|         metafunc.parametrize("start_method", methods, scope='module') | ||||
| 
 | ||||
| 
 | ||||
| def sig_prog(proc, sig): | ||||
|  | @ -213,22 +135,16 @@ def sig_prog(proc, sig): | |||
| 
 | ||||
| 
 | ||||
| @pytest.fixture | ||||
| def daemon( | ||||
|     loglevel: str, | ||||
|     testdir, | ||||
|     arb_addr: tuple[str, int], | ||||
| ): | ||||
|     ''' | ||||
|     Run a daemon actor as a "remote arbiter". | ||||
| 
 | ||||
|     ''' | ||||
| def daemon(loglevel, testdir, arb_addr): | ||||
|     """Run a daemon actor as a "remote arbiter". | ||||
|     """ | ||||
|     if loglevel in ('trace', 'debug'): | ||||
|         # too much logging will lock up the subproc (smh) | ||||
|         loglevel = 'info' | ||||
| 
 | ||||
|     cmdargs = [ | ||||
|         sys.executable, '-c', | ||||
|         "import tractor; tractor.run_daemon([], registry_addr={}, loglevel={})" | ||||
|         "import tractor; tractor.run_daemon([], arbiter_addr={}, loglevel={})" | ||||
|         .format( | ||||
|             arb_addr, | ||||
|             "'{}'".format(loglevel) if loglevel else None) | ||||
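The `daemon` fixture hunk cuts off right after `cmdargs` is assembled; typically such a fixture then launches the command with `subprocess.Popen`, yields the process handle to the test, and terminates it on teardown. A hedged sketch under those assumptions (the address, sleep duration and fixture name are placeholders, not the project's actual code):

    import subprocess
    import sys
    import time

    import pytest


    @pytest.fixture
    def daemon_sketch():
        arb_addr = ('127.0.0.1', 1616)  # placeholder arbiter address
        cmdargs = [
            sys.executable, '-c',
            "import tractor; tractor.run_daemon([], arbiter_addr={})".format(arb_addr),
        ]
        proc = subprocess.Popen(
            cmdargs,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        time.sleep(0.5)  # crude wait for the daemon to come up
        yield proc
        proc.terminate()
        proc.wait()
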
|  |  | |||
|  | @ -1,193 +0,0 @@ | |||
| ''' | ||||
| Sketchy network blackoutz, ugly byzantine gens, can you hear the | ||||
| cancellation?.. | ||||
| 
 | ||||
| ''' | ||||
| from functools import partial | ||||
| 
 | ||||
| import pytest | ||||
| from _pytest.pathlib import import_path | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| from conftest import ( | ||||
|     examples_dir, | ||||
| ) | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'debug_mode', | ||||
|     [False, True], | ||||
|     ids=['no_debug_mode', 'debug_mode'], | ||||
| ) | ||||
| @pytest.mark.parametrize( | ||||
|     'ipc_break', | ||||
|     [ | ||||
|         # no breaks | ||||
|         { | ||||
|             'break_parent_ipc_after': False, | ||||
|             'break_child_ipc_after': False, | ||||
|         }, | ||||
| 
 | ||||
|         # only parent breaks | ||||
|         { | ||||
|             'break_parent_ipc_after': 500, | ||||
|             'break_child_ipc_after': False, | ||||
|         }, | ||||
| 
 | ||||
|         # only child breaks | ||||
|         { | ||||
|             'break_parent_ipc_after': False, | ||||
|             'break_child_ipc_after': 500, | ||||
|         }, | ||||
| 
 | ||||
|         # both: break parent first | ||||
|         { | ||||
|             'break_parent_ipc_after': 500, | ||||
|             'break_child_ipc_after': 800, | ||||
|         }, | ||||
|         # both: break child first | ||||
|         { | ||||
|             'break_parent_ipc_after': 800, | ||||
|             'break_child_ipc_after': 500, | ||||
|         }, | ||||
| 
 | ||||
|     ], | ||||
|     ids=[ | ||||
|         'no_break', | ||||
|         'break_parent', | ||||
|         'break_child', | ||||
|         'break_both_parent_first', | ||||
|         'break_both_child_first', | ||||
|     ], | ||||
| ) | ||||
| def test_ipc_channel_break_during_stream( | ||||
|     debug_mode: bool, | ||||
|     spawn_backend: str, | ||||
|     ipc_break: dict | None, | ||||
| ): | ||||
|     ''' | ||||
|     Ensure we can have an IPC channel break its connection during | ||||
|     streaming and it's still possible for the (simulated) user to kill | ||||
|     the actor tree using SIGINT. | ||||
| 
 | ||||
|     We also verify the type of connection error expected in the parent | ||||
|     depending on which side of the IPC breaks first. | ||||
| 
 | ||||
|     ''' | ||||
|     if spawn_backend != 'trio': | ||||
|         if debug_mode: | ||||
|             pytest.skip('`debug_mode` only supported on `trio` spawner') | ||||
| 
 | ||||
|         # non-`trio` spawners should never hit the hang condition that | ||||
|         # requires the user to do ctl-c to cancel the actor tree. | ||||
|         expect_final_exc = trio.ClosedResourceError | ||||
| 
 | ||||
|     mod = import_path( | ||||
|         examples_dir() / 'advanced_faults' / 'ipc_failure_during_stream.py', | ||||
|         root=examples_dir(), | ||||
|     ) | ||||
| 
 | ||||
|     expect_final_exc = KeyboardInterrupt | ||||
| 
 | ||||
|     # when ONLY the child breaks we expect the parent to get a closed | ||||
|     # resource error on the next `MsgStream.receive()` and then fail out | ||||
|     # and cancel the child from there. | ||||
|     if ( | ||||
| 
 | ||||
|         # only child breaks | ||||
|         ( | ||||
|             ipc_break['break_child_ipc_after'] | ||||
|             and ipc_break['break_parent_ipc_after'] is False | ||||
|         ) | ||||
| 
 | ||||
|         # both break but, parent breaks first | ||||
|         or ( | ||||
|             ipc_break['break_child_ipc_after'] is not False | ||||
|             and ( | ||||
|                 ipc_break['break_parent_ipc_after'] | ||||
|                 > ipc_break['break_child_ipc_after'] | ||||
|             ) | ||||
|         ) | ||||
| 
 | ||||
|     ): | ||||
|         expect_final_exc = trio.ClosedResourceError | ||||
| 
 | ||||
|     # when the parent IPC side dies (even if the child's does as well | ||||
|     # but the child fails BEFORE the parent) we expect the channel to be | ||||
|     # sent a stop msg from the child at some point which will signal the | ||||
|     # parent that the stream has been terminated. | ||||
|     # NOTE: when the parent breaks "after" the child you get this same | ||||
|     # case as well, the child breaks the IPC channel with a stop msg | ||||
|     # before any closure takes place. | ||||
|     elif ( | ||||
|         # only parent breaks | ||||
|         ( | ||||
|             ipc_break['break_parent_ipc_after'] | ||||
|             and ipc_break['break_child_ipc_after'] is False | ||||
|         ) | ||||
| 
 | ||||
|         # both break but, child breaks first | ||||
|         or ( | ||||
|             ipc_break['break_parent_ipc_after'] is not False | ||||
|             and ( | ||||
|                 ipc_break['break_child_ipc_after'] | ||||
|                 > ipc_break['break_parent_ipc_after'] | ||||
|             ) | ||||
|         ) | ||||
|     ): | ||||
|         expect_final_exc = trio.EndOfChannel | ||||
| 
 | ||||
|     with pytest.raises(expect_final_exc): | ||||
|         trio.run( | ||||
|             partial( | ||||
|                 mod.main, | ||||
|                 debug_mode=debug_mode, | ||||
|                 start_method=spawn_backend, | ||||
|                 **ipc_break, | ||||
|             ) | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def break_ipc_after_started( | ||||
|     ctx: tractor.Context, | ||||
| ) -> None: | ||||
|     await ctx.started() | ||||
|     async with ctx.open_stream() as stream: | ||||
|         await stream.aclose() | ||||
|         await trio.sleep(0.2) | ||||
|         await ctx.chan.send(None) | ||||
|         print('child broke IPC and terminating') | ||||
| 
 | ||||
| 
 | ||||
| def test_stream_closed_right_after_ipc_break_and_zombie_lord_engages(): | ||||
|     ''' | ||||
|     Verify that if a subactor's IPC goes down just after bringing up a stream | ||||
|     the parent can trigger a SIGINT and the child will be reaped out-of-IPC by | ||||
|     the localhost process supervision machinery: aka "zombie lord". | ||||
| 
 | ||||
|     ''' | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             portal = await n.start_actor( | ||||
|                 'ipc_breaker', | ||||
|                 enable_modules=[__name__], | ||||
|             ) | ||||
| 
 | ||||
|             with trio.move_on_after(1): | ||||
|                 async with ( | ||||
|                     portal.open_context( | ||||
|                         break_ipc_after_started | ||||
|                     ) as (ctx, sent), | ||||
|                 ): | ||||
|                     async with ctx.open_stream(): | ||||
|                         await trio.sleep(0.5) | ||||
| 
 | ||||
|                     print('parent waiting on context') | ||||
| 
 | ||||
|             print('parent exited context') | ||||
|             raise KeyboardInterrupt | ||||
| 
 | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run(main) | ||||
|  | @ -5,6 +5,7 @@ Advanced streaming patterns using bidirectional streams and contexts. | |||
| from collections import Counter | ||||
| import itertools | ||||
| import platform | ||||
| from typing import Set, Dict, List | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
|  | @ -14,7 +15,7 @@ def is_win(): | |||
|     return platform.system() == 'Windows' | ||||
| 
 | ||||
| 
 | ||||
| _registry: dict[str, set[tractor.MsgStream]] = { | ||||
| _registry: Dict[str, Set[tractor.ReceiveMsgStream]] = { | ||||
|     'even': set(), | ||||
|     'odd': set(), | ||||
| } | ||||
|  | @ -76,7 +77,7 @@ async def subscribe( | |||
| 
 | ||||
| async def consumer( | ||||
| 
 | ||||
|     subs: list[str], | ||||
|     subs: List[str], | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|  |  | |||
|  | @ -8,10 +8,6 @@ import platform | |||
| import time | ||||
| from itertools import repeat | ||||
| 
 | ||||
| from exceptiongroup import ( | ||||
|     BaseExceptionGroup, | ||||
|     ExceptionGroup, | ||||
| ) | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
|  | @ -60,49 +56,29 @@ def test_remote_error(arb_addr, args_err): | |||
|             arbiter_addr=arb_addr, | ||||
|         ) as nursery: | ||||
| 
 | ||||
|             # on a remote type error caused by bad input args | ||||
|             # this should raise directly which means we **don't** get | ||||
|             # an exception group outside the nursery since the error | ||||
|             # here and the far end task error are one and the same? | ||||
|             portal = await nursery.run_in_actor( | ||||
|                 assert_err, name='errorer', **args | ||||
|             ) | ||||
| 
 | ||||
|             # get result(s) from main task | ||||
|             try: | ||||
|                 # this means the root actor will also raise a local | ||||
|                 # parent task error and thus an eg will propagate out | ||||
|                 # of this actor nursery. | ||||
|                 await portal.result() | ||||
|             except tractor.RemoteActorError as err: | ||||
|                 assert err.type == errtype | ||||
|                 print("Look Maa that actor failed hard, hehh") | ||||
|                 raise | ||||
| 
 | ||||
|     # ensure boxed errors | ||||
|     if args: | ||||
|         with pytest.raises(tractor.RemoteActorError) as excinfo: | ||||
|             trio.run(main) | ||||
|     with pytest.raises(tractor.RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|         assert excinfo.value.type == errtype | ||||
| 
 | ||||
|     else: | ||||
|         # the root task will also error on the `.result()` call | ||||
|         # so we expect an error from there AND the child. | ||||
|         with pytest.raises(BaseExceptionGroup) as excinfo: | ||||
|             trio.run(main) | ||||
| 
 | ||||
|         # ensure boxed errors | ||||
|         for exc in excinfo.value.exceptions: | ||||
|             assert exc.type == errtype | ||||
|     # ensure boxed error is correct | ||||
|     assert excinfo.value.type == errtype | ||||
| 
 | ||||
| 
 | ||||
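The hunk above switches between two assertion styles: the newer side expects either a single boxed `tractor.RemoteActorError` or a `BaseExceptionGroup` (depending on whether the root task errors as well), while the older side always expects one boxed error. A hedged sketch of both patterns, with `main` and `errtype` standing in for the surrounding test's locals:

    import pytest
    import trio
    import tractor
    from exceptiongroup import BaseExceptionGroup  # builtin on py3.11+


    def assert_single_boxed_error(main, errtype):
        # a lone remote failure surfaces directly as the boxed error
        with pytest.raises(tractor.RemoteActorError) as excinfo:
            trio.run(main)
        assert excinfo.value.type == errtype


    def assert_grouped_errors(main, errtype):
        # when the root task errors too, a group wraps each child error
        with pytest.raises(BaseExceptionGroup) as excinfo:
            trio.run(main)
        for exc in excinfo.value.exceptions:
            assert exc.type == errtype
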
| def test_multierror(arb_addr): | ||||
|     ''' | ||||
|     Verify we raise a ``BaseExceptionGroup`` out of a nursery where | ||||
|     """Verify we raise a ``trio.MultiError`` out of a nursery where | ||||
|     more than one actor errors. | ||||
| 
 | ||||
|     ''' | ||||
|     """ | ||||
|     async def main(): | ||||
|         async with tractor.open_nursery( | ||||
|             arbiter_addr=arb_addr, | ||||
|  | @ -119,10 +95,10 @@ def test_multierror(arb_addr): | |||
|                 print("Look Maa that first actor failed hard, hehh") | ||||
|                 raise | ||||
| 
 | ||||
|         # here we should get a ``BaseExceptionGroup`` containing exceptions | ||||
|         # here we should get a `trio.MultiError` containing exceptions | ||||
|         # from both subactors | ||||
| 
 | ||||
|     with pytest.raises(BaseExceptionGroup): | ||||
|     with pytest.raises(trio.MultiError): | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -131,7 +107,7 @@ def test_multierror(arb_addr): | |||
|     'num_subactors', range(25, 26), | ||||
| ) | ||||
| def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | ||||
|     """Verify we raise a ``BaseExceptionGroup`` out of a nursery where | ||||
|     """Verify we raise a ``trio.MultiError`` out of a nursery where | ||||
|     more than one actor errors and also with a delay before failure | ||||
|     to test failure during an ongoing spawning. | ||||
|     """ | ||||
|  | @ -147,11 +123,10 @@ def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | |||
|                     delay=delay | ||||
|                 ) | ||||
| 
 | ||||
|     # with pytest.raises(trio.MultiError) as exc_info: | ||||
|     with pytest.raises(BaseExceptionGroup) as exc_info: | ||||
|     with pytest.raises(trio.MultiError) as exc_info: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     assert exc_info.type == ExceptionGroup | ||||
|     assert exc_info.type == tractor.MultiError | ||||
|     err = exc_info.value | ||||
|     exceptions = err.exceptions | ||||
| 
 | ||||
|  | @ -239,8 +214,8 @@ async def test_cancel_infinite_streamer(start_method): | |||
|     [ | ||||
|         # daemon actors sit idle while single task actors error out | ||||
|         (1, tractor.RemoteActorError, AssertionError, (assert_err, {}), None), | ||||
|         (2, BaseExceptionGroup, AssertionError, (assert_err, {}), None), | ||||
|         (3, BaseExceptionGroup, AssertionError, (assert_err, {}), None), | ||||
|         (2, tractor.MultiError, AssertionError, (assert_err, {}), None), | ||||
|         (3, tractor.MultiError, AssertionError, (assert_err, {}), None), | ||||
| 
 | ||||
|         # 1 daemon actor errors out while single task actors sleep forever | ||||
|         (3, tractor.RemoteActorError, AssertionError, (sleep_forever, {}), | ||||
|  | @ -251,7 +226,7 @@ async def test_cancel_infinite_streamer(start_method): | |||
|          (do_nuthin, {}), (assert_err, {'delay': 1}, True)), | ||||
|         # daemons complete quickly while single task | ||||
|         # actors error after a brief delay | ||||
|         (3, BaseExceptionGroup, AssertionError, | ||||
|         (3, tractor.MultiError, AssertionError, | ||||
|          (assert_err, {'delay': 1}), (do_nuthin, {}, False)), | ||||
|     ], | ||||
|     ids=[ | ||||
|  | @ -318,7 +293,7 @@ async def test_some_cancels_all(num_actors_and_errs, start_method, loglevel): | |||
|         # should error here with a ``RemoteActorError`` or ``MultiError`` | ||||
| 
 | ||||
|     except first_err as err: | ||||
|         if isinstance(err, BaseExceptionGroup): | ||||
|         if isinstance(err, tractor.MultiError): | ||||
|             assert len(err.exceptions) == num_actors | ||||
|             for exc in err.exceptions: | ||||
|                 if isinstance(exc, tractor.RemoteActorError): | ||||
|  | @ -362,7 +337,7 @@ async def spawn_and_error(breadth, depth) -> None: | |||
| @tractor_test | ||||
| async def test_nested_multierrors(loglevel, start_method): | ||||
|     ''' | ||||
|     Test that failed actor sets are wrapped in `BaseExceptionGroup`s. This | ||||
|     Test that failed actor sets are wrapped in `trio.MultiError`s. This | ||||
|     test goes only 2 nurseries deep but we should eventually have tests | ||||
|     for arbitrary n-depth actor trees. | ||||
| 
 | ||||
|  | @ -390,7 +365,7 @@ async def test_nested_multierrors(loglevel, start_method): | |||
|                         breadth=subactor_breadth, | ||||
|                         depth=depth, | ||||
|                     ) | ||||
|         except BaseExceptionGroup as err: | ||||
|         except trio.MultiError as err: | ||||
|             assert len(err.exceptions) == subactor_breadth | ||||
|             for subexc in err.exceptions: | ||||
| 
 | ||||
|  | @ -408,10 +383,10 @@ async def test_nested_multierrors(loglevel, start_method): | |||
|                         assert subexc.type in ( | ||||
|                             tractor.RemoteActorError, | ||||
|                             trio.Cancelled, | ||||
|                             BaseExceptionGroup, | ||||
|                             trio.MultiError | ||||
|                         ) | ||||
| 
 | ||||
|                     elif isinstance(subexc, BaseExceptionGroup): | ||||
|                     elif isinstance(subexc, trio.MultiError): | ||||
|                         for subsub in subexc.exceptions: | ||||
| 
 | ||||
|                             if subsub in (tractor.RemoteActorError,): | ||||
|  | @ -419,7 +394,7 @@ async def test_nested_multierrors(loglevel, start_method): | |||
| 
 | ||||
|                             assert type(subsub) in ( | ||||
|                                 trio.Cancelled, | ||||
|                                 BaseExceptionGroup, | ||||
|                                 trio.MultiError, | ||||
|                             ) | ||||
|                 else: | ||||
|                     assert isinstance(subexc, tractor.RemoteActorError) | ||||
|  | @ -431,13 +406,13 @@ async def test_nested_multierrors(loglevel, start_method): | |||
|                     if is_win(): | ||||
|                         if isinstance(subexc, tractor.RemoteActorError): | ||||
|                             assert subexc.type in ( | ||||
|                                 BaseExceptionGroup, | ||||
|                                 trio.MultiError, | ||||
|                                 tractor.RemoteActorError | ||||
|                             ) | ||||
|                         else: | ||||
|                             assert isinstance(subexc, BaseExceptionGroup) | ||||
|                             assert isinstance(subexc, trio.MultiError) | ||||
|                     else: | ||||
|                         assert subexc.type is ExceptionGroup | ||||
|                         assert subexc.type is trio.MultiError | ||||
|                 else: | ||||
|                     assert subexc.type in ( | ||||
|                         tractor.RemoteActorError, | ||||
|  | @ -460,7 +435,7 @@ def test_cancel_via_SIGINT( | |||
|         with trio.fail_after(2): | ||||
|             async with tractor.open_nursery() as tn: | ||||
|                 await tn.start_actor('sucka') | ||||
|                 if 'mp' in spawn_backend: | ||||
|                 if spawn_backend == 'mp': | ||||
|                     time.sleep(0.1) | ||||
|                 os.kill(pid, signal.SIGINT) | ||||
|                 await trio.sleep_forever() | ||||
|  | @ -499,7 +474,7 @@ def test_cancel_via_SIGINT_other_task( | |||
|         with trio.fail_after(timeout): | ||||
|             async with trio.open_nursery() as n: | ||||
|                 await n.start(spawn_and_sleep_forever) | ||||
|                 if 'mp' in spawn_backend: | ||||
|                 if spawn_backend == 'mp': | ||||
|                     time.sleep(0.1) | ||||
|                 os.kill(pid, signal.SIGINT) | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,6 +1,5 @@ | |||
| import itertools | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| from tractor import open_actor_cluster | ||||
|  | @ -12,72 +11,26 @@ from conftest import tractor_test | |||
| MESSAGE = 'tractoring at full speed' | ||||
| 
 | ||||
| 
 | ||||
| def test_empty_mngrs_input_raises() -> None: | ||||
| 
 | ||||
|     async def main(): | ||||
|         with trio.fail_after(1): | ||||
|             async with ( | ||||
|                 open_actor_cluster( | ||||
|                     modules=[__name__], | ||||
| 
 | ||||
|                     # NOTE: ensure we can passthrough runtime opts | ||||
|                     loglevel='info', | ||||
|                     # debug_mode=True, | ||||
| 
 | ||||
|                 ) as portals, | ||||
| 
 | ||||
|                 gather_contexts( | ||||
|                     # NOTE: it's the use of inline-generator syntax | ||||
|                     # here that causes the empty input. | ||||
|                     mngrs=( | ||||
|                         p.open_context(worker) for p in portals.values() | ||||
|                     ), | ||||
|                 ), | ||||
|             ): | ||||
|                 assert 0 | ||||
| 
 | ||||
|     with pytest.raises(ValueError): | ||||
|         trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def worker( | ||||
|     ctx: tractor.Context, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
| async def worker(ctx: tractor.Context) -> None: | ||||
|     await ctx.started() | ||||
| 
 | ||||
|     async with ctx.open_stream( | ||||
|         backpressure=True, | ||||
|     ) as stream: | ||||
| 
 | ||||
|         # TODO: this with the below assert causes a hang bug? | ||||
|         # with trio.move_on_after(1): | ||||
| 
 | ||||
|     async with ctx.open_stream(backpressure=True) as stream: | ||||
|         async for msg in stream: | ||||
|             # do something with msg | ||||
|             print(msg) | ||||
|             assert msg == MESSAGE | ||||
| 
 | ||||
|         # TODO: does this ever cause a hang | ||||
|         # assert 0 | ||||
| 
 | ||||
| 
 | ||||
| @tractor_test | ||||
| async def test_streaming_to_actor_cluster() -> None: | ||||
| 
 | ||||
|     async with ( | ||||
|         open_actor_cluster(modules=[__name__]) as portals, | ||||
| 
 | ||||
|         gather_contexts( | ||||
|             mngrs=[p.open_context(worker) for p in portals.values()], | ||||
|         ) as contexts, | ||||
| 
 | ||||
|         gather_contexts( | ||||
|             mngrs=[ctx[0].open_stream() for ctx in contexts], | ||||
|         ) as streams, | ||||
| 
 | ||||
|     ): | ||||
|         with trio.move_on_after(1): | ||||
|             for stream in itertools.cycle(streams): | ||||
|  |  | |||
|  | @ -571,7 +571,7 @@ def test_one_end_stream_not_opened(overrun_by): | |||
| 
 | ||||
|     ''' | ||||
|     overrunner, buf_size_increase, entrypoint = overrun_by | ||||
|     from tractor._runtime import Actor | ||||
|     from tractor._actor import Actor | ||||
|     buf_size = buf_size_increase + Actor.msg_buffer_size | ||||
| 
 | ||||
|     async def main(): | ||||
|  |  | |||
|  | @ -10,11 +10,9 @@ TODO: | |||
|     - wonder if any of it'll work on OS X? | ||||
| 
 | ||||
| """ | ||||
| import itertools | ||||
| from os import path | ||||
| from typing import Optional | ||||
| import platform | ||||
| import pathlib | ||||
| import sys | ||||
| import time | ||||
| 
 | ||||
|  | @ -25,10 +23,7 @@ from pexpect.exceptions import ( | |||
|     EOF, | ||||
| ) | ||||
| 
 | ||||
| from conftest import ( | ||||
|     examples_dir, | ||||
|     _ci_env, | ||||
| ) | ||||
| from conftest import repodir | ||||
| 
 | ||||
| # TODO: The next great debugger audit could be done by you! | ||||
| # - recurrent entry to breakpoint() from single actor *after* and an | ||||
|  | @ -47,31 +42,19 @@ if platform.system() == 'Windows': | |||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| def examples_dir(): | ||||
|     """Return the abspath to the examples directory. | ||||
|     """ | ||||
|     return path.join(repodir(), 'examples', 'debugging/') | ||||
| 
 | ||||
| 
 | ||||
| def mk_cmd(ex_name: str) -> str: | ||||
|     ''' | ||||
|     Generate a command suitable to pass to ``pexpect.spawn()``. | ||||
| 
 | ||||
|     ''' | ||||
|     script_path: pathlib.Path = examples_dir() / 'debugging' / f'{ex_name}.py' | ||||
|     return ' '.join(['python', str(script_path)]) | ||||
| 
 | ||||
| 
 | ||||
| # TODO: was trying to this xfail style but some weird bug i see in CI | ||||
| # that's happening at collect time.. pretty soon gonna dump actions i'm | ||||
| # thinkin... | ||||
| # in CI we skip tests which >= depth 1 actor trees due to there | ||||
| # still being an outstanding issue with relaying the debug-mode-state | ||||
| # through intermediary parents. | ||||
| has_nested_actors = pytest.mark.has_nested_actors | ||||
| # .xfail( | ||||
| #     os.environ.get('CI', False), | ||||
| #     reason=( | ||||
| #         'This test uses nested actors and fails in CI\n' | ||||
| #         'The test seems to run fine locally but until we solve the ' | ||||
| #         'following issue this CI test will be xfail:\n' | ||||
| #         'https://github.com/goodboy/tractor/issues/320' | ||||
| #     ) | ||||
| # ) | ||||
|     """Generate a command suitable to pass to ``pexpect.spawn()``. | ||||
|     """ | ||||
|     return ' '.join( | ||||
|         ['python', | ||||
|          path.join(examples_dir(), f'{ex_name}.py')] | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
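Every debugger test below follows the same pexpect loop: spawn one of the example scripts, wait for the pdb prompt, inspect `child.before`, send a command, repeat until EOF. The prompt regex is the main thing this diff flips (the newer `PROMPT` constant matching `(Pdb+)` vs `pdbpp`'s `(Pdb++)`). A hedged standalone sketch of that loop; the script path assumes the `examples/debugging/` layout referenced above:

    import pexpect

    PROMPT = r"\(Pdb\+\+\)"  # or r"\(Pdb\+\)" on the other side of this diff

    child = pexpect.spawn('python examples/debugging/root_actor_error.py')
    child.expect(PROMPT)             # crash handler engaged the REPL
    print(child.before.decode())     # console output preceding the prompt
    child.sendline('c')              # continue; the actor tree should exit
    child.expect(pexpect.EOF)
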
| @pytest.fixture | ||||
|  | @ -95,34 +78,6 @@ def spawn( | |||
|     return _spawn | ||||
| 
 | ||||
| 
 | ||||
| PROMPT = r"\(Pdb\+\)" | ||||
| 
 | ||||
| 
 | ||||
| def expect( | ||||
|     child, | ||||
| 
 | ||||
|     # prompt by default | ||||
|     patt: str = PROMPT, | ||||
| 
 | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Expect wrapper that prints last seen console | ||||
|     data before failing. | ||||
| 
 | ||||
|     ''' | ||||
|     try: | ||||
|         child.expect( | ||||
|             patt, | ||||
|             **kwargs, | ||||
|         ) | ||||
|     except TIMEOUT: | ||||
|         before = str(child.before.decode()) | ||||
|         print(before) | ||||
|         raise | ||||
| 
 | ||||
| 
 | ||||
| def assert_before( | ||||
|     child, | ||||
|     patts: list[str], | ||||
|  | @ -143,24 +98,21 @@ def assert_before( | |||
|     params=[False, True], | ||||
|     ids='ctl-c={}'.format, | ||||
| ) | ||||
| def ctlc( | ||||
|     request, | ||||
|     ci_env: bool, | ||||
| 
 | ||||
| ) -> bool: | ||||
| def ctlc(request) -> bool: | ||||
| 
 | ||||
|     use_ctlc = request.param | ||||
| 
 | ||||
|     node = request.node | ||||
|     markers = node.own_markers | ||||
|     for mark in markers: | ||||
|         if mark.name == 'has_nested_actors': | ||||
|             pytest.skip( | ||||
|                 f'Test {node} has nested actors and fails with Ctrl-C.\n' | ||||
|                 f'The test can sometimes run fine locally but until' | ||||
|                 ' we solve this issue this CI test will be xfail:\n' | ||||
|                 'https://github.com/goodboy/tractor/issues/320' | ||||
|             ) | ||||
|     if ( | ||||
|         sys.version_info <= (3, 10) | ||||
|         and use_ctlc | ||||
|     ): | ||||
|         # on 3.9 it seems the REPL UX | ||||
|         # is highly unreliable and frankly annoying | ||||
|         # to test for. It does work from manual testing | ||||
|         # but i just don't think it's worth it to try | ||||
|         # and get this working especially since we want to | ||||
|         # be 3.10+ mega-asap. | ||||
|         pytest.skip('Py3.9 and `pdbpp` son no bueno..') | ||||
| 
 | ||||
|     if use_ctlc: | ||||
|         # XXX: disable pygments highlighting for auto-tests | ||||
|  | @ -181,16 +133,14 @@ def ctlc( | |||
|     ids=lambda item: f'{item[0]} -> {item[1]}', | ||||
| ) | ||||
| def test_root_actor_error(spawn, user_in_out): | ||||
|     ''' | ||||
|     Demonstrate crash handler entering pdb from basic error in root actor. | ||||
| 
 | ||||
|     ''' | ||||
|     """Demonstrate crash handler entering pdbpp from basic error in root actor. | ||||
|     """ | ||||
|     user_input, expect_err_str = user_in_out | ||||
| 
 | ||||
|     child = spawn('root_actor_error') | ||||
| 
 | ||||
|     # scan for the prompt | ||||
|     expect(child, PROMPT) | ||||
|     # scan for the pdbpp prompt | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     before = str(child.before.decode()) | ||||
| 
 | ||||
|  | @ -202,7 +152,7 @@ def test_root_actor_error(spawn, user_in_out): | |||
|     child.sendline(user_input) | ||||
| 
 | ||||
|     # process should exit | ||||
|     expect(child, EOF) | ||||
|     child.expect(pexpect.EOF) | ||||
|     assert expect_err_str in str(child.before) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -220,8 +170,8 @@ def test_root_actor_bp(spawn, user_in_out): | |||
|     user_input, expect_err_str = user_in_out | ||||
|     child = spawn('root_actor_breakpoint') | ||||
| 
 | ||||
|     # scan for the prompt | ||||
|     child.expect(PROMPT) | ||||
|     # scan for the pdbpp prompt | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     assert 'Error' not in str(child.before) | ||||
| 
 | ||||
|  | @ -244,11 +194,9 @@ def do_ctlc( | |||
|     delay: float = 0.1, | ||||
|     patt: Optional[str] = None, | ||||
| 
 | ||||
|     # expect repl UX to reprint the prompt after every | ||||
|     # ctrl-c send. | ||||
|     # XXX: no idea but, in CI this never seems to work even on 3.10 so | ||||
|     # needs some further investigation potentially... | ||||
|     expect_prompt: bool = not _ci_env, | ||||
|     # XXX: literally no idea why this is an issue in CI but likely will | ||||
|     # flush out (hopefully) with proper 3.10 release of `pdbpp`... | ||||
|     expect_prompt: bool = True, | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|  | @ -259,10 +207,11 @@ def do_ctlc( | |||
| 
 | ||||
|         # TODO: figure out why this makes CI fail.. | ||||
|         # if you run this test manually it works just fine.. | ||||
|         if expect_prompt: | ||||
|         from conftest import _ci_env | ||||
|         if expect_prompt and not _ci_env: | ||||
|             before = str(child.before.decode()) | ||||
|             time.sleep(delay) | ||||
|             child.expect(PROMPT) | ||||
|             child.expect(r"\(Pdb\+\+\)") | ||||
|             time.sleep(delay) | ||||
| 
 | ||||
|             if patt: | ||||
|  | @ -281,7 +230,7 @@ def test_root_actor_bp_forever( | |||
|     # entries | ||||
|     for _ in range(10): | ||||
| 
 | ||||
|         child.expect(PROMPT) | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|         if ctlc: | ||||
|             do_ctlc(child) | ||||
|  | @ -291,7 +240,7 @@ def test_root_actor_bp_forever( | |||
|     # do one continue which should trigger a | ||||
|     # new task to lock the tty | ||||
|     child.sendline('continue') | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     # seems that if we hit ctrl-c too fast the | ||||
|     # sigint guard machinery might not kick in.. | ||||
|  | @ -302,10 +251,10 @@ def test_root_actor_bp_forever( | |||
| 
 | ||||
|     # XXX: this previously caused a bug! | ||||
|     child.sendline('n') | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     child.sendline('n') | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     # quit out of the loop | ||||
|     child.sendline('q') | ||||
|  | @ -322,14 +271,12 @@ def test_subactor_error( | |||
|     ctlc: bool, | ||||
|     do_next: bool, | ||||
| ): | ||||
|     ''' | ||||
|     Single subactor raising an error | ||||
|     "Single subactor raising an error" | ||||
| 
 | ||||
|     ''' | ||||
|     child = spawn('subactor_error') | ||||
| 
 | ||||
|     # scan for the prompt | ||||
|     child.expect(PROMPT) | ||||
|     # scan for the pdbpp prompt | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     before = str(child.before.decode()) | ||||
|     assert "Attaching to pdb in crashed actor: ('name_error'" in before | ||||
|  | @ -349,7 +296,7 @@ def test_subactor_error( | |||
|         # creating actor | ||||
|         child.sendline('continue') | ||||
| 
 | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
|     before = str(child.before.decode()) | ||||
| 
 | ||||
|     # root actor gets debugger engaged | ||||
|  | @ -376,8 +323,8 @@ def test_subactor_breakpoint( | |||
| 
 | ||||
|     child = spawn('subactor_breakpoint') | ||||
| 
 | ||||
|     # scan for the prompt | ||||
|     child.expect(PROMPT) | ||||
|     # scan for the pdbpp prompt | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     before = str(child.before.decode()) | ||||
|     assert "Attaching pdb to actor: ('breakpoint_forever'" in before | ||||
|  | @ -386,7 +333,7 @@ def test_subactor_breakpoint( | |||
|     # entries | ||||
|     for _ in range(10): | ||||
|         child.sendline('next') | ||||
|         child.expect(PROMPT) | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|         if ctlc: | ||||
|             do_ctlc(child) | ||||
|  | @ -394,7 +341,7 @@ def test_subactor_breakpoint( | |||
|     # now run some "continues" to show re-entries | ||||
|     for _ in range(5): | ||||
|         child.sendline('continue') | ||||
|         child.expect(PROMPT) | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
|         before = str(child.before.decode()) | ||||
|         assert "Attaching pdb to actor: ('breakpoint_forever'" in before | ||||
| 
 | ||||
|  | @ -405,7 +352,7 @@ def test_subactor_breakpoint( | |||
|     child.sendline('q') | ||||
| 
 | ||||
|     # child process should exit but parent will capture pdb.BdbQuit | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     before = str(child.before.decode()) | ||||
|     assert "RemoteActorError: ('breakpoint_forever'" in before | ||||
|  | @ -425,7 +372,6 @@ def test_subactor_breakpoint( | |||
|     assert 'bdb.BdbQuit' in before | ||||
| 
 | ||||
| 
 | ||||
| @has_nested_actors | ||||
| def test_multi_subactors( | ||||
|     spawn, | ||||
|     ctlc: bool, | ||||
|  | @ -437,8 +383,8 @@ def test_multi_subactors( | |||
|     ''' | ||||
|     child = spawn(r'multi_subactors') | ||||
| 
 | ||||
|     # scan for the prompt | ||||
|     child.expect(PROMPT) | ||||
|     # scan for the pdbpp prompt | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     before = str(child.before.decode()) | ||||
|     assert "Attaching pdb to actor: ('breakpoint_forever'" in before | ||||
|  | @ -450,7 +396,7 @@ def test_multi_subactors( | |||
|     # entries | ||||
|     for _ in range(10): | ||||
|         child.sendline('next') | ||||
|         child.expect(PROMPT) | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|         if ctlc: | ||||
|             do_ctlc(child) | ||||
|  | @ -459,7 +405,7 @@ def test_multi_subactors( | |||
|     child.sendline('c') | ||||
| 
 | ||||
|     # first name_error failure | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
|     before = str(child.before.decode()) | ||||
|     assert "Attaching to pdb in crashed actor: ('name_error'" in before | ||||
|     assert "NameError" in before | ||||
|  | @ -471,21 +417,25 @@ def test_multi_subactors( | |||
|     child.sendline('c') | ||||
| 
 | ||||
|     # 2nd name_error failure | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     # TODO: will we ever get the race where this crash will show up? | ||||
|     # blocklist strat now prevents this crash | ||||
|     # assert_before(child, [ | ||||
|     #     "Attaching to pdb in crashed actor: ('name_error_1'", | ||||
|     #     "NameError", | ||||
|     # ]) | ||||
|     # XXX: lol honestly no idea why CI is cuck but | ||||
|     # seems like this likely falls into our unhandled nested | ||||
|     # case and isn't working in that env due to raciness.. | ||||
|     from conftest import _ci_env | ||||
|     if not ctlc and _ci_env: | ||||
|         name = 'name_error' if ctlc else 'name_error_1' | ||||
|         assert_before(child, [ | ||||
|             f"Attaching to pdb in crashed actor: ('{name}'", | ||||
|             "NameError", | ||||
|         ]) | ||||
| 
 | ||||
|     if ctlc: | ||||
|         do_ctlc(child) | ||||
| 
 | ||||
|     # breakpoint loop should re-engage | ||||
|     child.sendline('c') | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
|     before = str(child.before.decode()) | ||||
|     assert "Attaching pdb to actor: ('breakpoint_forever'" in before | ||||
| 
 | ||||
|  | @ -497,20 +447,17 @@ def test_multi_subactors( | |||
|     start = time.time() | ||||
|     while ( | ||||
|         spawn_err not in before | ||||
|         and (time.time() - start) < 3  # timeout eventually | ||||
|         and (time.time() - start) < 3 | ||||
|     ): | ||||
|         child.sendline('c') | ||||
|         time.sleep(0.1) | ||||
|         child.expect(PROMPT) | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
|         before = str(child.before.decode()) | ||||
| 
 | ||||
|         if ctlc: | ||||
|             do_ctlc(child) | ||||
| 
 | ||||
|     # 2nd depth nursery should trigger | ||||
|     # (XXX: this below if guard is technically a hack that makes the | ||||
|     # nested case seem to work locally on linux but ideally in the long | ||||
|     # run this can be dropped.) | ||||
|     if not ctlc: | ||||
|         assert_before(child, [ | ||||
|             spawn_err, | ||||
|  | @ -520,31 +467,45 @@ def test_multi_subactors( | |||
|     # now run some "continues" to show re-entries | ||||
|     for _ in range(5): | ||||
|         child.sendline('c') | ||||
|         child.expect(PROMPT) | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     # quit the loop and expect parent to attach | ||||
|     child.sendline('q') | ||||
|     child.expect(PROMPT) | ||||
|     before = str(child.before.decode()) | ||||
| 
 | ||||
|     assert_before(child, [ | ||||
|         # debugger attaches to root | ||||
|         "Attaching to pdb in crashed actor: ('root'", | ||||
|     try: | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
|     except TIMEOUT: | ||||
|         if _ci_env and not ctlc: | ||||
|             raise | ||||
| 
 | ||||
|         # expect a multierror with exceptions for each sub-actor | ||||
|         "RemoteActorError: ('breakpoint_forever'", | ||||
|         "RemoteActorError: ('name_error'", | ||||
|         "RemoteActorError: ('spawn_error'", | ||||
|         "RemoteActorError: ('name_error_1'", | ||||
|         'bdb.BdbQuit', | ||||
|     ]) | ||||
|         # in ci seems like this can sometimes just result | ||||
|         # in full tree death? | ||||
|         print('tree died?') | ||||
| 
 | ||||
|     else: | ||||
|         before = str(child.before.decode()) | ||||
|         assert_before(child, [ | ||||
|             # debugger attaches to root | ||||
|             "Attaching to pdb in crashed actor: ('root'", | ||||
| 
 | ||||
|             # expect a multierror with exceptions for each sub-actor | ||||
|             "RemoteActorError: ('breakpoint_forever'", | ||||
|             "RemoteActorError: ('name_error'", | ||||
|             "RemoteActorError: ('spawn_error'", | ||||
|             "RemoteActorError: ('name_error_1'", | ||||
|             'bdb.BdbQuit', | ||||
|         ]) | ||||
| 
 | ||||
|     if ctlc: | ||||
|         do_ctlc(child) | ||||
| 
 | ||||
|     # process should exit | ||||
|     child.sendline('c') | ||||
|     child.expect(pexpect.EOF) | ||||
| 
 | ||||
|     try: | ||||
|         child.expect(pexpect.EOF) | ||||
|     except TIMEOUT: | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     # repeat of previous multierror for final output | ||||
|     assert_before(child, [ | ||||
|  | @ -568,16 +529,16 @@ def test_multi_daemon_subactors( | |||
|     ''' | ||||
|     child = spawn('multi_daemon_subactors') | ||||
| 
 | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     # there can be a race for which subactor will acquire | ||||
|     # the root's tty lock first so anticipate either crash | ||||
|     # message on the first entry. | ||||
| 
 | ||||
|     bp_forever_msg = "Attaching pdb to actor: ('bp_forever'" | ||||
|     name_error_msg = "NameError: name 'doggypants' is not defined" | ||||
|     # there is a race for which subactor will acquire | ||||
|     # the root's tty lock first | ||||
| 
 | ||||
|     before = str(child.before.decode()) | ||||
| 
 | ||||
|     bp_forever_msg = "Attaching pdb to actor: ('bp_forever'" | ||||
|     name_error_msg = "NameError" | ||||
| 
 | ||||
|     if bp_forever_msg in before: | ||||
|         next_msg = name_error_msg | ||||
| 
 | ||||
|  | @ -598,8 +559,10 @@ def test_multi_daemon_subactors( | |||
|     # second entry by `bp_forever`. | ||||
| 
 | ||||
|     child.sendline('c') | ||||
|     child.expect(PROMPT) | ||||
|     assert_before(child, [next_msg]) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
|     before = str(child.before.decode()) | ||||
| 
 | ||||
|     assert next_msg in before | ||||
| 
 | ||||
|     # XXX: hooray the root clobbering the child here was fixed! | ||||
|     # IMO, this demonstrates the true power of SC system design. | ||||
|  | @ -618,54 +581,34 @@ def test_multi_daemon_subactors( | |||
|     if ctlc: | ||||
|         do_ctlc(child) | ||||
| 
 | ||||
|     # expect another breakpoint actor entry | ||||
|     child.sendline('c') | ||||
|     child.expect(PROMPT) | ||||
|     # wait for final error in root | ||||
|     while True: | ||||
| 
 | ||||
|     try: | ||||
|         assert_before(child, [bp_forever_msg]) | ||||
|     except AssertionError: | ||||
|         assert_before(child, [name_error_msg]) | ||||
|         child.sendline('c') | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
|         before = str(child.before.decode()) | ||||
|         try: | ||||
| 
 | ||||
|             # root error should be packed as remote error | ||||
|             assert "_exceptions.RemoteActorError: ('name_error'" in before | ||||
|             break | ||||
| 
 | ||||
|         except AssertionError: | ||||
|             assert bp_forever_msg in before | ||||
| 
 | ||||
|     else: | ||||
|         if ctlc: | ||||
|             do_ctlc(child) | ||||
| 
 | ||||
|         # should crash with the 2nd name error (simulates | ||||
|         # a retry) and then the root eventually (boxed) errors | ||||
|         # after 1 or more further bp actor entries. | ||||
| 
 | ||||
|     try: | ||||
|         child.sendline('c') | ||||
|         child.expect(PROMPT) | ||||
|         assert_before(child, [name_error_msg]) | ||||
|         child.expect(pexpect.EOF) | ||||
| 
 | ||||
|     # wait for final error in root | ||||
|     # where it crashs with boxed error | ||||
|     while True: | ||||
|         try: | ||||
|             child.sendline('c') | ||||
|             child.expect(PROMPT) | ||||
|             assert_before( | ||||
|                 child, | ||||
|                 [bp_forever_msg] | ||||
|             ) | ||||
|         except AssertionError: | ||||
|             break | ||||
| 
 | ||||
|     assert_before( | ||||
|         child, | ||||
|         [ | ||||
|             # boxed error raised in root task | ||||
|             "Attaching to pdb in crashed actor: ('root'", | ||||
|             "_exceptions.RemoteActorError: ('name_error'", | ||||
|         ] | ||||
|     ) | ||||
| 
 | ||||
|     child.sendline('c') | ||||
|     child.expect(pexpect.EOF) | ||||
|     except TIMEOUT: | ||||
|         # Failed to exit using continue..? | ||||
|         child.sendline('q') | ||||
|         child.expect(pexpect.EOF) | ||||
| 
 | ||||
| 
 | ||||
| @has_nested_actors | ||||
| def test_multi_subactors_root_errors( | ||||
|     spawn, | ||||
|     ctlc: bool | ||||
|  | @ -677,8 +620,8 @@ def test_multi_subactors_root_errors( | |||
|     ''' | ||||
|     child = spawn('multi_subactor_root_errors') | ||||
| 
 | ||||
|     # scan for the prompt | ||||
|     child.expect(PROMPT) | ||||
|     # scan for the pdbpp prompt | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     # at most one subactor should attach before the root is cancelled | ||||
|     before = str(child.before.decode()) | ||||
|  | @ -690,72 +633,66 @@ def test_multi_subactors_root_errors( | |||
|     # continue again to catch 2nd name error from | ||||
|     # actor 'name_error_1' (which is 2nd depth). | ||||
|     child.sendline('c') | ||||
|     try: | ||||
|         child.expect(r"\(Pdb\+\+\)") | ||||
|     except TIMEOUT: | ||||
|         child.sendline('') | ||||
| 
 | ||||
|     # due to block list strat from #337, this will no longer | ||||
|     # propagate before the root errors and cancels the spawner sub-tree. | ||||
|     child.expect(PROMPT) | ||||
| 
 | ||||
|     # only if the blocking condition doesn't kick in fast enough | ||||
|     before = str(child.before.decode()) | ||||
|     if "Debug lock blocked for ['name_error_1'" not in before: | ||||
| 
 | ||||
|     # XXX: lol honestly no idea why CI is cuck but | ||||
|     # seems like this likely falls into our unhandled nested | ||||
|     # case and isn't working in that env due to raciness.. | ||||
|     from conftest import _ci_env | ||||
|     if not ctlc and _ci_env: | ||||
|         name = 'name_error' if ctlc else 'name_error_1' | ||||
|         assert_before(child, [ | ||||
|             "Attaching to pdb in crashed actor: ('name_error_1'", | ||||
|             f"Attaching to pdb in crashed actor: ('{name}'", | ||||
|             "NameError", | ||||
|         ]) | ||||
| 
 | ||||
|         if ctlc: | ||||
|             do_ctlc(child) | ||||
| 
 | ||||
|         child.sendline('c') | ||||
|         child.expect(PROMPT) | ||||
| 
 | ||||
|     # check if the spawner crashed or was blocked from debug | ||||
|     # and if this intermediary attached check the boxed error | ||||
|     before = str(child.before.decode()) | ||||
|     if "Attaching to pdb in crashed actor: ('spawn_error'" in before: | ||||
| 
 | ||||
|         assert_before(child, [ | ||||
|             # boxed error from spawner's child | ||||
|             "RemoteActorError: ('name_error_1'", | ||||
|             "NameError", | ||||
|         ]) | ||||
| 
 | ||||
|         if ctlc: | ||||
|             do_ctlc(child) | ||||
| 
 | ||||
|         child.sendline('c') | ||||
|         child.expect(PROMPT) | ||||
| 
 | ||||
|     # expect a root actor crash | ||||
|     assert_before(child, [ | ||||
|         "RemoteActorError: ('name_error'", | ||||
|         "NameError", | ||||
| 
 | ||||
|         # error from root actor and root task that created top level nursery | ||||
|         "Attaching to pdb in crashed actor: ('root'", | ||||
|         "AssertionError", | ||||
|     ]) | ||||
|     if ctlc: | ||||
|         do_ctlc(child) | ||||
| 
 | ||||
|     child.sendline('c') | ||||
|     child.expect(pexpect.EOF) | ||||
| 
 | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
|     assert_before(child, [ | ||||
|         # "Attaching to pdb in crashed actor: ('root'", | ||||
|         "Attaching to pdb in crashed actor: ('spawn_error'", | ||||
|         # boxed error from previous step | ||||
|         "RemoteActorError: ('name_error_1'", | ||||
|         "NameError", | ||||
|     ]) | ||||
| 
 | ||||
|     if ctlc: | ||||
|         do_ctlc(child) | ||||
| 
 | ||||
|     child.sendline('c') | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
|     assert_before(child, [ | ||||
|         "Attaching to pdb in crashed actor: ('root'", | ||||
|         # boxed error from previous step | ||||
|         "RemoteActorError: ('name_error'", | ||||
|         "NameError", | ||||
|         "AssertionError", | ||||
|         'assert 0', | ||||
|     ]) | ||||
| 
 | ||||
|     # warnings assert we probably don't need | ||||
|     # assert "Cancelling nursery in ('spawn_error'," in before | ||||
| 
 | ||||
|     if ctlc: | ||||
|         do_ctlc(child) | ||||
| 
 | ||||
|     # continue again | ||||
|     child.sendline('c') | ||||
|     child.expect(pexpect.EOF) | ||||
| 
 | ||||
|     before = str(child.before.decode()) | ||||
|     # error from root actor and root task that created top level nursery | ||||
|     assert "AssertionError" in before | ||||
| 
 | ||||
| 
 | ||||
| @has_nested_actors | ||||
| def test_multi_nested_subactors_error_through_nurseries( | ||||
|     spawn, | ||||
| 
 | ||||
|     # TODO: address debugger issue for nested tree: | ||||
|     # https://github.com/goodboy/tractor/issues/320 | ||||
|     # <issuelink> | ||||
|     # ctlc: bool, | ||||
| ): | ||||
|     """Verify deeply nested actors that error trigger debugger entries | ||||
|  | @ -772,51 +709,41 @@ def test_multi_nested_subactors_error_through_nurseries( | |||
| 
 | ||||
|     timed_out_early: bool = False | ||||
| 
 | ||||
|     for send_char in itertools.cycle(['c', 'q']): | ||||
|     for i in range(12): | ||||
|         try: | ||||
|             child.expect(PROMPT) | ||||
|             child.sendline(send_char) | ||||
|             time.sleep(0.01) | ||||
|             child.expect(r"\(Pdb\+\+\)") | ||||
|             child.sendline('c') | ||||
|             time.sleep(0.1) | ||||
| 
 | ||||
|         except EOF: | ||||
| 
 | ||||
|             # race conditions on how fast the continue is sent? | ||||
|             print(f"Failed early on {i}?") | ||||
|             timed_out_early = True | ||||
|             break | ||||
|     else: | ||||
|         child.expect(pexpect.EOF) | ||||
| 
 | ||||
|     assert_before(child, [ | ||||
| 
 | ||||
|         # boxed source errors | ||||
|         "NameError: name 'doggypants' is not defined", | ||||
|         "tractor._exceptions.RemoteActorError: ('name_error'", | ||||
|         "bdb.BdbQuit", | ||||
| 
 | ||||
|         # first level subtrees | ||||
|         "tractor._exceptions.RemoteActorError: ('spawner0'", | ||||
|         # "tractor._exceptions.RemoteActorError: ('spawner1'", | ||||
| 
 | ||||
|         # propagation of errors up through nested subtrees | ||||
|         "tractor._exceptions.RemoteActorError: ('spawn_until_0'", | ||||
|         "tractor._exceptions.RemoteActorError: ('spawn_until_1'", | ||||
|         "tractor._exceptions.RemoteActorError: ('spawn_until_2'", | ||||
|     ]) | ||||
|     if not timed_out_early: | ||||
|         before = str(child.before.decode()) | ||||
|         assert "NameError" in before | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.timeout(15) | ||||
| @has_nested_actors | ||||
| def test_root_nursery_cancels_before_child_releases_tty_lock( | ||||
|     spawn, | ||||
|     start_method, | ||||
|     ctlc: bool, | ||||
| ): | ||||
|     ''' | ||||
|     Test that when the root sends a cancel message before a nested child | ||||
|     has unblocked (which can happen when it has the tty lock and is | ||||
|     engaged in pdb) it is indeed cancelled after exiting the debugger. | ||||
| 
 | ||||
|     ''' | ||||
|     """Test that when the root sends a cancel message before a nested | ||||
|     child has unblocked (which can happen when it has the tty lock and | ||||
|     is engaged in pdb) it is indeed cancelled after exiting the debugger. | ||||
|     """ | ||||
|     timed_out_early = False | ||||
| 
 | ||||
|     child = spawn('root_cancelled_but_child_is_in_tty_lock') | ||||
| 
 | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     before = str(child.before.decode()) | ||||
|     assert "NameError: name 'doggypants' is not defined" in before | ||||
|  | @ -831,7 +758,7 @@ def test_root_nursery_cancels_before_child_releases_tty_lock( | |||
|     for i in range(4): | ||||
|         time.sleep(0.5) | ||||
|         try: | ||||
|             child.expect(PROMPT) | ||||
|             child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|         except ( | ||||
|             EOF, | ||||
|  | @ -858,24 +785,22 @@ def test_root_nursery_cancels_before_child_releases_tty_lock( | |||
| 
 | ||||
|     for i in range(3): | ||||
|         try: | ||||
|             child.expect(pexpect.EOF, timeout=0.5) | ||||
|             child.expect(pexpect.EOF) | ||||
|             break | ||||
|         except TIMEOUT: | ||||
|             child.sendline('c') | ||||
|             time.sleep(0.1) | ||||
|             print('child was able to grab tty lock again?') | ||||
|     else: | ||||
|         print('giving up on child releasing, sending `quit` cmd') | ||||
|         child.sendline('q') | ||||
|         expect(child, EOF) | ||||
|         child.expect(pexpect.EOF) | ||||
| 
 | ||||
|     if not timed_out_early: | ||||
| 
 | ||||
|         before = str(child.before.decode()) | ||||
|         assert_before(child, [ | ||||
|             "tractor._exceptions.RemoteActorError: ('spawner0'", | ||||
|             "tractor._exceptions.RemoteActorError: ('name_error'", | ||||
|             "NameError: name 'doggypants' is not defined", | ||||
|         ]) | ||||
|         assert "tractor._exceptions.RemoteActorError: ('spawner0'" in before | ||||
|         assert "tractor._exceptions.RemoteActorError: ('name_error'" in before | ||||
|         assert "NameError: name 'doggypants' is not defined" in before | ||||
| 
 | ||||
| 
 | ||||
| def test_root_cancels_child_context_during_startup( | ||||
|  | @ -888,7 +813,7 @@ def test_root_cancels_child_context_during_startup( | |||
|     ''' | ||||
|     child = spawn('fast_error_in_root_after_spawn') | ||||
| 
 | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     before = str(child.before.decode()) | ||||
|     assert "AssertionError" in before | ||||
|  | @ -905,7 +830,7 @@ def test_different_debug_mode_per_actor( | |||
|     ctlc: bool, | ||||
| ): | ||||
|     child = spawn('per_actor_debug') | ||||
|     child.expect(PROMPT) | ||||
|     child.expect(r"\(Pdb\+\+\)") | ||||
| 
 | ||||
|     # only one actor should enter the debugger | ||||
|     before = str(child.before.decode()) | ||||
|  |  | |||
|  | @ -12,17 +12,17 @@ import shutil | |||
| 
 | ||||
| import pytest | ||||
| 
 | ||||
| from conftest import ( | ||||
|     examples_dir, | ||||
| ) | ||||
| from conftest import repodir | ||||
| 
 | ||||
| 
 | ||||
| def examples_dir(): | ||||
|     """Return the abspath to the examples directory. | ||||
|     """ | ||||
|     return os.path.join(repodir(), 'examples') | ||||
| 
 | ||||
| 
 | ||||
| @pytest.fixture | ||||
| def run_example_in_subproc( | ||||
|     loglevel: str, | ||||
|     testdir, | ||||
|     arb_addr: tuple[str, int], | ||||
| ): | ||||
| def run_example_in_subproc(loglevel, testdir, arb_addr): | ||||
| 
 | ||||
|     @contextmanager | ||||
|     def run(script_code): | ||||
|  | @ -32,8 +32,8 @@ def run_example_in_subproc( | |||
|             # on windows we need to create a special __main__.py which will | ||||
|             # be executed with ``python -m <modulename>`` on windows.. | ||||
|             shutil.copyfile( | ||||
|                 examples_dir() / '__main__.py', | ||||
|                 str(testdir / '__main__.py'), | ||||
|                 os.path.join(examples_dir(), '__main__.py'), | ||||
|                 os.path.join(str(testdir), '__main__.py') | ||||
|             ) | ||||
| 
 | ||||
|             # drop the ``if __name__ == '__main__'`` guard onwards from | ||||
|  | @ -88,7 +88,6 @@ def run_example_in_subproc( | |||
|         and f[0] != '_' | ||||
|         and 'debugging' not in p[0] | ||||
|         and 'integration' not in p[0] | ||||
|         and 'advanced_faults' not in p[0] | ||||
|     ], | ||||
| 
 | ||||
|     ids=lambda t: t[1], | ||||
|  |  | |||
|  | @ -8,7 +8,6 @@ import builtins | |||
| import itertools | ||||
| import importlib | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
|  | @ -171,11 +170,11 @@ async def trio_ctx( | |||
|     # message. | ||||
|     with trio.fail_after(2): | ||||
|         async with ( | ||||
|             trio.open_nursery() as n, | ||||
| 
 | ||||
|             tractor.to_asyncio.open_channel_from( | ||||
|                 sleep_and_err, | ||||
|             ) as (first, chan), | ||||
| 
 | ||||
|             trio.open_nursery() as n, | ||||
|         ): | ||||
| 
 | ||||
|             assert first == 'start' | ||||
|  | @ -204,25 +203,24 @@ def test_context_spawns_aio_task_that_errors( | |||
|     ''' | ||||
|     async def main(): | ||||
| 
 | ||||
|         with trio.fail_after(2): | ||||
|             async with tractor.open_nursery() as n: | ||||
|                 p = await n.start_actor( | ||||
|                     'aio_daemon', | ||||
|                     enable_modules=[__name__], | ||||
|                     infect_asyncio=True, | ||||
|                     # debug_mode=True, | ||||
|                     loglevel='cancel', | ||||
|                 ) | ||||
|                 async with p.open_context( | ||||
|                     trio_ctx, | ||||
|                 ) as (ctx, first): | ||||
|         async with tractor.open_nursery() as n: | ||||
|             p = await n.start_actor( | ||||
|                 'aio_daemon', | ||||
|                 enable_modules=[__name__], | ||||
|                 infect_asyncio=True, | ||||
|                 # debug_mode=True, | ||||
|                 loglevel='cancel', | ||||
|             ) | ||||
|             async with p.open_context( | ||||
|                 trio_ctx, | ||||
|             ) as (ctx, first): | ||||
| 
 | ||||
|                     assert first == 'start' | ||||
|                 assert first == 'start' | ||||
| 
 | ||||
|                     if parent_cancels: | ||||
|                         await p.cancel_actor() | ||||
|                 if parent_cancels: | ||||
|                     await p.cancel_actor() | ||||
| 
 | ||||
|                     await trio.sleep_forever() | ||||
|                 await trio.sleep_forever() | ||||
| 
 | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
|  | @ -410,12 +408,11 @@ def test_trio_error_cancels_intertask_chan(arb_addr): | |||
|             # should trigger remote actor error | ||||
|             await portal.result() | ||||
| 
 | ||||
|     with pytest.raises(BaseExceptionGroup) as excinfo: | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     # ensure boxed errors | ||||
|     for exc in excinfo.value.exceptions: | ||||
|         assert exc.type == Exception | ||||
|     # ensure boxed error is correct | ||||
|     assert excinfo.value.type == Exception | ||||
| 
 | ||||
| 
 | ||||
| def test_trio_closes_early_and_channel_exits(arb_addr): | ||||
|  | @ -444,12 +441,11 @@ def test_aio_errors_and_channel_propagates_and_closes(arb_addr): | |||
|             # should trigger remote actor error | ||||
|             await portal.result() | ||||
| 
 | ||||
|     with pytest.raises(BaseExceptionGroup) as excinfo: | ||||
|     with pytest.raises(RemoteActorError) as excinfo: | ||||
|         trio.run(main) | ||||
| 
 | ||||
|     # ensure boxed errors | ||||
|     for exc in excinfo.value.exceptions: | ||||
|         assert exc.type == Exception | ||||
|     # ensure boxed error is correct | ||||
|     assert excinfo.value.type == Exception | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
|  |  | |||
|  | @ -11,18 +11,25 @@ from conftest import tractor_test | |||
| 
 | ||||
| 
 | ||||
| @pytest.mark.trio | ||||
| async def test_no_runtime(): | ||||
| async def test_no_arbitter(): | ||||
|     """An arbitter must be established before any nurseries | ||||
|     can be created. | ||||
| 
 | ||||
|     (In other words ``tractor.open_root_actor()`` must be engaged at | ||||
|     some point?) | ||||
|     """ | ||||
|     with pytest.raises(RuntimeError) : | ||||
|         async with tractor.find_actor('doggy'): | ||||
|     with pytest.raises(RuntimeError): | ||||
|         with tractor.open_nursery(): | ||||
|             pass | ||||
| 
 | ||||
| 
 | ||||
| def test_no_main(): | ||||
|     """An async function **must** be passed to ``tractor.run()``. | ||||
|     """ | ||||
|     with pytest.raises(TypeError): | ||||
|         tractor.run(None) | ||||
| 
 | ||||
| 
 | ||||
| @tractor_test | ||||
| async def test_self_is_registered(arb_addr): | ||||
|     "Verify waiting on the arbiter to register itself using the standard api." | ||||
|  |  | |||
|  | @ -4,10 +4,9 @@ from itertools import cycle | |||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| from tractor.testing import tractor_test | ||||
| from tractor.experimental import msgpub | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| def test_type_checks(): | ||||
| 
 | ||||
|  |  | |||
|  | @ -1,73 +0,0 @@ | |||
| """ | ||||
| Verifying internal runtime state and undocumented extras. | ||||
| 
 | ||||
| """ | ||||
| import os | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| _file_path: str = '' | ||||
| 
 | ||||
| 
 | ||||
| def unlink_file(): | ||||
|     print('Removing tmp file!') | ||||
|     os.remove(_file_path) | ||||
| 
 | ||||
| 
 | ||||
| async def crash_and_clean_tmpdir( | ||||
|     tmp_file_path: str, | ||||
|     error: bool = True, | ||||
| ): | ||||
|     global _file_path | ||||
|     _file_path = tmp_file_path | ||||
| 
 | ||||
|     actor = tractor.current_actor() | ||||
|     actor.lifetime_stack.callback(unlink_file) | ||||
| 
 | ||||
|     assert os.path.isfile(tmp_file_path) | ||||
|     await trio.sleep(0.1) | ||||
|     if error: | ||||
|         assert 0 | ||||
|     else: | ||||
|         actor.cancel_soon() | ||||
| 
 | ||||
| 
 | ||||
| @pytest.mark.parametrize( | ||||
|     'error_in_child', | ||||
|     [True, False], | ||||
| ) | ||||
| @tractor_test | ||||
| async def test_lifetime_stack_wipes_tmpfile( | ||||
|     tmp_path, | ||||
|     error_in_child: bool, | ||||
| ): | ||||
|     child_tmp_file = tmp_path / "child.txt" | ||||
|     child_tmp_file.touch() | ||||
|     assert child_tmp_file.exists() | ||||
|     path = str(child_tmp_file) | ||||
| 
 | ||||
|     try: | ||||
|         with trio.move_on_after(0.5): | ||||
|             async with tractor.open_nursery() as n: | ||||
|                     await (  # inlined portal | ||||
|                         await n.run_in_actor( | ||||
|                             crash_and_clean_tmpdir, | ||||
|                             tmp_file_path=path, | ||||
|                             error=error_in_child, | ||||
|                         ) | ||||
|                     ).result() | ||||
| 
 | ||||
|     except ( | ||||
|         tractor.RemoteActorError, | ||||
|         tractor.BaseExceptionGroup, | ||||
|     ): | ||||
|         pass | ||||
| 
 | ||||
|     # tmp file should have been wiped by | ||||
|     # teardown stack. | ||||
|     assert not child_tmp_file.exists() | ||||
|  | @ -1,8 +1,7 @@ | |||
| """ | ||||
| Spawning basics | ||||
| 
 | ||||
| """ | ||||
| from typing import Optional | ||||
| from typing import Dict, Tuple, Optional | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
|  | @ -15,8 +14,8 @@ data_to_pass_down = {'doggy': 10, 'kitty': 4} | |||
| 
 | ||||
| async def spawn( | ||||
|     is_arbiter: bool, | ||||
|     data: dict, | ||||
|     arb_addr: tuple[str, int], | ||||
|     data: Dict, | ||||
|     arb_addr: Tuple[str, int], | ||||
| ): | ||||
|     namespaces = [__name__] | ||||
| 
 | ||||
|  | @ -142,7 +141,7 @@ def test_loglevel_propagated_to_subactor( | |||
|     capfd, | ||||
|     arb_addr, | ||||
| ): | ||||
|     if start_method == 'mp_forkserver': | ||||
|     if start_method == 'forkserver': | ||||
|         pytest.skip( | ||||
|             "a bug with `capfd` seems to make forkserver capture not work?") | ||||
| 
 | ||||
|  |  | |||
|  | @ -7,10 +7,9 @@ import platform | |||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| from tractor.testing import tractor_test | ||||
| import pytest | ||||
| 
 | ||||
| from conftest import tractor_test | ||||
| 
 | ||||
| 
 | ||||
| def test_must_define_ctx(): | ||||
| 
 | ||||
|  | @ -251,7 +250,7 @@ def test_a_quadruple_example(time_quad_ex, ci_env, spawn_backend): | |||
| 
 | ||||
|     results, diff = time_quad_ex | ||||
|     assert results | ||||
|     this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 3 | ||||
|     this_fast = 6 if platform.system() in ('Windows', 'Darwin') else 2.666 | ||||
|     assert diff < this_fast | ||||
| 
 | ||||
| 
 | ||||
|  | @ -6,16 +6,13 @@ from contextlib import asynccontextmanager | |||
| from functools import partial | ||||
| from itertools import cycle | ||||
| import time | ||||
| from typing import Optional | ||||
| from typing import Optional, List, Tuple | ||||
| 
 | ||||
| import pytest | ||||
| import trio | ||||
| from trio.lowlevel import current_task | ||||
| import tractor | ||||
| from tractor.trionics import ( | ||||
|     broadcast_receiver, | ||||
|     Lagged, | ||||
| ) | ||||
| from tractor.trionics import broadcast_receiver, Lagged | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
|  | @ -40,7 +37,7 @@ async def echo_sequences( | |||
| 
 | ||||
| async def ensure_sequence( | ||||
| 
 | ||||
|     stream: tractor.MsgStream, | ||||
|     stream: tractor.ReceiveMsgStream, | ||||
|     sequence: list, | ||||
|     delay: Optional[float] = None, | ||||
| 
 | ||||
|  | @ -65,8 +62,8 @@ async def ensure_sequence( | |||
| @asynccontextmanager | ||||
| async def open_sequence_streamer( | ||||
| 
 | ||||
|     sequence: list[int], | ||||
|     arb_addr: tuple[str, int], | ||||
|     sequence: List[int], | ||||
|     arb_addr: Tuple[str, int], | ||||
|     start_method: str, | ||||
| 
 | ||||
| ) -> tractor.MsgStream: | ||||
|  | @ -214,8 +211,7 @@ def test_faster_task_to_recv_is_cancelled_by_slower( | |||
|     arb_addr, | ||||
|     start_method, | ||||
| ): | ||||
|     ''' | ||||
|     Ensure that if a faster task consuming from a stream is cancelled | ||||
|     '''Ensure that if a faster task consuming from a stream is cancelled | ||||
|     the slower task can continue to receive all expected values. | ||||
| 
 | ||||
|     ''' | ||||
|  | @ -464,51 +460,3 @@ def test_first_recver_is_cancelled(): | |||
|                     assert value == 1 | ||||
| 
 | ||||
|     trio.run(main) | ||||
| 
 | ||||
| 
 | ||||
| def test_no_raise_on_lag(): | ||||
|     ''' | ||||
|     Run a simple 2-task broadcast where one task is slow but configured | ||||
|     so that it does not raise `Lagged` on overruns using | ||||
|     `raise_on_lag=False` and verify that the task does not raise. | ||||
| 
 | ||||
|     ''' | ||||
|     size = 100 | ||||
|     tx, rx = trio.open_memory_channel(size) | ||||
|     brx = broadcast_receiver(rx, size) | ||||
| 
 | ||||
|     async def slow(): | ||||
|         async with brx.subscribe( | ||||
|             raise_on_lag=False, | ||||
|         ) as br: | ||||
|             async for msg in br: | ||||
|                 print(f'slow task got: {msg}') | ||||
|                 await trio.sleep(0.1) | ||||
| 
 | ||||
|     async def fast(): | ||||
|         async with brx.subscribe() as br: | ||||
|             async for msg in br: | ||||
|                 print(f'fast task got: {msg}') | ||||
| 
 | ||||
|     async def main(): | ||||
|         async with ( | ||||
|             tractor.open_root_actor( | ||||
|                 # NOTE: so we see the warning msg emitted by the bcaster | ||||
|                 # internals when the no raise flag is set. | ||||
|                 loglevel='warning', | ||||
|             ), | ||||
|             trio.open_nursery() as n, | ||||
|         ): | ||||
|             n.start_soon(slow) | ||||
|             n.start_soon(fast) | ||||
| 
 | ||||
|             for i in range(1000): | ||||
|                 await tx.send(i) | ||||
| 
 | ||||
|             # simulate user nailing ctl-c after realizing | ||||
|             # there's a lag in the slow task. | ||||
|             await trio.sleep(1) | ||||
|             raise KeyboardInterrupt | ||||
| 
 | ||||
|     with pytest.raises(KeyboardInterrupt): | ||||
|         trio.run(main) | ||||
|  |  | |||
|  | @ -0,0 +1,7 @@ | |||
| [tool.towncrier] | ||||
| package = "tractor" | ||||
| filename = "NEWS.rst" | ||||
| directory = "nooz/" | ||||
| title_format = "tractor {version} ({project_date})" | ||||
| version = "0.1.0a4" | ||||
| template = "nooz/_template.rst" | ||||
|  | @ -18,12 +18,13 @@ | |||
| tractor: structured concurrent "actors". | ||||
| 
 | ||||
| """ | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| from trio import MultiError | ||||
| 
 | ||||
| from ._clustering import open_actor_cluster | ||||
| from ._ipc import Channel | ||||
| from ._streaming import ( | ||||
|     Context, | ||||
|     ReceiveMsgStream, | ||||
|     MsgStream, | ||||
|     stream, | ||||
|     context, | ||||
|  | @ -35,37 +36,27 @@ from ._discovery import ( | |||
|     query_actor, | ||||
| ) | ||||
| from ._supervise import open_nursery | ||||
| from ._state import ( | ||||
|     current_actor, | ||||
|     is_root_process, | ||||
| ) | ||||
| from ._state import current_actor, is_root_process | ||||
| from ._exceptions import ( | ||||
|     RemoteActorError, | ||||
|     ModuleNotExposed, | ||||
|     ContextCancelled, | ||||
| ) | ||||
| from ._debug import ( | ||||
|     breakpoint, | ||||
|     post_mortem, | ||||
| ) | ||||
| from ._debug import breakpoint, post_mortem | ||||
| from . import msg | ||||
| from ._root import ( | ||||
|     run_daemon, | ||||
|     open_root_actor, | ||||
| ) | ||||
| from ._root import run, run_daemon, open_root_actor | ||||
| from ._portal import Portal | ||||
| from ._runtime import Actor | ||||
| 
 | ||||
| 
 | ||||
| __all__ = [ | ||||
|     'Actor', | ||||
|     'Channel', | ||||
|     'Context', | ||||
|     'ContextCancelled', | ||||
|     'ModuleNotExposed', | ||||
|     'MsgStream', | ||||
|     'BaseExceptionGroup', | ||||
|     'MultiError', | ||||
|     'Portal', | ||||
|     'ReceiveMsgStream', | ||||
|     'RemoteActorError', | ||||
|     'breakpoint', | ||||
|     'context', | ||||
|  | @ -79,6 +70,7 @@ __all__ = [ | |||
|     'open_root_actor', | ||||
|     'post_mortem', | ||||
|     'query_actor', | ||||
|     'run', | ||||
|     'run_daemon', | ||||
|     'stream', | ||||
|     'to_asyncio', | ||||
|  |  | |||
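The ``__init__.py`` hunk above only shuffles the package's export surface; for orientation, here is a minimal usage sketch of those exported names (assumptions: exact signatures may differ slightly between the two branches being compared, and the function and argument names are purely illustrative):

    import trio
    import tractor

    async def add_one(x: int) -> int:
        # runs in a spawned subactor
        return x + 1

    async def main():
        # ``open_nursery`` and ``Portal`` are both exported above
        async with tractor.open_nursery() as n:
            portal = await n.run_in_actor(add_one, x=1)
            assert await portal.result() == 2

    if __name__ == '__main__':
        trio.run(main)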
										
											
File diff suppressed because it is too large
							|  | @ -24,7 +24,7 @@ import argparse | |||
| 
 | ||||
| from ast import literal_eval | ||||
| 
 | ||||
| from ._runtime import Actor | ||||
| from ._actor import Actor | ||||
| from ._entry import _trio_main | ||||
| 
 | ||||
| 
 | ||||
|  |  | |||
|  | @ -32,12 +32,9 @@ import tractor | |||
| async def open_actor_cluster( | ||||
|     modules: list[str], | ||||
|     count: int = cpu_count(), | ||||
|     names: list[str] | None = None, | ||||
|     names: Optional[list[str]] = None, | ||||
|     start_method: Optional[str] = None, | ||||
|     hard_kill: bool = False, | ||||
| 
 | ||||
|     # passed through verbatim to ``open_root_actor()`` | ||||
|     **runtime_kwargs, | ||||
| 
 | ||||
| ) -> AsyncGenerator[ | ||||
|     dict[str, tractor.Portal], | ||||
|     None, | ||||
|  | @ -52,9 +49,7 @@ async def open_actor_cluster( | |||
|         raise ValueError( | ||||
|             f'Number of names is {len(names)} but count is {count}') | ||||
| 
 | ||||
|     async with tractor.open_nursery( | ||||
|         **runtime_kwargs, | ||||
|     ) as an: | ||||
|     async with tractor.open_nursery(start_method=start_method) as an: | ||||
|         async with trio.open_nursery() as n: | ||||
|             uid = tractor.current_actor().uid | ||||
| 
 | ||||
|  |  | |||
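Going by the signature shown above, ``open_actor_cluster()`` yields a mapping of actor name to ``Portal``. A hedged usage sketch (the module list, count and printed text are assumptions, not taken from this diff):

    import trio
    import tractor

    async def main():
        async with tractor.open_actor_cluster(
            modules=['__main__'],  # modules to enable in each cluster actor
            count=2,
        ) as portals:  # dict[str, tractor.Portal]
            for name in portals:
                print(f'cluster actor spawned: {name}')

    if __name__ == '__main__':
        trio.run(main)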
|  | @ -20,16 +20,12 @@ Multi-core debugging for da peeps! | |||
| """ | ||||
| from __future__ import annotations | ||||
| import bdb | ||||
| import os | ||||
| import sys | ||||
| import signal | ||||
| from functools import ( | ||||
|     partial, | ||||
|     cached_property, | ||||
| ) | ||||
| from functools import partial | ||||
| from contextlib import asynccontextmanager as acm | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Tuple, | ||||
|     Optional, | ||||
|     Callable, | ||||
|     AsyncIterator, | ||||
|  | @ -37,23 +33,27 @@ from typing import ( | |||
| ) | ||||
| from types import FrameType | ||||
| 
 | ||||
| import pdbp | ||||
| import tractor | ||||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| 
 | ||||
| from .log import get_logger | ||||
| from ._discovery import get_root | ||||
| from ._state import ( | ||||
|     is_root_process, | ||||
|     debug_mode, | ||||
| ) | ||||
| from ._exceptions import ( | ||||
|     is_multi_cancelled, | ||||
|     ContextCancelled, | ||||
| ) | ||||
| from ._state import is_root_process, debug_mode | ||||
| from ._exceptions import is_multi_cancelled | ||||
| from ._ipc import Channel | ||||
| 
 | ||||
| 
 | ||||
| try: | ||||
|     # wtf: only exported when installed in dev mode? | ||||
|     import pdbpp | ||||
| except ImportError: | ||||
|     # pdbpp is installed in regular mode...it monkey patches stuff | ||||
|     import pdb | ||||
|     xpm = getattr(pdb, 'xpm', None) | ||||
|     assert xpm, "pdbpp is not installed?"  # type: ignore | ||||
|     pdbpp = pdb | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -67,31 +67,14 @@ class Lock: | |||
|     Mostly to avoid a lot of ``global`` declarations for now XD. | ||||
| 
 | ||||
|     ''' | ||||
|     repl: MultiActorPdb | None = None | ||||
|     # placeholder for function to set a ``trio.Event`` on debugger exit | ||||
|     # pdb_release_hook: Optional[Callable] = None | ||||
| 
 | ||||
|     _trio_handler: Callable[ | ||||
|         [int, Optional[FrameType]], Any | ||||
|     ] | int | None = None | ||||
|     pdb_release_hook: Optional[Callable] = None | ||||
| 
 | ||||
|     # actor-wide variable pointing to current task name using debugger | ||||
|     local_task_in_debug: str | None = None | ||||
| 
 | ||||
|     # NOTE: set by the current task waiting on the root tty lock from | ||||
|     # the CALLER side of the `lock_tty_for_child()` context entry-call | ||||
|     # and must be cancelled if this actor is cancelled via IPC | ||||
|     # request-message otherwise deadlocks with the parent actor may | ||||
|     # ensue | ||||
|     _debugger_request_cs: Optional[trio.CancelScope] = None | ||||
| 
 | ||||
|     # NOTE: set only in the root actor for the **local** root spawned task | ||||
|     # which has acquired the lock (i.e. this is on the callee side of | ||||
|     # the `lock_tty_for_child()` context entry). | ||||
|     _root_local_task_cs_in_debug: Optional[trio.CancelScope] = None | ||||
|     local_task_in_debug: Optional[str] = None | ||||
| 
 | ||||
|     # actor tree-wide actor uid that supposedly has the tty lock | ||||
|     global_actor_in_debug: Optional[tuple[str, str]] = None | ||||
|     global_actor_in_debug: Optional[Tuple[str, str]] = None | ||||
| 
 | ||||
|     local_pdb_complete: Optional[trio.Event] = None | ||||
|     no_remote_has_tty: Optional[trio.Event] = None | ||||
|  | @ -99,26 +82,39 @@ class Lock: | |||
|     # lock in root actor preventing multi-access to local tty | ||||
|     _debug_lock: trio.StrictFIFOLock = trio.StrictFIFOLock() | ||||
| 
 | ||||
|     # XXX: set by the current task waiting on the root tty lock | ||||
|     # and must be cancelled if this actor is cancelled via message | ||||
|     # otherwise deadlocks with the parent actor may ensue | ||||
|     _debugger_request_cs: Optional[trio.CancelScope] = None | ||||
| 
 | ||||
|     _orig_sigint_handler: Optional[Callable] = None | ||||
|     _blocked: set[tuple[str, str]] = set() | ||||
| 
 | ||||
|     @classmethod | ||||
|     def shield_sigint(cls): | ||||
|         cls._orig_sigint_handler = signal.signal( | ||||
|             signal.SIGINT, | ||||
|             shield_sigint_handler, | ||||
|         ) | ||||
|                 signal.SIGINT, | ||||
|                 shield_sigint, | ||||
|             ) | ||||
| 
 | ||||
|     @classmethod | ||||
|     def unshield_sigint(cls): | ||||
|         # always restore ``trio``'s sigint handler. see notes below in | ||||
|         # the pdb factory about the nightmare that is that code swapping | ||||
|         # out the handler when the repl activates... | ||||
|         signal.signal(signal.SIGINT, cls._trio_handler) | ||||
|         if cls._orig_sigint_handler is not None: | ||||
|             # restore original sigint handler | ||||
|             signal.signal( | ||||
|                 signal.SIGINT, | ||||
|                 cls._orig_sigint_handler | ||||
|             ) | ||||
| 
 | ||||
|         cls._orig_sigint_handler = None | ||||
| 
 | ||||
|     @classmethod | ||||
|     def release(cls): | ||||
|     def maybe_release(cls): | ||||
|         cls.local_task_in_debug = None | ||||
|         if cls.pdb_release_hook: | ||||
|             cls.pdb_release_hook() | ||||
| 
 | ||||
|     @classmethod | ||||
|     def root_release(cls): | ||||
|         try: | ||||
|             cls._debug_lock.release() | ||||
|         except RuntimeError: | ||||
|  | @ -129,41 +125,32 @@ class Lock: | |||
|             if owner: | ||||
|                 raise | ||||
| 
 | ||||
|         # actor-local state, irrelevant for non-root. | ||||
|         cls.global_actor_in_debug = None | ||||
|         cls.local_task_in_debug = None | ||||
| 
 | ||||
|         try: | ||||
|             # sometimes the ``trio`` might already be terminated in | ||||
|             # which case this call will raise. | ||||
|             if cls.local_pdb_complete is not None: | ||||
|                 cls.local_pdb_complete.set() | ||||
|             cls.local_pdb_complete.set() | ||||
|         finally: | ||||
|             # restore original sigint handler | ||||
|             cls.unshield_sigint() | ||||
|             cls.repl = None | ||||
| 
 | ||||
| 
 | ||||
| class TractorConfig(pdbp.DefaultConfig): | ||||
| class TractorConfig(pdbpp.DefaultConfig): | ||||
|     """Custom ``pdbpp`` goodness. | ||||
|     """ | ||||
|     use_pygments = True | ||||
|     # sticky_by_default = True | ||||
|     enable_hidden_frames = False | ||||
| 
 | ||||
| 
 | ||||
| class MultiActorPdb(pdbpp.Pdb): | ||||
|     ''' | ||||
|     Custom ``pdbp`` goodness :surfer: | ||||
|     Add teardown hooks to the regular ``pdbpp.Pdb``. | ||||
| 
 | ||||
|     ''' | ||||
|     use_pygments: bool = True | ||||
|     sticky_by_default: bool = False | ||||
|     enable_hidden_frames: bool = False | ||||
| 
 | ||||
|     # much thanks @mdmintz for the hot tip! | ||||
|     # fixes line spacing issue when resizing terminal B) | ||||
|     truncate_long_lines: bool = False | ||||
| 
 | ||||
| 
 | ||||
| class MultiActorPdb(pdbp.Pdb): | ||||
|     ''' | ||||
|     Add teardown hooks to the regular ``pdbp.Pdb``. | ||||
| 
 | ||||
|     ''' | ||||
|     # override the pdbp config with our coolio one | ||||
|     # override the pdbpp config with our coolio one | ||||
|     DefaultConfig = TractorConfig | ||||
| 
 | ||||
|     # def preloop(self): | ||||
|  | @ -176,47 +163,54 @@ class MultiActorPdb(pdbp.Pdb): | |||
|         try: | ||||
|             super().set_continue() | ||||
|         finally: | ||||
|             Lock.release() | ||||
|             Lock.maybe_release() | ||||
| 
 | ||||
|     def set_quit(self): | ||||
|         try: | ||||
|             super().set_quit() | ||||
|         finally: | ||||
|             Lock.release() | ||||
|             Lock.maybe_release() | ||||
| 
 | ||||
|     # XXX NOTE: we only override this because apparently the stdlib pdb | ||||
|     # bois likes to touch the SIGINT handler as much as i like to touch | ||||
|     # my d$%&. | ||||
|     def _cmdloop(self): | ||||
|         self.cmdloop() | ||||
| 
 | ||||
|     @cached_property | ||||
|     def shname(self) -> str | None: | ||||
|         ''' | ||||
|         Attempt to return the login shell name with a special check for | ||||
|         the infamous `xonsh` since it seems to have some issues much | ||||
|         different from std shells when it comes to flushing the prompt? | ||||
| # TODO: will be needed whenever we get to true remote debugging. | ||||
| # XXX see https://github.com/goodboy/tractor/issues/130 | ||||
| 
 | ||||
|         ''' | ||||
|         # SUPER HACKY and only really works if `xonsh` is not used | ||||
|         # before spawning further sub-shells.. | ||||
|         shpath = os.getenv('SHELL', None) | ||||
| # # TODO: is there some way to determine this programatically? | ||||
| # _pdb_exit_patterns = tuple( | ||||
| #     str.encode(patt + "\n") for patt in ( | ||||
| #         'c', 'cont', 'continue', 'q', 'quit') | ||||
| # ) | ||||
| 
 | ||||
|         if shpath: | ||||
|             if ( | ||||
|                 os.getenv('XONSH_LOGIN', default=False) | ||||
|                 or 'xonsh' in shpath | ||||
|             ): | ||||
|                 return 'xonsh' | ||||
| # def subactoruid2proc( | ||||
| #     actor: 'Actor',  # noqa | ||||
| #     uid: Tuple[str, str] | ||||
| # ) -> trio.Process: | ||||
| #     n = actor._actoruid2nursery[uid] | ||||
| #     _, proc, _ = n._children[uid] | ||||
| #     return proc | ||||
| 
 | ||||
|             return os.path.basename(shpath) | ||||
| # async def hijack_stdin(): | ||||
| #     log.info(f"Hijacking stdin from {actor.uid}") | ||||
| 
 | ||||
|         return None | ||||
| #     trap std in and relay to subproc | ||||
| #     async_stdin = trio.wrap_file(sys.stdin) | ||||
| 
 | ||||
| #     async with aclosing(async_stdin): | ||||
| #         async for msg in async_stdin: | ||||
| #             log.runtime(f"Stdin input:\n{msg}") | ||||
| #             # encode to bytes | ||||
| #             bmsg = str.encode(msg) | ||||
| 
 | ||||
| #             # relay bytes to subproc over pipe | ||||
| #             # await proc.stdin.send_all(bmsg) | ||||
| 
 | ||||
| #             if bmsg in _pdb_exit_patterns: | ||||
| #                 log.info("Closing stdin hijack") | ||||
| #                 break | ||||
| 
 | ||||
| @acm | ||||
| async def _acquire_debug_lock_from_root_task( | ||||
|     uid: tuple[str, str] | ||||
| async def _acquire_debug_lock( | ||||
|     uid: Tuple[str, str] | ||||
| 
 | ||||
| ) -> AsyncIterator[trio.StrictFIFOLock]: | ||||
|     ''' | ||||
|  | @ -241,12 +235,6 @@ async def _acquire_debug_lock_from_root_task( | |||
|             f"entering lock checkpoint, remote task: {task_name}:{uid}" | ||||
|         ) | ||||
|         we_acquired = True | ||||
| 
 | ||||
|         # NOTE: if the surrounding cancel scope from the | ||||
|         # `lock_tty_for_child()` caller is cancelled, this line should | ||||
|         # unblock and NOT leave us in some kind of | ||||
|         # a "child-locked-TTY-but-child-is-uncontactable-over-IPC" | ||||
|         # condition. | ||||
|         await Lock._debug_lock.acquire() | ||||
| 
 | ||||
|         if Lock.no_remote_has_tty is None: | ||||
|  | @ -263,7 +251,7 @@ async def _acquire_debug_lock_from_root_task( | |||
|         # IF we received a cancel during the shielded lock entry of some | ||||
|         # next-in-queue requesting task, then the resumption here will | ||||
|         # result in that ``trio.Cancelled`` being raised to our caller | ||||
|         # (likely from ``lock_tty_for_child()`` below)!  In | ||||
|         # (likely from ``_hijack_stdin_for_child()`` below)!  In | ||||
|         # this case the ``finally:`` below should trigger and the | ||||
|         # surrounding caller side context should cancel normally | ||||
|         # relaying back to the caller. | ||||
|  | @ -271,6 +259,8 @@ async def _acquire_debug_lock_from_root_task( | |||
|         yield Lock._debug_lock | ||||
| 
 | ||||
|     finally: | ||||
|         # if Lock.global_actor_in_debug == uid: | ||||
| 
 | ||||
|         if ( | ||||
|             we_acquired | ||||
|             and Lock._debug_lock.locked() | ||||
|  | @ -299,34 +289,24 @@ async def _acquire_debug_lock_from_root_task( | |||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
| async def lock_tty_for_child( | ||||
| async def _hijack_stdin_for_child( | ||||
| 
 | ||||
|     ctx: tractor.Context, | ||||
|     subactor_uid: tuple[str, str] | ||||
|     subactor_uid: Tuple[str, str] | ||||
| 
 | ||||
| ) -> str: | ||||
|     ''' | ||||
|     Lock the TTY in the root process of an actor tree in a new | ||||
|     inter-actor-context-task such that the ``pdbp`` debugger console | ||||
|     can be mutex-allocated to the calling sub-actor for REPL control | ||||
|     without interference by other processes / threads. | ||||
|     Hijack the tty in the root process of an actor tree such that | ||||
|     the pdbpp debugger console can be allocated to a sub-actor for repl | ||||
|     bossing. | ||||
| 
 | ||||
|     NOTE: this task must be invoked in the root process of the actor | ||||
|     tree. It is meant to be invoked as an rpc-task and should be | ||||
|     highly reliable at releasing the mutex when complete! | ||||
|     NOTE: this task is invoked in the root actor-process of the actor | ||||
|     tree. It is meant to be invoked as an rpc-task which should be | ||||
|     highly reliable at cleaning out the tty-lock state when complete! | ||||
| 
 | ||||
|     ''' | ||||
|     task_name = trio.lowlevel.current_task().name | ||||
| 
 | ||||
|     if tuple(subactor_uid) in Lock._blocked: | ||||
|         log.warning( | ||||
|             f'Actor {subactor_uid} is blocked from acquiring debug lock\n' | ||||
|             f"remote task: {task_name}:{subactor_uid}" | ||||
|         ) | ||||
|         ctx._enter_debugger_on_cancel = False | ||||
|         await ctx.cancel(f'Debug lock blocked for {subactor_uid}') | ||||
|         return 'pdb_lock_blocked' | ||||
| 
 | ||||
|     # TODO: when we get to true remote debugging | ||||
|     # this will deliver stdin data? | ||||
| 
 | ||||
|  | @ -340,10 +320,11 @@ async def lock_tty_for_child( | |||
| 
 | ||||
|     try: | ||||
|         with ( | ||||
|             trio.CancelScope(shield=True) as debug_lock_cs, | ||||
|             trio.CancelScope(shield=True), | ||||
|         ): | ||||
|             Lock._root_local_task_cs_in_debug = debug_lock_cs | ||||
|             async with _acquire_debug_lock_from_root_task(subactor_uid): | ||||
|             # try: | ||||
|             # lock = None | ||||
|             async with _acquire_debug_lock(subactor_uid):  # as lock: | ||||
| 
 | ||||
|                 # indicate to child that we've locked stdio | ||||
|                 await ctx.started('Locked') | ||||
|  | @ -355,29 +336,55 @@ async def lock_tty_for_child( | |||
|                 async with ctx.open_stream() as stream: | ||||
|                     assert await stream.receive() == 'pdb_unlock' | ||||
| 
 | ||||
|             # except ( | ||||
|             #     BaseException, | ||||
|             #     # trio.MultiError, | ||||
|             #     # Exception, | ||||
|             #     # trio.BrokenResourceError, | ||||
|             #     # trio.Cancelled,  # by local cancellation | ||||
|             #     # trio.ClosedResourceError,  # by self._rx_chan | ||||
|             #     # ContextCancelled, | ||||
|             #     # ConnectionResetError, | ||||
|             # ): | ||||
|             #     # XXX: there may be a race with the portal teardown | ||||
|             #     # with the calling actor which we can safely ignore. | ||||
|             #     # The alternative would be sending an ack message | ||||
|             #     # and allowing the client to wait for us to teardown | ||||
|             #     # first? | ||||
|             #     if lock and lock.locked(): | ||||
|             #         try: | ||||
|             #             lock.release() | ||||
|             #         except RuntimeError: | ||||
|             #             log.exception(f"we don't own the tty lock?") | ||||
| 
 | ||||
|             #     # if isinstance(err, trio.Cancelled): | ||||
|             #     raise | ||||
| 
 | ||||
|             # finally: | ||||
|             #     log.runtime( | ||||
|             #         "TTY lock released, remote task:" | ||||
|             #         f"{task_name}:{subactor_uid}" | ||||
|             #     ) | ||||
| 
 | ||||
|         return "pdb_unlock_complete" | ||||
| 
 | ||||
|     finally: | ||||
|         Lock._root_local_task_cs_in_debug = None | ||||
|         Lock.unshield_sigint() | ||||
| 
 | ||||
| 
 | ||||
| async def wait_for_parent_stdin_hijack( | ||||
|     actor_uid: tuple[str, str], | ||||
|     actor_uid: Tuple[str, str], | ||||
|     task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED | ||||
| ): | ||||
|     ''' | ||||
|     Connect to the root actor via a ``Context`` and invoke a task which | ||||
|     locks a root-local TTY lock: ``lock_tty_for_child()``; this func | ||||
|     should be called in a new task from a child actor **and never the | ||||
|     root*. | ||||
|     Connect to the root actor via a ctx and invoke a task which locks | ||||
|     a root-local TTY lock. | ||||
| 
 | ||||
|     This function is used by any sub-actor to acquire mutex access to | ||||
|     the ``pdb`` REPL and thus the root's TTY for interactive debugging | ||||
|     (see below inside ``_breakpoint()``). It can be used to ensure that | ||||
|     an intermediate nursery-owning actor does not clobber its children | ||||
|     if they are in debug (see below inside | ||||
|     ``maybe_wait_for_debugger()``). | ||||
|     pdb and the root's TTY for interactive debugging (see below inside | ||||
|     ``_breakpoint()``). It can be used to ensure that an intermediate | ||||
|     nursery-owning actor does not clobber its children if they are in | ||||
|     debug (see below inside ``maybe_wait_for_debugger()``). | ||||
| 
 | ||||
|     ''' | ||||
|     with trio.CancelScope(shield=True) as cs: | ||||
|  | @ -389,12 +396,12 @@ async def wait_for_parent_stdin_hijack( | |||
|                 # this syncs to child's ``Context.started()`` call. | ||||
|                 async with portal.open_context( | ||||
| 
 | ||||
|                     tractor._debug.lock_tty_for_child, | ||||
|                     tractor._debug._hijack_stdin_for_child, | ||||
|                     subactor_uid=actor_uid, | ||||
| 
 | ||||
|                 ) as (ctx, val): | ||||
| 
 | ||||
|                     log.debug('locked context') | ||||
|                     log.pdb('locked context') | ||||
|                     assert val == 'Locked' | ||||
| 
 | ||||
|                     async with ctx.open_stream() as stream: | ||||
|  | @ -413,21 +420,21 @@ async def wait_for_parent_stdin_hijack( | |||
|                         # sync with callee termination | ||||
|                         assert await ctx.result() == "pdb_unlock_complete" | ||||
| 
 | ||||
|                 log.debug('exitting child side locking task context') | ||||
|                 log.pdb('unlocked context') | ||||
| 
 | ||||
|         except ContextCancelled: | ||||
|         except tractor.ContextCancelled: | ||||
|             log.warning('Root actor cancelled debug lock') | ||||
|             raise | ||||
| 
 | ||||
|         finally: | ||||
|             log.pdb(f"Exiting debugger for actor {actor_uid}") | ||||
|             Lock.local_task_in_debug = None | ||||
|             log.debug('Exiting debugger from child') | ||||
|             log.pdb(f"Child {actor_uid} released parent stdio lock") | ||||
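The two tasks above (``lock_tty_for_child()`` / ``_hijack_stdin_for_child()`` on the root side and ``wait_for_parent_stdin_hijack()`` on the child side) implement the root-TTY mutex that makes multi-process debugging usable. None of this is called directly from user code; a hedged sketch of how the machinery is normally engaged (requires an interactive terminal; ``debug_mode=True`` and ``tractor.breakpoint()`` are the public knobs per upstream docs, not something introduced in this diff):

    import trio
    import tractor

    async def child():
        # blocks this subactor in a pdb REPL; the root's TTY is locked
        # on our behalf by the machinery above until we continue/quit.
        await tractor.breakpoint()

    async def main():
        async with tractor.open_nursery(debug_mode=True) as n:
            await n.run_in_actor(child)

    if __name__ == '__main__':
        trio.run(main)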
| 
 | ||||
| 
 | ||||
| def mk_mpdb() -> tuple[MultiActorPdb, Callable]: | ||||
| 
 | ||||
|     pdb = MultiActorPdb() | ||||
|     # signal.signal = pdbp.hideframe(signal.signal) | ||||
|     # signal.signal = pdbpp.hideframe(signal.signal) | ||||
| 
 | ||||
|     Lock.shield_sigint() | ||||
| 
 | ||||
|  | @ -449,13 +456,14 @@ async def _breakpoint( | |||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Breakpoint entry for engaging debugger instance sync-interaction, | ||||
|     from async code, executing in actor runtime (task). | ||||
|     breakpoint entry for engaging pdb machinery in the root or | ||||
|     a subactor. | ||||
| 
 | ||||
|     ''' | ||||
|     __tracebackhide__ = True | ||||
|     actor = tractor.current_actor() | ||||
| 
 | ||||
|     pdb, undo_sigint = mk_mpdb() | ||||
|     actor = tractor.current_actor() | ||||
|     task_name = trio.lowlevel.current_task().name | ||||
| 
 | ||||
|     # TODO: is it possible to debug a trio.Cancelled except block? | ||||
|  | @ -465,27 +473,16 @@ async def _breakpoint( | |||
|     # with trio.CancelScope(shield=shield): | ||||
|     #     await trio.lowlevel.checkpoint() | ||||
| 
 | ||||
|     if ( | ||||
|         not Lock.local_pdb_complete | ||||
|         or Lock.local_pdb_complete.is_set() | ||||
|     ): | ||||
|     if not Lock.local_pdb_complete or Lock.local_pdb_complete.is_set(): | ||||
|         Lock.local_pdb_complete = trio.Event() | ||||
| 
 | ||||
|     # TODO: need a more robust check for the "root" actor | ||||
|     if ( | ||||
|         not is_root_process() | ||||
|         and actor._parent_chan  # a connected child | ||||
|     ): | ||||
|     if actor._parent_chan and not is_root_process(): | ||||
| 
 | ||||
|         if Lock.local_task_in_debug: | ||||
| 
 | ||||
|             # Recurrence entry case: this task already has the lock and | ||||
|             # is likely recurrently entering a breakpoint | ||||
|             if Lock.local_task_in_debug == task_name: | ||||
|                 # noop on recurrent entry case but we want to trigger | ||||
|                 # a checkpoint to allow other actors to error-propagate and | ||||
|                 # potentially avoid infinite re-entries in some subactor. | ||||
|                 await trio.lowlevel.checkpoint() | ||||
|                 # this task already has the lock and is | ||||
|                 # likely recurrently entering a breakpoint | ||||
|                 return | ||||
| 
 | ||||
|             # if **this** actor is already in debug mode block here | ||||
|  | @ -500,33 +497,36 @@ async def _breakpoint( | |||
|         # entries/requests to the root process | ||||
|         Lock.local_task_in_debug = task_name | ||||
| 
 | ||||
|         def child_release(): | ||||
|             try: | ||||
|                 # sometimes the ``trio`` might already be terminated in | ||||
|                 # which case this call will raise. | ||||
|                 Lock.local_pdb_complete.set() | ||||
|             finally: | ||||
|                 # restore original sigint handler | ||||
|                 undo_sigint() | ||||
|                 # should always be cleared in the hijack hook above, right? | ||||
|                 # _local_task_in_debug = None | ||||
| 
 | ||||
|         # assign unlock callback for debugger teardown hooks | ||||
|         Lock.pdb_release_hook = child_release | ||||
| 
 | ||||
|         # this **must** be awaited by the caller and is done using the | ||||
|         # root nursery so that the debugger can continue to run without | ||||
|         # being restricted by the scope of a new task nursery. | ||||
| 
 | ||||
|         # TODO: if we want to debug a trio.Cancelled triggered exception | ||||
|         # NOTE: if we want to debug a trio.Cancelled triggered exception | ||||
|         # we have to figure out how to avoid having the service nursery | ||||
|         # cancel on this task start? I *think* this works below: | ||||
|         # ```python | ||||
|         #   actor._service_n.cancel_scope.shield = shield | ||||
|         # ``` | ||||
|         # but not entirely sure if that's a sane way to implement it? | ||||
|         # cancel on this task start? I *think* this works below? | ||||
|         # actor._service_n.cancel_scope.shield = shield | ||||
|         try: | ||||
|             with trio.CancelScope(shield=True): | ||||
|                 await actor._service_n.start( | ||||
|                     wait_for_parent_stdin_hijack, | ||||
|                     actor.uid, | ||||
|                 ) | ||||
|                 Lock.repl = pdb | ||||
|         except RuntimeError: | ||||
|             Lock.release() | ||||
| 
 | ||||
|             if actor._cancel_called: | ||||
|                 # service nursery won't be usable and we | ||||
|                 # don't want to lock up the root either way since | ||||
|                 # we're in (the midst of) cancellation. | ||||
|                 return | ||||
| 
 | ||||
|             Lock.pdb_release_hook() | ||||
|             raise | ||||
| 
 | ||||
|     elif is_root_process(): | ||||
|  | @ -534,8 +534,8 @@ async def _breakpoint( | |||
|         # we also wait in the root-parent for any child that | ||||
|         # may have the tty locked prior | ||||
|         # TODO: wait, what about multiple root tasks acquiring it though? | ||||
|         # root process (us) already has it; ignore | ||||
|         if Lock.global_actor_in_debug == actor.uid: | ||||
|             # re-entrant root process already has it: noop. | ||||
|             return | ||||
| 
 | ||||
|         # XXX: since we need to enter pdb synchronously below, | ||||
|  | @ -556,7 +556,9 @@ async def _breakpoint( | |||
| 
 | ||||
|         Lock.global_actor_in_debug = actor.uid | ||||
|         Lock.local_task_in_debug = task_name | ||||
|         Lock.repl = pdb | ||||
| 
 | ||||
|         # the lock must be released on pdb completion | ||||
|         Lock.pdb_release_hook = Lock.root_release | ||||
| 
 | ||||
|     try: | ||||
|         # block here one (at the appropriate frame *up*) where | ||||
|  | @ -565,7 +567,7 @@ async def _breakpoint( | |||
|         debug_func(actor, pdb) | ||||
| 
 | ||||
|     except bdb.BdbQuit: | ||||
|         Lock.release() | ||||
|         Lock.maybe_release() | ||||
|         raise | ||||
| 
 | ||||
|     # XXX: apparently we can't do this without showing this frame | ||||
|  | @ -577,18 +579,22 @@ async def _breakpoint( | |||
|     #     # frame = sys._getframe() | ||||
|     #     # last_f = frame.f_back | ||||
|     #     # last_f.f_globals['__tracebackhide__'] = True | ||||
|     #     # signal.signal = pdbp.hideframe(signal.signal) | ||||
|     #     # signal.signal = pdbpp.hideframe(signal.signal) | ||||
|     #     signal.signal( | ||||
|     #         signal.SIGINT, | ||||
|     #         orig_handler | ||||
|     #     ) | ||||
| 
 | ||||
| 
 | ||||
| def shield_sigint_handler( | ||||
| def shield_sigint( | ||||
|     signum: int, | ||||
|     frame: 'frame',  # type: ignore # noqa | ||||
|     # pdb_obj: Optional[MultiActorPdb] = None, | ||||
|     pdb_obj: Optional[MultiActorPdb] = None, | ||||
|     *args, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Specialized, debugger-aware SIGINT handler. | ||||
|     Specialized debugger compatible SIGINT handler. | ||||
| 
 | ||||
|     In children we always ignore to avoid deadlocks since cancellation | ||||
|     should always be managed by the parent supervising actor. The root | ||||
|  | @ -600,7 +606,6 @@ def shield_sigint_handler( | |||
|     uid_in_debug = Lock.global_actor_in_debug | ||||
| 
 | ||||
|     actor = tractor.current_actor() | ||||
|     # print(f'{actor.uid} in HANDLER with ') | ||||
| 
 | ||||
|     def do_cancel(): | ||||
|         # If we haven't tried to cancel the runtime then do that instead | ||||
|  | @ -634,9 +639,6 @@ def shield_sigint_handler( | |||
|                 ) | ||||
|                 return do_cancel() | ||||
| 
 | ||||
|     # only set in the actor actually running the REPL | ||||
|     pdb_obj = Lock.repl | ||||
| 
 | ||||
|     # root actor branch that reports whether or not a child | ||||
|     # has locked debugger. | ||||
|     if ( | ||||
|  | @ -649,40 +651,21 @@ def shield_sigint_handler( | |||
|         # which has already terminated to unlock. | ||||
|         and any_connected | ||||
|     ): | ||||
|         # we are root and some actor is in debug mode | ||||
|         # if uid_in_debug is not None: | ||||
| 
 | ||||
|         if pdb_obj: | ||||
|             name = uid_in_debug[0] | ||||
|             if name != 'root': | ||||
|                 log.pdb( | ||||
|                     f"Ignoring SIGINT, child in debug mode: `{uid_in_debug}`" | ||||
|                 ) | ||||
| 
 | ||||
|             else: | ||||
|                 log.pdb( | ||||
|                     "Ignoring SIGINT while in debug mode" | ||||
|                 ) | ||||
|     elif ( | ||||
|         is_root_process() | ||||
|     ): | ||||
|         if pdb_obj: | ||||
|         name = uid_in_debug[0] | ||||
|         if name != 'root': | ||||
|             log.pdb( | ||||
|                 "Ignoring SIGINT since debug mode is enabled" | ||||
|                 f"Ignoring SIGINT while child in debug mode: `{uid_in_debug}`" | ||||
|             ) | ||||
| 
 | ||||
|         if ( | ||||
|             Lock._root_local_task_cs_in_debug | ||||
|             and not Lock._root_local_task_cs_in_debug.cancel_called | ||||
|         ): | ||||
|             Lock._root_local_task_cs_in_debug.cancel() | ||||
| 
 | ||||
|             # revert back to ``trio`` handler asap! | ||||
|             Lock.unshield_sigint() | ||||
|         else: | ||||
|             log.pdb( | ||||
|                 "Ignoring SIGINT while in debug mode" | ||||
|             ) | ||||
| 
 | ||||
|     # child actor that has locked the debugger | ||||
|     elif not is_root_process(): | ||||
| 
 | ||||
|     elif ( | ||||
|         not is_root_process() | ||||
|     ): | ||||
|         chan: Channel = actor._parent_chan | ||||
|         if not chan or not chan.connected(): | ||||
|             log.warning( | ||||
|  | @ -694,39 +677,29 @@ def shield_sigint_handler( | |||
|             return do_cancel() | ||||
| 
 | ||||
|         task = Lock.local_task_in_debug | ||||
|         if ( | ||||
|             task | ||||
|             and pdb_obj | ||||
|         ): | ||||
|         if task: | ||||
|             log.pdb( | ||||
|                 f"Ignoring SIGINT while task in debug mode: `{task}`" | ||||
|             ) | ||||
| 
 | ||||
|         # TODO: how to handle the case of an intermediary-child actor | ||||
|         # that **is not** marked in debug mode? See oustanding issue: | ||||
|         # https://github.com/goodboy/tractor/issues/320 | ||||
|         # that **is not** marked in debug mode? | ||||
|         # elif debug_mode(): | ||||
| 
 | ||||
|     else:  # XXX: shouldn't ever get here? | ||||
|         print("WTFWTFWTF") | ||||
|         raise KeyboardInterrupt | ||||
|     else: | ||||
|         log.pdb( | ||||
|             "Ignoring SIGINT since debug mode is enabled" | ||||
|         ) | ||||
| 
 | ||||
|     # NOTE: currently (at least on ``fancycompleter`` 0.9.2) | ||||
|     # it looks to be that the last command that was run (eg. ll) | ||||
|     # it lookks to be that the last command that was run (eg. ll) | ||||
|     # will be repeated by default. | ||||
| 
 | ||||
|     # maybe redraw/print last REPL output to console since | ||||
|     # we want to alert the user that more input is expected since | ||||
|     # nothing has been done due to ignoring sigint. | ||||
|     # TODO: maybe redraw/print last REPL output to console | ||||
|     if ( | ||||
|         pdb_obj  # only when this actor has a REPL engaged | ||||
|         pdb_obj | ||||
|         and sys.version_info <= (3, 10) | ||||
|     ): | ||||
|         # XXX: yah, mega hack, but how else do we catch this madness XD | ||||
|         if pdb_obj.shname == 'xonsh': | ||||
|             pdb_obj.stdout.write(pdb_obj.prompt) | ||||
| 
 | ||||
|         pdb_obj.stdout.flush() | ||||
| 
 | ||||
|         # TODO: make this work like sticky mode where if there is output | ||||
|         # detected as written to the tty we redraw this part underneath | ||||
|         # and erase the past draw of this same bit above? | ||||
|  | @ -737,27 +710,36 @@ def shield_sigint_handler( | |||
|         # https://github.com/goodboy/tractor/issues/130#issuecomment-663752040 | ||||
|         # https://github.com/prompt-toolkit/python-prompt-toolkit/blob/c2c6af8a0308f9e5d7c0e28cb8a02963fe0ce07a/prompt_toolkit/patch_stdout.py | ||||
| 
 | ||||
|         # XXX LEGACY: lol, see ``pdbpp`` issue: | ||||
|         # XXX: lol, see ``pdbpp`` issue: | ||||
|         # https://github.com/pdbpp/pdbpp/issues/496 | ||||
| 
 | ||||
|         # TODO: pretty sure this is what we should expect to have to run | ||||
|         # in total but for now we're just going to wait until `pdbpp` | ||||
|         # figures out it's own stuff on 3.10 (and maybe we'll help). | ||||
|         # pdb_obj.do_longlist(None) | ||||
| 
 | ||||
|         # XXX: we were doing this but it shouldn't be required.. | ||||
|         print(pdb_obj.prompt, end='', flush=True) | ||||
| 
 | ||||
| 
 | ||||
| def _set_trace( | ||||
|     actor: tractor.Actor | None = None, | ||||
|     pdb: MultiActorPdb | None = None, | ||||
|     actor: Optional[tractor._actor.Actor] = None, | ||||
|     pdb: Optional[MultiActorPdb] = None, | ||||
| ): | ||||
|     __tracebackhide__ = True | ||||
|     actor = actor or tractor.current_actor() | ||||
| 
 | ||||
|     # XXX: on latest ``pdbpp`` i guess we don't need this? | ||||
|     # frame = sys._getframe() | ||||
|     # last_f = frame.f_back | ||||
|     # last_f.f_globals['__tracebackhide__'] = True | ||||
| 
 | ||||
|     # start 2 levels up in user code | ||||
|     frame: Optional[FrameType] = sys._getframe() | ||||
|     if frame: | ||||
|         frame = frame.f_back  # type: ignore | ||||
| 
 | ||||
|     if ( | ||||
|         frame | ||||
|         and pdb | ||||
|         and actor is not None | ||||
|     ): | ||||
|     if frame and pdb and actor is not None: | ||||
|         log.pdb(f"\nAttaching pdb to actor: {actor.uid}\n") | ||||
|         # no f!#$&* idea, but when we're in async land | ||||
|         # we need 2x frames up? | ||||
|  | @ -766,8 +748,7 @@ def _set_trace( | |||
|     else: | ||||
|         pdb, undo_sigint = mk_mpdb() | ||||
| 
 | ||||
|         # we entered the global ``breakpoint()`` built-in from sync | ||||
|         # code? | ||||
|         # we entered the global ``breakpoint()`` built-in from sync code? | ||||
|         Lock.local_task_in_debug = 'sync' | ||||
| 
 | ||||
|     pdb.set_trace(frame=frame) | ||||
|  | @ -780,7 +761,7 @@ breakpoint = partial( | |||
| 
 | ||||
| 
 | ||||
| def _post_mortem( | ||||
|     actor: tractor.Actor, | ||||
|     actor: tractor._actor.Actor, | ||||
|     pdb: MultiActorPdb, | ||||
| 
 | ||||
| ) -> None: | ||||
|  | @ -791,13 +772,29 @@ def _post_mortem( | |||
|     ''' | ||||
|     log.pdb(f"\nAttaching to pdb in crashed actor: {actor.uid}\n") | ||||
| 
 | ||||
|     # TODO: you need ``pdbpp`` master (at least this commit | ||||
|     # https://github.com/pdbpp/pdbpp/commit/b757794857f98d53e3ebbe70879663d7d843a6c2) | ||||
|     # to fix this and avoid the hang it causes. See issue: | ||||
|     # https://github.com/pdbpp/pdbpp/issues/480 | ||||
|     # TODO: help with a 3.10+ major release if/when it arrives. | ||||
|     # XXX: on py3.10 if you don't have latest ``pdbpp`` installed. | ||||
|     # The exception looks something like: | ||||
|     # Traceback (most recent call last): | ||||
|     # File ".../tractor/_debug.py", line 729, in _post_mortem | ||||
|     #   for _ in range(100): | ||||
|     # File "../site-packages/pdb.py", line 1227, in xpm | ||||
|     #   post_mortem(info[2], Pdb) | ||||
|     # File "../site-packages/pdb.py", line 1175, in post_mortem | ||||
|     #   p.interaction(None, t) | ||||
|     # File "../site-packages/pdb.py", line 216, in interaction | ||||
|     #   ret = self.setup(frame, traceback) | ||||
|     # File "../site-packages/pdb.py", line 259, in setup | ||||
|     #   ret = super(Pdb, self).setup(frame, tb) | ||||
|     # File "/usr/lib/python3.10/pdb.py", line 217, in setup | ||||
|     #   self.curframe = self.stack[self.curindex][0] | ||||
|     # IndexError: list index out of range | ||||
| 
 | ||||
|     pdbp.xpm(Pdb=lambda: pdb) | ||||
|     # NOTE: you need ``pdbpp`` master (at least this commit | ||||
|     # https://github.com/pdbpp/pdbpp/commit/b757794857f98d53e3ebbe70879663d7d843a6c2) | ||||
|     # to fix this and avoid the hang it causes XD. | ||||
|     # see also: https://github.com/pdbpp/pdbpp/issues/480 | ||||
| 
 | ||||
|     pdbpp.xpm(Pdb=lambda: pdb) | ||||
| 
 | ||||
| 
 | ||||
| post_mortem = partial( | ||||
|  | @ -826,11 +823,9 @@ async def _maybe_enter_pm(err): | |||
|         and not is_multi_cancelled(err) | ||||
|     ): | ||||
|         log.debug("Actor crashed, entering debug mode") | ||||
|         try: | ||||
|             await post_mortem() | ||||
|         finally: | ||||
|             Lock.release() | ||||
|             return True | ||||
|         await post_mortem() | ||||
|         Lock.maybe_release() | ||||
|         return True | ||||
| 
 | ||||
|     else: | ||||
|         return False | ||||
|  | @ -838,14 +833,14 @@ async def _maybe_enter_pm(err): | |||
| 
 | ||||
| @acm | ||||
| async def acquire_debug_lock( | ||||
|     subactor_uid: tuple[str, str], | ||||
|     subactor_uid: Tuple[str, str], | ||||
| ) -> AsyncGenerator[None, tuple]: | ||||
|     ''' | ||||
|     Grab root's debug lock on entry, release on exit. | ||||
| 
 | ||||
|     This helper is for actors who don't actually need | ||||
|     to acquire the debugger but want to wait until the | ||||
|     lock is free in the process-tree root. | ||||
|     lock is free in the tree root. | ||||
| 
 | ||||
|     ''' | ||||
|     if not debug_mode(): | ||||
|  | @ -868,10 +863,7 @@ async def maybe_wait_for_debugger( | |||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     if ( | ||||
|         not debug_mode() | ||||
|         and not child_in_debug | ||||
|     ): | ||||
|     if not debug_mode() and not child_in_debug: | ||||
|         return | ||||
| 
 | ||||
|     if ( | ||||
|  | @ -890,6 +882,9 @@ async def maybe_wait_for_debugger( | |||
| 
 | ||||
|             if Lock.global_actor_in_debug: | ||||
|                 sub_in_debug = tuple(Lock.global_actor_in_debug) | ||||
|                 # alive = tractor.current_actor().child_alive(sub_in_debug) | ||||
|                 # if not alive: | ||||
|                 #     break | ||||
| 
 | ||||
|             log.debug('Root polling for debug') | ||||
| 
 | ||||
|  |  | |||
|  | @ -18,11 +18,7 @@ | |||
| Actor discovery API. | ||||
| 
 | ||||
| """ | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     Union, | ||||
|     AsyncGenerator, | ||||
| ) | ||||
| from typing import Tuple, Optional, Union, AsyncGenerator | ||||
| from contextlib import asynccontextmanager as acm | ||||
| 
 | ||||
| from ._ipc import _connect_chan, Channel | ||||
|  | @ -108,7 +104,7 @@ async def query_actor( | |||
| @acm | ||||
| async def find_actor( | ||||
|     name: str, | ||||
|     arbiter_sockaddr: tuple[str, int] | None = None | ||||
|     arbiter_sockaddr: Tuple[str, int] = None | ||||
| 
 | ||||
| ) -> AsyncGenerator[Optional[Portal], None]: | ||||
|     ''' | ||||
|  | @ -134,7 +130,7 @@ async def find_actor( | |||
| @acm | ||||
| async def wait_for_actor( | ||||
|     name: str, | ||||
|     arbiter_sockaddr: tuple[str, int] | None = None | ||||
|     arbiter_sockaddr: Tuple[str, int] = None | ||||
| ) -> AsyncGenerator[Portal, None]: | ||||
|     """Wait on an actor to register with the arbiter. | ||||
| 
 | ||||
|  |  | |||
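For context on the discovery API being retyped above, a hedged sketch of ``find_actor()`` usage (a runtime must already be up, hence the ``open_root_actor()`` wrapper; the actor name is purely illustrative):

    import trio
    import tractor

    async def main():
        async with tractor.open_root_actor():
            async with tractor.find_actor('logger') as portal:
                if portal is None:
                    print('no actor registered under that name')
                else:
                    print('actor found; portal is ready for RPC')

    if __name__ == '__main__':
        trio.run(main)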
|  | @ -18,28 +18,15 @@ | |||
| Sub-process entry points. | ||||
| 
 | ||||
| """ | ||||
| from __future__ import annotations | ||||
| from functools import partial | ||||
| from typing import ( | ||||
|     Any, | ||||
|     TYPE_CHECKING, | ||||
| ) | ||||
| from typing import Tuple, Any | ||||
| import signal | ||||
| 
 | ||||
| import trio  # type: ignore | ||||
| 
 | ||||
| from .log import ( | ||||
|     get_console_log, | ||||
|     get_logger, | ||||
| ) | ||||
| from .log import get_console_log, get_logger | ||||
| from . import _state | ||||
| from .to_asyncio import run_as_asyncio_guest | ||||
| from ._runtime import ( | ||||
|     async_main, | ||||
|     Actor, | ||||
| ) | ||||
| 
 | ||||
| if TYPE_CHECKING: | ||||
|     from ._spawn import SpawnMethodKey | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
|  | @ -47,11 +34,11 @@ log = get_logger(__name__) | |||
| 
 | ||||
| def _mp_main( | ||||
| 
 | ||||
|     actor: Actor,  # type: ignore | ||||
|     accept_addr: tuple[str, int], | ||||
|     forkserver_info: tuple[Any, Any, Any, Any, Any], | ||||
|     start_method: SpawnMethodKey, | ||||
|     parent_addr: tuple[str, int] | None = None, | ||||
|     actor: 'Actor',  # type: ignore | ||||
|     accept_addr: Tuple[str, int], | ||||
|     forkserver_info: Tuple[Any, Any, Any, Any, Any], | ||||
|     start_method: str, | ||||
|     parent_addr: Tuple[str, int] = None, | ||||
|     infect_asyncio: bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
|  | @ -76,8 +63,7 @@ def _mp_main( | |||
| 
 | ||||
|     log.debug(f"parent_addr is {parent_addr}") | ||||
|     trio_main = partial( | ||||
|         async_main, | ||||
|         actor, | ||||
|         actor._async_main, | ||||
|         accept_addr, | ||||
|         parent_addr=parent_addr | ||||
|     ) | ||||
|  | @ -96,9 +82,9 @@ def _mp_main( | |||
| 
 | ||||
| def _trio_main( | ||||
| 
 | ||||
|     actor: Actor,  # type: ignore | ||||
|     actor: 'Actor',  # type: ignore | ||||
|     *, | ||||
|     parent_addr: tuple[str, int] | None = None, | ||||
|     parent_addr: Tuple[str, int] = None, | ||||
|     infect_asyncio: bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
|  | @ -120,8 +106,7 @@ def _trio_main( | |||
| 
 | ||||
|     log.debug(f"parent_addr is {parent_addr}") | ||||
|     trio_main = partial( | ||||
|         async_main, | ||||
|         actor, | ||||
|         actor._async_main, | ||||
|         parent_addr=parent_addr | ||||
|     ) | ||||
| 
 | ||||
|  |  | |||
|  | @ -18,16 +18,11 @@ | |||
| Our classy exception set. | ||||
| 
 | ||||
| """ | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Optional, | ||||
|     Type, | ||||
| ) | ||||
| from typing import Dict, Any, Optional, Type | ||||
| import importlib | ||||
| import builtins | ||||
| import traceback | ||||
| 
 | ||||
| import exceptiongroup as eg | ||||
| import trio | ||||
| 
 | ||||
| 
 | ||||
|  | @ -53,6 +48,9 @@ class RemoteActorError(Exception): | |||
|         self.type = suberror_type | ||||
|         self.msgdata = msgdata | ||||
| 
 | ||||
|     # TODO: a trio.MultiError.catch like context manager | ||||
|     # for catching underlying remote errors of a particular type | ||||
| 
 | ||||
| 
 | ||||
| class InternalActorError(RemoteActorError): | ||||
|     """Remote internal ``tractor`` error indicating | ||||
|  | @ -97,7 +95,7 @@ def pack_error( | |||
|     exc: BaseException, | ||||
|     tb=None, | ||||
| 
 | ||||
| ) -> dict[str, Any]: | ||||
| ) -> Dict[str, Any]: | ||||
|     """Create an "error message" for tranmission over | ||||
|     a channel (aka the wire). | ||||
|     """ | ||||
|  | @ -116,17 +114,15 @@ def pack_error( | |||
| 
 | ||||
| def unpack_error( | ||||
| 
 | ||||
|     msg: dict[str, Any], | ||||
|     msg: Dict[str, Any], | ||||
|     chan=None, | ||||
|     err_type=RemoteActorError | ||||
| 
 | ||||
| ) -> Exception: | ||||
|     ''' | ||||
|     Unpack an 'error' message from the wire | ||||
|     """Unpack an 'error' message from the wire | ||||
|     into a local ``RemoteActorError``. | ||||
| 
 | ||||
|     ''' | ||||
|     __tracebackhide__ = True | ||||
|     """ | ||||
|     error = msg['error'] | ||||
| 
 | ||||
|     tb_str = error.get('tb_str', '') | ||||
|  | @ -139,12 +135,7 @@ def unpack_error( | |||
|         suberror_type = trio.Cancelled | ||||
| 
 | ||||
|     else:  # try to lookup a suitable local error type | ||||
|         for ns in [ | ||||
|             builtins, | ||||
|             _this_mod, | ||||
|             eg, | ||||
|             trio, | ||||
|         ]: | ||||
|         for ns in [builtins, _this_mod, trio]: | ||||
|             try: | ||||
|                 suberror_type = getattr(ns, type_name) | ||||
|                 break | ||||
|  | @ -163,15 +154,12 @@ def unpack_error( | |||
| 
 | ||||
| 
 | ||||
| def is_multi_cancelled(exc: BaseException) -> bool: | ||||
|     ''' | ||||
|     Predicate to determine if a possible ``eg.BaseExceptionGroup`` contains | ||||
|     only ``trio.Cancelled`` sub-exceptions (and is likely the result of | ||||
|     """Predicate to determine if a ``trio.MultiError`` contains only | ||||
|     ``trio.Cancelled`` sub-exceptions (and is likely the result of | ||||
|     cancelling a collection of subtasks). | ||||
| 
 | ||||
|     ''' | ||||
|     if isinstance(exc, eg.BaseExceptionGroup): | ||||
|         return exc.subgroup( | ||||
|             lambda exc: isinstance(exc, trio.Cancelled) | ||||
|         ) is not None | ||||
| 
 | ||||
|     return False | ||||
|     """ | ||||
|     return not trio.MultiError.filter( | ||||
|         lambda exc: exc if not isinstance(exc, trio.Cancelled) else None, | ||||
|         exc, | ||||
|     ) | ||||
|  |  | |||
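
The two versions of ``is_multi_cancelled()`` above implement the same predicate against different group types: ``exceptiongroup.BaseExceptionGroup.subgroup()`` on master vs ``trio.MultiError.filter()`` on this branch. A standalone sketch of the master-side variant, assuming the ``exceptiongroup`` backport is installed (callers typically use it to decide whether an error group is "just cancellation" and may be swallowed):

import trio
from exceptiongroup import BaseExceptionGroup

def is_multi_cancelled(exc: BaseException) -> bool:
    # a non-None subgroup means the group (recursively) holds at least
    # one trio.Cancelled
    if isinstance(exc, BaseExceptionGroup):
        return exc.subgroup(
            lambda e: isinstance(e, trio.Cancelled)
        ) is not None
    return False
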
|  | @ -341,7 +341,7 @@ class Channel: | |||
| 
 | ||||
|     async def connect( | ||||
|         self, | ||||
|         destaddr: tuple[Any, ...] | None = None, | ||||
|         destaddr: tuple[Any, ...] = None, | ||||
|         **kwargs | ||||
| 
 | ||||
|     ) -> MsgTransport: | ||||
|  |  | |||
|  | @ -35,7 +35,6 @@ import warnings | |||
| import trio | ||||
| from async_generator import asynccontextmanager | ||||
| 
 | ||||
| from .trionics import maybe_open_nursery | ||||
| from ._state import current_actor | ||||
| from ._ipc import Channel | ||||
| from .log import get_logger | ||||
|  | @ -45,27 +44,43 @@ from ._exceptions import ( | |||
|     NoResult, | ||||
|     ContextCancelled, | ||||
| ) | ||||
| from ._streaming import ( | ||||
|     Context, | ||||
|     MsgStream, | ||||
| ) | ||||
| from ._streaming import Context, ReceiveMsgStream | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| 
 | ||||
| @asynccontextmanager | ||||
| async def maybe_open_nursery( | ||||
|     nursery: trio.Nursery = None, | ||||
|     shield: bool = False, | ||||
| ) -> AsyncGenerator[trio.Nursery, Any]: | ||||
|     ''' | ||||
|     Create a new nursery if None provided. | ||||
| 
 | ||||
|     Blocks on exit as expected if no input nursery is provided. | ||||
| 
 | ||||
|     ''' | ||||
|     if nursery is not None: | ||||
|         yield nursery | ||||
|     else: | ||||
|         async with trio.open_nursery() as nursery: | ||||
|             nursery.cancel_scope.shield = shield | ||||
|             yield nursery | ||||
| 
 | ||||
| 
 | ||||
| def _unwrap_msg( | ||||
| 
 | ||||
|     msg: dict[str, Any], | ||||
|     channel: Channel | ||||
| 
 | ||||
| ) -> Any: | ||||
|     __tracebackhide__ = True | ||||
|     try: | ||||
|         return msg['return'] | ||||
|     except KeyError: | ||||
|         # internal error should never get here | ||||
|         assert msg.get('cid'), "Received internal error at portal?" | ||||
|         raise unpack_error(msg, channel) from None | ||||
|         raise unpack_error(msg, channel) | ||||
| 
 | ||||
| 
 | ||||
| class MessagingError(Exception): | ||||
|  | @ -104,7 +119,7 @@ class Portal: | |||
|         # it is expected that ``result()`` will be awaited at some | ||||
|         # point. | ||||
|         self._expect_result: Optional[Context] = None | ||||
|         self._streams: set[MsgStream] = set() | ||||
|         self._streams: set[ReceiveMsgStream] = set() | ||||
|         self.actor = current_actor() | ||||
| 
 | ||||
|     async def _submit_for_result( | ||||
|  | @ -139,7 +154,6 @@ class Portal: | |||
|         Return the result(s) from the remote actor's "main" task. | ||||
| 
 | ||||
|         ''' | ||||
|         # __tracebackhide__ = True | ||||
|         # Check for non-rpc errors slapped on the | ||||
|         # channel for which we always raise | ||||
|         exc = self.channel._exc | ||||
|  | @ -189,7 +203,7 @@ class Portal: | |||
| 
 | ||||
|     async def cancel_actor( | ||||
|         self, | ||||
|         timeout: float | None = None, | ||||
|         timeout: float = None, | ||||
| 
 | ||||
|     ) -> bool: | ||||
|         ''' | ||||
|  | @ -319,7 +333,7 @@ class Portal: | |||
|         async_gen_func: Callable,  # typing: ignore | ||||
|         **kwargs, | ||||
| 
 | ||||
|     ) -> AsyncGenerator[MsgStream, None]: | ||||
|     ) -> AsyncGenerator[ReceiveMsgStream, None]: | ||||
| 
 | ||||
|         if not inspect.isasyncgenfunction(async_gen_func): | ||||
|             if not ( | ||||
|  | @ -344,7 +358,7 @@ class Portal: | |||
| 
 | ||||
|         try: | ||||
|             # deliver receive only stream | ||||
|             async with MsgStream( | ||||
|             async with ReceiveMsgStream( | ||||
|                 ctx, ctx._recv_chan, | ||||
|             ) as rchan: | ||||
|                 self._streams.add(rchan) | ||||
|  | @ -464,6 +478,7 @@ class Portal: | |||
|             # sure it's worth being pedantic: | ||||
|             # Exception, | ||||
|             # trio.Cancelled, | ||||
|             # trio.MultiError, | ||||
|             # KeyboardInterrupt, | ||||
| 
 | ||||
|         ) as err: | ||||
|  | @ -500,10 +515,6 @@ class Portal: | |||
|                     f'actor: {uid}' | ||||
|                 ) | ||||
|                 result = await ctx.result() | ||||
|                 log.runtime( | ||||
|                     f'Context {fn_name} returned ' | ||||
|                     f'value from callee `{result}`' | ||||
|                 ) | ||||
| 
 | ||||
|             # though it should be impossible for any tasks | ||||
|             # operating *in* this scope to have survived | ||||
|  | @ -525,6 +536,12 @@ class Portal: | |||
|                         f'task:{cid}\n' | ||||
|                         f'actor:{uid}' | ||||
|                     ) | ||||
|             else: | ||||
|                 log.runtime( | ||||
|                     f'Context {fn_name} returned ' | ||||
|                     f'value from callee `{result}`' | ||||
|                 ) | ||||
| 
 | ||||
|             # XXX: (MEGA IMPORTANT) if this is a root opened process we | ||||
|             # wait for any immediate child in debug before popping the | ||||
|             # context from the runtime msg loop otherwise inside | ||||
|  | @ -537,10 +554,7 @@ class Portal: | |||
|             await maybe_wait_for_debugger() | ||||
| 
 | ||||
|             # remove the context from runtime tracking | ||||
|             self.actor._contexts.pop( | ||||
|                 (self.channel.uid, ctx.cid), | ||||
|                 None, | ||||
|             ) | ||||
|             self.actor._contexts.pop((self.channel.uid, ctx.cid)) | ||||
| 
 | ||||
| 
 | ||||
| @dataclass | ||||
|  | @ -597,11 +611,9 @@ async def open_portal( | |||
| 
 | ||||
|         msg_loop_cs: Optional[trio.CancelScope] = None | ||||
|         if start_msg_loop: | ||||
|             from ._runtime import process_messages | ||||
|             msg_loop_cs = await nursery.start( | ||||
|                 partial( | ||||
|                     process_messages, | ||||
|                     actor, | ||||
|                     actor._process_messages, | ||||
|                     channel, | ||||
|                     # if the local task is cancelled we want to keep | ||||
|                     # the msg loop running until our block ends | ||||
|  |  | |||
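
On this branch ``maybe_open_nursery()`` lives directly in ``_portal.py`` while master imports it from ``.trionics``; either way it lets an API accept an optional caller-supplied nursery and only open (and own) a fresh one when none is passed. A runnable sketch of the pattern:

from contextlib import asynccontextmanager
import trio

@asynccontextmanager
async def maybe_open_nursery(nursery=None, shield=False):
    # reuse the caller's nursery when given, otherwise open (and on exit
    # close) a fresh one
    if nursery is not None:
        yield nursery
    else:
        async with trio.open_nursery() as nursery:
            nursery.cancel_scope.shield = shield
            yield nursery

async def main():
    async with maybe_open_nursery() as n:  # owns a fresh nursery
        n.start_soon(trio.sleep, 0.1)

    async with trio.open_nursery() as outer:
        async with maybe_open_nursery(outer) as n:  # borrows the caller's
            assert n is outer

trio.run(main)
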
							
								
								
									
154  tractor/_root.py
							|  | @ -22,21 +22,14 @@ from contextlib import asynccontextmanager | |||
| from functools import partial | ||||
| import importlib | ||||
| import logging | ||||
| import signal | ||||
| import sys | ||||
| import os | ||||
| from typing import Tuple, Optional, List, Any | ||||
| import typing | ||||
| import warnings | ||||
| 
 | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import trio | ||||
| 
 | ||||
| from ._runtime import ( | ||||
|     Actor, | ||||
|     Arbiter, | ||||
|     async_main, | ||||
| ) | ||||
| from ._actor import Actor, Arbiter | ||||
| from . import _debug | ||||
| from . import _spawn | ||||
| from . import _state | ||||
|  | @ -56,45 +49,37 @@ logger = log.get_logger('tractor') | |||
| @asynccontextmanager | ||||
| async def open_root_actor( | ||||
| 
 | ||||
|     *, | ||||
|     # defaults are above | ||||
|     arbiter_addr: tuple[str, int] | None = None, | ||||
|     arbiter_addr: Optional[Tuple[str, int]] = ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port, | ||||
|     ), | ||||
| 
 | ||||
|     # defaults are above | ||||
|     registry_addr: tuple[str, int] | None = None, | ||||
| 
 | ||||
|     name: str | None = 'root', | ||||
|     name: Optional[str] = 'root', | ||||
| 
 | ||||
|     # either the `multiprocessing` start method: | ||||
|     # https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods | ||||
|     # OR `trio` (the new default). | ||||
|     start_method: _spawn.SpawnMethodKey | None = None, | ||||
|     start_method: Optional[str] = None, | ||||
| 
 | ||||
|     # enables the multi-process debugger support | ||||
|     debug_mode: bool = False, | ||||
| 
 | ||||
|     # internal logging | ||||
|     loglevel: str | None = None, | ||||
|     loglevel: Optional[str] = None, | ||||
| 
 | ||||
|     enable_modules: list | None = None, | ||||
|     rpc_module_paths: list | None = None, | ||||
|     enable_modules: Optional[List] = None, | ||||
|     rpc_module_paths: Optional[List] = None, | ||||
| 
 | ||||
| ) -> typing.Any: | ||||
|     ''' | ||||
|     Runtime init entry point for ``tractor``. | ||||
|     """Async entry point for ``tractor``. | ||||
| 
 | ||||
|     ''' | ||||
|     """ | ||||
|     # Override the global debugger hook to make it play nice with | ||||
|     # ``trio``, see much discussion in: | ||||
|     # ``trio``, see: | ||||
|     # https://github.com/python-trio/trio/issues/1155#issuecomment-742964018 | ||||
|     builtin_bp_handler = sys.breakpointhook | ||||
|     orig_bp_path: str | None = os.environ.get('PYTHONBREAKPOINT', None) | ||||
|     os.environ['PYTHONBREAKPOINT'] = 'tractor._debug._set_trace' | ||||
| 
 | ||||
|     # attempt to retrieve ``trio``'s sigint handler and stash it | ||||
|     # on our debugger lock state. | ||||
|     _debug.Lock._trio_handler = signal.getsignal(signal.SIGINT) | ||||
| 
 | ||||
|     # mark top most level process as root actor | ||||
|     _state._runtime_vars['_is_root'] = True | ||||
| 
 | ||||
|  | @ -113,21 +98,9 @@ async def open_root_actor( | |||
|     if start_method is not None: | ||||
|         _spawn.try_set_start_method(start_method) | ||||
| 
 | ||||
|     if arbiter_addr is not None: | ||||
|         warnings.warn( | ||||
|             '`arbiter_addr` is now deprecated and has been renamed to' | ||||
|             '`registry_addr`.\nUse that instead..', | ||||
|             DeprecationWarning, | ||||
|             stacklevel=2, | ||||
|         ) | ||||
| 
 | ||||
|     registry_addr = (host, port) = ( | ||||
|         registry_addr | ||||
|         or arbiter_addr | ||||
|         or ( | ||||
|             _default_arbiter_host, | ||||
|             _default_arbiter_port, | ||||
|         ) | ||||
|     arbiter_addr = (host, port) = arbiter_addr or ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port, | ||||
|     ) | ||||
| 
 | ||||
|     loglevel = (loglevel or log._default_loglevel).upper() | ||||
|  | @ -173,7 +146,7 @@ async def open_root_actor( | |||
| 
 | ||||
|     except OSError: | ||||
|         # TODO: make this a "discovery" log level? | ||||
|         logger.warning(f"No actor registry found @ {host}:{port}") | ||||
|         logger.warning(f"No actor could be found @ {host}:{port}") | ||||
| 
 | ||||
|     # create a local actor and start up its main routine/task | ||||
|     if arbiter_found: | ||||
|  | @ -183,7 +156,7 @@ async def open_root_actor( | |||
| 
 | ||||
|         actor = Actor( | ||||
|             name or 'anonymous', | ||||
|             arbiter_addr=registry_addr, | ||||
|             arbiter_addr=arbiter_addr, | ||||
|             loglevel=loglevel, | ||||
|             enable_modules=enable_modules, | ||||
|         ) | ||||
|  | @ -199,7 +172,7 @@ async def open_root_actor( | |||
| 
 | ||||
|         actor = Arbiter( | ||||
|             name or 'arbiter', | ||||
|             arbiter_addr=registry_addr, | ||||
|             arbiter_addr=arbiter_addr, | ||||
|             loglevel=loglevel, | ||||
|             enable_modules=enable_modules, | ||||
|         ) | ||||
|  | @ -215,14 +188,13 @@ async def open_root_actor( | |||
|         # start the actor runtime in a new task | ||||
|         async with trio.open_nursery() as nursery: | ||||
| 
 | ||||
|             # ``_runtime.async_main()`` creates an internal nursery and | ||||
|             # ``Actor._async_main()`` creates an internal nursery and | ||||
|             # thus blocks here until the entire underlying actor tree has | ||||
|             # terminated thereby conducting structured concurrency. | ||||
| 
 | ||||
|             await nursery.start( | ||||
|                 partial( | ||||
|                     async_main, | ||||
|                     actor, | ||||
|                     actor._async_main, | ||||
|                     accept_addr=(host, port), | ||||
|                     parent_addr=None | ||||
|                 ) | ||||
|  | @ -230,10 +202,7 @@ async def open_root_actor( | |||
|             try: | ||||
|                 yield actor | ||||
| 
 | ||||
|             except ( | ||||
|                 Exception, | ||||
|                 BaseExceptionGroup, | ||||
|             ) as err: | ||||
|             except (Exception, trio.MultiError) as err: | ||||
| 
 | ||||
|                 entered = await _debug._maybe_enter_pm(err) | ||||
| 
 | ||||
|  | @ -257,40 +226,64 @@ async def open_root_actor( | |||
|                 await actor.cancel() | ||||
|     finally: | ||||
|         _state._current_actor = None | ||||
| 
 | ||||
|         # restore breakpoint hook state | ||||
|         sys.breakpointhook = builtin_bp_handler | ||||
|         if orig_bp_path is not None: | ||||
|             os.environ['PYTHONBREAKPOINT'] = orig_bp_path | ||||
|         else: | ||||
|             # clear env back to having no entry | ||||
|             os.environ.pop('PYTHONBREAKPOINT') | ||||
| 
 | ||||
|         logger.runtime("Root actor terminated") | ||||
| 
 | ||||
| 
 | ||||
| def run_daemon( | ||||
|     enable_modules: list[str], | ||||
| def run( | ||||
| 
 | ||||
|     # target | ||||
|     async_fn: typing.Callable[..., typing.Awaitable], | ||||
|     *args, | ||||
| 
 | ||||
|     # runtime kwargs | ||||
|     name: str | None = 'root', | ||||
|     registry_addr: tuple[str, int] = ( | ||||
|     name: Optional[str] = 'root', | ||||
|     arbiter_addr: Tuple[str, int] = ( | ||||
|         _default_arbiter_host, | ||||
|         _default_arbiter_port, | ||||
|     ), | ||||
| 
 | ||||
|     start_method: str | None = None, | ||||
|     start_method: Optional[str] = None, | ||||
|     debug_mode: bool = False, | ||||
|     **kwargs | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> Any: | ||||
|     """Run a trio-actor async function in process. | ||||
| 
 | ||||
|     This is tractor's main entry and the start point for any async actor. | ||||
|     """ | ||||
|     async def _main(): | ||||
| 
 | ||||
|         async with open_root_actor( | ||||
|             arbiter_addr=arbiter_addr, | ||||
|             name=name, | ||||
|             start_method=start_method, | ||||
|             debug_mode=debug_mode, | ||||
|             **kwargs, | ||||
|         ): | ||||
| 
 | ||||
|             return await async_fn(*args) | ||||
| 
 | ||||
|     warnings.warn( | ||||
|         "`tractor.run()` is now deprecated. `tractor` now" | ||||
|         " implicitly starts the root actor on first actor nursery" | ||||
|         " use. If you want to start the root actor manually, use" | ||||
|         " `tractor.open_root_actor()`.", | ||||
|         DeprecationWarning, | ||||
|         stacklevel=2, | ||||
|     ) | ||||
|     return trio.run(_main) | ||||
| 
 | ||||
| 
 | ||||
| def run_daemon( | ||||
|     enable_modules: list[str], | ||||
|     **kwargs | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Spawn daemon actor which will respond to RPC; the main task simply | ||||
|     starts the runtime and then sleeps forever. | ||||
|     Spawn daemon actor which will respond to RPC. | ||||
| 
 | ||||
|     This is a very minimal convenience wrapper around starting | ||||
|     a "run-until-cancelled" root actor which can be started with a set | ||||
|     of enabled modules for RPC request handling. | ||||
|     This is a convenience wrapper around | ||||
|     ``tractor.run(trio.sleep(float('inf')))`` such that the first actor spawned | ||||
|     is meant to run forever responding to RPC requests. | ||||
| 
 | ||||
|     ''' | ||||
|     kwargs['enable_modules'] = list(enable_modules) | ||||
|  | @ -298,15 +291,4 @@ def run_daemon( | |||
|     for path in enable_modules: | ||||
|         importlib.import_module(path) | ||||
| 
 | ||||
|     async def _main(): | ||||
| 
 | ||||
|         async with open_root_actor( | ||||
|             registry_addr=registry_addr, | ||||
|             name=name, | ||||
|             start_method=start_method, | ||||
|             debug_mode=debug_mode, | ||||
|             **kwargs, | ||||
|         ): | ||||
|             return await trio.sleep_forever() | ||||
| 
 | ||||
|     return trio.run(_main) | ||||
|     return run(partial(trio.sleep, float('inf')), **kwargs) | ||||
|  |  | |||
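
The ``_root.py`` hunks show that this branch still ships ``tractor.run()`` (emitting a ``DeprecationWarning``) while master drops it entirely and renames ``arbiter_addr`` to ``registry_addr``; both sides point users at an explicit ``open_root_actor()`` (or an actor nursery, which starts the root actor implicitly). A rough sketch of the explicit form, assuming the top-level re-export and the library's default registry address:

import tractor
import trio

async def main():
    # explicit runtime bring-up; an actor nursery would do this implicitly
    async with tractor.open_root_actor(name='root'):
        await trio.sleep(0.1)  # application code goes here

trio.run(main)
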
|  | @ -22,15 +22,10 @@ from __future__ import annotations | |||
| import sys | ||||
| import platform | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Awaitable, | ||||
|     Literal, | ||||
|     Callable, | ||||
|     TypeVar, | ||||
|     TYPE_CHECKING, | ||||
|     Any, Optional, Callable, TypeVar, TYPE_CHECKING | ||||
| ) | ||||
| from collections.abc import Awaitable | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| 
 | ||||
|  | @ -44,28 +39,23 @@ from ._state import ( | |||
|     is_root_process, | ||||
|     debug_mode, | ||||
| ) | ||||
| 
 | ||||
| from .log import get_logger | ||||
| from ._portal import Portal | ||||
| from ._runtime import Actor | ||||
| from ._actor import Actor | ||||
| from ._entry import _mp_main | ||||
| from ._exceptions import ActorFailure | ||||
| 
 | ||||
| 
 | ||||
| if TYPE_CHECKING: | ||||
|     from ._supervise import ActorNursery | ||||
|     import multiprocessing as mp | ||||
|     ProcessType = TypeVar('ProcessType', mp.Process, trio.Process) | ||||
| 
 | ||||
| log = get_logger('tractor') | ||||
| 
 | ||||
| # placeholder for an mp start context if so using that backend | ||||
| _ctx: mp.context.BaseContext | None = None | ||||
| SpawnMethodKey = Literal[ | ||||
|     'trio',  # supported on all platforms | ||||
|     'mp_spawn', | ||||
|     'mp_forkserver',  # posix only | ||||
| ] | ||||
| _spawn_method: SpawnMethodKey = 'trio' | ||||
| _ctx: Optional[mp.context.BaseContext] = None | ||||
| _spawn_method: str = "trio" | ||||
| 
 | ||||
| 
 | ||||
| if platform.system() == 'Windows': | ||||
|  | @ -82,10 +72,7 @@ else: | |||
|         await trio.lowlevel.wait_readable(proc.sentinel) | ||||
| 
 | ||||
| 
 | ||||
| def try_set_start_method( | ||||
|     key: SpawnMethodKey | ||||
| 
 | ||||
| ) -> mp.context.BaseContext | None: | ||||
| def try_set_start_method(name: str) -> Optional[mp.context.BaseContext]: | ||||
|     ''' | ||||
|     Attempt to set the method for process starting, aka the "actor | ||||
|     spawning backend". | ||||
|  | @ -100,30 +87,28 @@ def try_set_start_method( | |||
|     global _ctx | ||||
|     global _spawn_method | ||||
| 
 | ||||
|     mp_methods = mp.get_all_start_methods() | ||||
|     if 'fork' in mp_methods: | ||||
|     methods = mp.get_all_start_methods() | ||||
|     if 'fork' in methods: | ||||
|         # forking is incompatible with ``trio``'s global task tree | ||||
|         mp_methods.remove('fork') | ||||
|         methods.remove('fork') | ||||
| 
 | ||||
|     match key: | ||||
|         case 'mp_forkserver': | ||||
|             from . import _forkserver_override | ||||
|             _forkserver_override.override_stdlib() | ||||
|             _ctx = mp.get_context('forkserver') | ||||
|     # supported on all platforms | ||||
|     methods += ['trio'] | ||||
| 
 | ||||
|         case 'mp_spawn': | ||||
|             _ctx = mp.get_context('spawn') | ||||
|     if name not in methods: | ||||
|         raise ValueError( | ||||
|             f"Spawn method `{name}` is invalid please choose one of {methods}" | ||||
|         ) | ||||
|     elif name == 'forkserver': | ||||
|         from . import _forkserver_override | ||||
|         _forkserver_override.override_stdlib() | ||||
|         _ctx = mp.get_context(name) | ||||
|     elif name == 'trio': | ||||
|         _ctx = None | ||||
|     else: | ||||
|         _ctx = mp.get_context(name) | ||||
| 
 | ||||
|         case 'trio': | ||||
|             _ctx = None | ||||
| 
 | ||||
|         case _: | ||||
|             raise ValueError( | ||||
|                 f'Spawn method `{key}` is invalid!\n' | ||||
|                 f'Please choose one of {SpawnMethodKey}' | ||||
|             ) | ||||
| 
 | ||||
|     _spawn_method = key | ||||
|     _spawn_method = name | ||||
|     return _ctx | ||||
| 
 | ||||
| 
 | ||||
|  | @ -139,7 +124,6 @@ async def exhaust_portal( | |||
|     If the main task is an async generator do our best to consume | ||||
|     what's left of it. | ||||
|     ''' | ||||
|     __tracebackhide__ = True | ||||
|     try: | ||||
|         log.debug(f"Waiting on final result from {actor.uid}") | ||||
| 
 | ||||
|  | @ -147,11 +131,8 @@ async def exhaust_portal( | |||
|         # always be established and shutdown using a context manager api | ||||
|         final = await portal.result() | ||||
| 
 | ||||
|     except ( | ||||
|         Exception, | ||||
|         BaseExceptionGroup, | ||||
|     ) as err: | ||||
|         # we reraise in the parent task via a ``BaseExceptionGroup`` | ||||
|     except (Exception, trio.MultiError) as err: | ||||
|         # we reraise in the parent task via a ``trio.MultiError`` | ||||
|         return err | ||||
|     except trio.Cancelled as err: | ||||
|         # lol, of course we need this too ;P | ||||
|  | @ -179,7 +160,7 @@ async def cancel_on_completion( | |||
|     ''' | ||||
|     # if this call errors we store the exception for later | ||||
|     # in ``errors`` which will be reraised inside | ||||
|     # an exception group and we still send out a cancel request | ||||
|     # a MultiError and we still send out a cancel request | ||||
|     result = await exhaust_portal(portal, actor) | ||||
|     if isinstance(result, Exception): | ||||
|         errors[actor.uid] = result | ||||
|  | @ -199,37 +180,16 @@ async def cancel_on_completion( | |||
| async def do_hard_kill( | ||||
|     proc: trio.Process, | ||||
|     terminate_after: int = 3, | ||||
| 
 | ||||
| ) -> None: | ||||
|     # NOTE: this timeout used to do nothing since we were shielding | ||||
|     # the ``.wait()`` inside ``new_proc()`` which will pretty much | ||||
|     # never release until the process exits, now it acts as | ||||
|     # a hard-kill time ultimatum. | ||||
|     log.debug(f"Terminating {proc}") | ||||
|     with trio.move_on_after(terminate_after) as cs: | ||||
| 
 | ||||
|         # NOTE: code below was copied verbatim from the now deprecated | ||||
|         # (in 0.20.0) ``trio._subrocess.Process.aclose()``, orig doc | ||||
|         # string: | ||||
|         # | ||||
|         # Close any pipes we have to the process (both input and output) | ||||
|         # and wait for it to exit. If cancelled, kills the process and | ||||
|         # waits for it to finish exiting before propagating the | ||||
|         # cancellation. | ||||
|         with trio.CancelScope(shield=True): | ||||
|             if proc.stdin is not None: | ||||
|                 await proc.stdin.aclose() | ||||
|             if proc.stdout is not None: | ||||
|                 await proc.stdout.aclose() | ||||
|             if proc.stderr is not None: | ||||
|                 await proc.stderr.aclose() | ||||
|         try: | ||||
|             await proc.wait() | ||||
|         finally: | ||||
|             if proc.returncode is None: | ||||
|                 proc.kill() | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     await proc.wait() | ||||
|         # NOTE: This ``__aexit__()`` shields internally. | ||||
|         async with proc:  # calls ``trio.Process.aclose()`` | ||||
|             log.debug(f"Terminating {proc}") | ||||
| 
 | ||||
|     if cs.cancelled_caught: | ||||
|         # XXX: should pretty much never get here unless we have | ||||
|  | @ -280,17 +240,16 @@ async def soft_wait( | |||
| 
 | ||||
|             if proc.poll() is None:  # type: ignore | ||||
|                 log.warning( | ||||
|                     'Actor still alive after cancel request:\n' | ||||
|                     f'{uid}' | ||||
|                 ) | ||||
|                     f'Process still alive after cancel request:\n{uid}') | ||||
| 
 | ||||
|                 n.cancel_scope.cancel() | ||||
|         raise | ||||
| 
 | ||||
| 
 | ||||
| async def new_proc( | ||||
| 
 | ||||
|     name: str, | ||||
|     actor_nursery: ActorNursery, | ||||
|     actor_nursery: 'ActorNursery',  # type: ignore  # noqa | ||||
|     subactor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
| 
 | ||||
|  | @ -304,41 +263,6 @@ async def new_proc( | |||
|     infect_asyncio: bool = False, | ||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||
| 
 | ||||
| ) -> None: | ||||
| 
 | ||||
|     # lookup backend spawning target | ||||
|     target = _methods[_spawn_method] | ||||
| 
 | ||||
|     # mark the new actor with the global spawn method | ||||
|     subactor._spawn_method = _spawn_method | ||||
| 
 | ||||
|     await target( | ||||
|         name, | ||||
|         actor_nursery, | ||||
|         subactor, | ||||
|         errors, | ||||
|         bind_addr, | ||||
|         parent_addr, | ||||
|         _runtime_vars,  # run time vars | ||||
|         infect_asyncio=infect_asyncio, | ||||
|         task_status=task_status, | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| async def trio_proc( | ||||
|     name: str, | ||||
|     actor_nursery: ActorNursery, | ||||
|     subactor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
| 
 | ||||
|     # passed through to actor main | ||||
|     bind_addr: tuple[str, int], | ||||
|     parent_addr: tuple[str, int], | ||||
|     _runtime_vars: dict[str, Any],  # serialized and sent to _child | ||||
|     *, | ||||
|     infect_asyncio: bool = False, | ||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Create a new ``Process`` using a "spawn method" as (configured using | ||||
|  | @ -348,165 +272,178 @@ async def trio_proc( | |||
|     here is to be considered the core supervision strategy. | ||||
| 
 | ||||
|     ''' | ||||
|     spawn_cmd = [ | ||||
|         sys.executable, | ||||
|         "-m", | ||||
|         # Hardcode this (instead of using ``_child.__name__`` to avoid a | ||||
|         # double import warning: https://stackoverflow.com/a/45070583 | ||||
|         "tractor._child", | ||||
|         # We provide the child's unique identifier on this exec/spawn | ||||
|         # line for debugging purposes when viewing the process tree from | ||||
|         # the OS; it otherwise can be passed via the parent channel if | ||||
|         # we prefer in the future (for privacy). | ||||
|         "--uid", | ||||
|         str(subactor.uid), | ||||
|         # Address the child must connect to on startup | ||||
|         "--parent_addr", | ||||
|         str(parent_addr) | ||||
|     ] | ||||
|     # mark the new actor with the global spawn method | ||||
|     subactor._spawn_method = _spawn_method | ||||
|     uid = subactor.uid | ||||
| 
 | ||||
|     if subactor.loglevel: | ||||
|         spawn_cmd += [ | ||||
|             "--loglevel", | ||||
|             subactor.loglevel | ||||
|     if _spawn_method == 'trio': | ||||
|         spawn_cmd = [ | ||||
|             sys.executable, | ||||
|             "-m", | ||||
|             # Hardcode this (instead of using ``_child.__name__`` to avoid a | ||||
|             # double import warning: https://stackoverflow.com/a/45070583 | ||||
|             "tractor._child", | ||||
|             # We provide the child's unique identifier on this exec/spawn | ||||
|             # line for debugging purposes when viewing the process tree from | ||||
|             # the OS; it otherwise can be passed via the parent channel if | ||||
|             # we prefer in the future (for privacy). | ||||
|             "--uid", | ||||
|             str(subactor.uid), | ||||
|             # Address the child must connect to on startup | ||||
|             "--parent_addr", | ||||
|             str(parent_addr) | ||||
|         ] | ||||
|     # Tell child to run in guest mode on top of ``asyncio`` loop | ||||
|     if infect_asyncio: | ||||
|         spawn_cmd.append("--asyncio") | ||||
| 
 | ||||
|     cancelled_during_spawn: bool = False | ||||
|     proc: trio.Process | None = None | ||||
|     try: | ||||
|         if subactor.loglevel: | ||||
|             spawn_cmd += [ | ||||
|                 "--loglevel", | ||||
|                 subactor.loglevel | ||||
|             ] | ||||
|         # Tell child to run in guest mode on top of ``asyncio`` loop | ||||
|         if infect_asyncio: | ||||
|             spawn_cmd.append("--asyncio") | ||||
| 
 | ||||
|         cancelled_during_spawn: bool = False | ||||
|         proc: Optional[trio.Process] = None | ||||
|         try: | ||||
|             # TODO: needs ``trio_typing`` patch? | ||||
|             proc = await trio.lowlevel.open_process(spawn_cmd) | ||||
|             try: | ||||
|                 # TODO: needs ``trio_typing`` patch? | ||||
|                 proc = await trio.lowlevel.open_process(spawn_cmd)  # type: ignore | ||||
| 
 | ||||
|             log.runtime(f"Started {proc}") | ||||
|                 log.runtime(f"Started {proc}") | ||||
| 
 | ||||
|             # wait for actor to spawn and connect back to us | ||||
|             # channel should have handshake completed by the | ||||
|             # local actor by the time we get a ref to it | ||||
|             event, chan = await actor_nursery._actor.wait_for_peer( | ||||
|                 subactor.uid) | ||||
|                 # wait for actor to spawn and connect back to us | ||||
|                 # channel should have handshake completed by the | ||||
|                 # local actor by the time we get a ref to it | ||||
|                 event, chan = await actor_nursery._actor.wait_for_peer( | ||||
|                     subactor.uid) | ||||
| 
 | ||||
|         except trio.Cancelled: | ||||
|             cancelled_during_spawn = True | ||||
|             # we may cancel before the child connects back in which | ||||
|             # case avoid clobbering the pdb tty. | ||||
|             if debug_mode(): | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     # don't clobber an ongoing pdb | ||||
|                     if is_root_process(): | ||||
|                         await maybe_wait_for_debugger() | ||||
|             except trio.Cancelled: | ||||
|                 cancelled_during_spawn = True | ||||
|                 # we may cancel before the child connects back in which | ||||
|                 # case avoid clobbering the pdb tty. | ||||
|                 if debug_mode(): | ||||
|                     with trio.CancelScope(shield=True): | ||||
|                         # don't clobber an ongoing pdb | ||||
|                         if is_root_process(): | ||||
|                             await maybe_wait_for_debugger() | ||||
| 
 | ||||
|                     elif proc is not None: | ||||
|                         async with acquire_debug_lock(subactor.uid): | ||||
|                             # soft wait on the proc to terminate | ||||
|                             with trio.move_on_after(0.5): | ||||
|                                 await proc.wait() | ||||
|             raise | ||||
|                         elif proc is not None: | ||||
|                             async with acquire_debug_lock(uid): | ||||
|                                 # soft wait on the proc to terminate | ||||
|                                 with trio.move_on_after(0.5): | ||||
|                                     await proc.wait() | ||||
|                 raise | ||||
| 
 | ||||
|         # a sub-proc ref **must** exist now | ||||
|         assert proc | ||||
|             # a sub-proc ref **must** exist now | ||||
|             assert proc | ||||
| 
 | ||||
|         portal = Portal(chan) | ||||
|         actor_nursery._children[subactor.uid] = ( | ||||
|             subactor, | ||||
|             proc, | ||||
|             portal, | ||||
|         ) | ||||
|             portal = Portal(chan) | ||||
|             actor_nursery._children[subactor.uid] = ( | ||||
|                 subactor, proc, portal) | ||||
| 
 | ||||
|         # send additional init params | ||||
|         await chan.send({ | ||||
|             "_parent_main_data": subactor._parent_main_data, | ||||
|             "enable_modules": subactor.enable_modules, | ||||
|             "_arb_addr": subactor._arb_addr, | ||||
|             "bind_host": bind_addr[0], | ||||
|             "bind_port": bind_addr[1], | ||||
|             "_runtime_vars": _runtime_vars, | ||||
|         }) | ||||
|             # send additional init params | ||||
|             await chan.send({ | ||||
|                 "_parent_main_data": subactor._parent_main_data, | ||||
|                 "enable_modules": subactor.enable_modules, | ||||
|                 "_arb_addr": subactor._arb_addr, | ||||
|                 "bind_host": bind_addr[0], | ||||
|                 "bind_port": bind_addr[1], | ||||
|                 "_runtime_vars": _runtime_vars, | ||||
|             }) | ||||
| 
 | ||||
|         # track subactor in current nursery | ||||
|         curr_actor = current_actor() | ||||
|         curr_actor._actoruid2nursery[subactor.uid] = actor_nursery | ||||
|             # track subactor in current nursery | ||||
|             curr_actor = current_actor() | ||||
|             curr_actor._actoruid2nursery[subactor.uid] = actor_nursery | ||||
| 
 | ||||
|         # resume caller at next checkpoint now that child is up | ||||
|         task_status.started(portal) | ||||
|             # resume caller at next checkpoint now that child is up | ||||
|             task_status.started(portal) | ||||
| 
 | ||||
|         # wait for ActorNursery.wait() to be called | ||||
|         with trio.CancelScope(shield=True): | ||||
|             await actor_nursery._join_procs.wait() | ||||
| 
 | ||||
|         async with trio.open_nursery() as nursery: | ||||
|             if portal in actor_nursery._cancel_after_result_on_exit: | ||||
|                 nursery.start_soon( | ||||
|                     cancel_on_completion, | ||||
|                     portal, | ||||
|                     subactor, | ||||
|                     errors | ||||
|                 ) | ||||
| 
 | ||||
|             # This is a "soft" (cancellable) join/reap which | ||||
|             # will remote cancel the actor on a ``trio.Cancelled`` | ||||
|             # condition. | ||||
|             await soft_wait( | ||||
|                 proc, | ||||
|                 trio.Process.wait, | ||||
|                 portal | ||||
|             ) | ||||
| 
 | ||||
|             # cancel result waiter that may have been spawned in | ||||
|             # tandem if not done already | ||||
|             log.warning( | ||||
|                 "Cancelling existing result waiter task for " | ||||
|                 f"{subactor.uid}") | ||||
|             nursery.cancel_scope.cancel() | ||||
| 
 | ||||
|     finally: | ||||
|         # XXX NOTE XXX: The "hard" reap since no actor zombies are | ||||
|         # allowed! Do this **after** cancellation/teardown to avoid | ||||
|         # killing the process too early. | ||||
|         if proc: | ||||
|             log.cancel(f'Hard reap sequence starting for {subactor.uid}') | ||||
|             # wait for ActorNursery.wait() to be called | ||||
|             with trio.CancelScope(shield=True): | ||||
|                 await actor_nursery._join_procs.wait() | ||||
| 
 | ||||
|                 # don't clobber an ongoing pdb | ||||
|                 if cancelled_during_spawn: | ||||
|                     # Try again to avoid TTY clobbering. | ||||
|                     async with acquire_debug_lock(subactor.uid): | ||||
|                         with trio.move_on_after(0.5): | ||||
|                             await proc.wait() | ||||
| 
 | ||||
|                 if is_root_process(): | ||||
|                     # TODO: solve the following issue where we need | ||||
|                     # to do a similar wait like this but in an | ||||
|                     # "intermediary" parent actor that itself isn't | ||||
|                     # in debug but has a child that is, and we need | ||||
|                     # to hold off on relaying SIGINT until that child | ||||
|                     # is complete. | ||||
|                     # https://github.com/goodboy/tractor/issues/320 | ||||
|                     await maybe_wait_for_debugger( | ||||
|                         child_in_debug=_runtime_vars.get( | ||||
|                             '_debug_mode', False), | ||||
|             async with trio.open_nursery() as nursery: | ||||
|                 if portal in actor_nursery._cancel_after_result_on_exit: | ||||
|                     nursery.start_soon( | ||||
|                         cancel_on_completion, | ||||
|                         portal, | ||||
|                         subactor, | ||||
|                         errors | ||||
|                     ) | ||||
| 
 | ||||
|                 if proc.poll() is None: | ||||
|                     log.cancel(f"Attempting to hard kill {proc}") | ||||
|                     await do_hard_kill(proc) | ||||
|                 # This is a "soft" (cancellable) join/reap which | ||||
|                 # will remote cancel the actor on a ``trio.Cancelled`` | ||||
|                 # condition. | ||||
|                 await soft_wait( | ||||
|                     proc, | ||||
|                     trio.Process.wait, | ||||
|                     portal | ||||
|                 ) | ||||
| 
 | ||||
|                 log.debug(f"Joined {proc}") | ||||
|         else: | ||||
|             log.warning('Nursery cancelled before sub-proc started') | ||||
|                 # cancel result waiter that may have been spawned in | ||||
|                 # tandem if not done already | ||||
|                 log.warning( | ||||
|                     "Cancelling existing result waiter task for " | ||||
|                     f"{subactor.uid}") | ||||
|                 nursery.cancel_scope.cancel() | ||||
| 
 | ||||
|         if not cancelled_during_spawn: | ||||
|             # pop child entry to indicate we no longer managing this | ||||
|             # subactor | ||||
|             actor_nursery._children.pop(subactor.uid) | ||||
|         finally: | ||||
|             # The "hard" reap since no actor zombies are allowed! | ||||
|             # XXX: do this **after** cancellation/teardown to avoid | ||||
|             # killing the process too early. | ||||
|             if proc: | ||||
|                 log.cancel(f'Hard reap sequence starting for {uid}') | ||||
|                 with trio.CancelScope(shield=True): | ||||
| 
 | ||||
|                     # don't clobber an ongoing pdb | ||||
|                     if cancelled_during_spawn: | ||||
|                         # Try again to avoid TTY clobbering. | ||||
|                         async with acquire_debug_lock(uid): | ||||
|                             with trio.move_on_after(0.5): | ||||
|                                 await proc.wait() | ||||
| 
 | ||||
|                     if is_root_process(): | ||||
|                         await maybe_wait_for_debugger( | ||||
|                             child_in_debug=_runtime_vars.get( | ||||
|                                 '_debug_mode', False), | ||||
|                         ) | ||||
| 
 | ||||
|                     if proc.poll() is None: | ||||
|                         log.cancel(f"Attempting to hard kill {proc}") | ||||
|                         await do_hard_kill(proc) | ||||
| 
 | ||||
|                     log.debug(f"Joined {proc}") | ||||
|             else: | ||||
|                 log.warning('Nursery cancelled before sub-proc started') | ||||
| 
 | ||||
|             if not cancelled_during_spawn: | ||||
|                 # pop child entry to indicate we no longer managing this | ||||
|                 # subactor | ||||
|                 actor_nursery._children.pop(subactor.uid) | ||||
| 
 | ||||
|     else: | ||||
|         # `multiprocessing` | ||||
|         # async with trio.open_nursery() as nursery: | ||||
|         await mp_new_proc( | ||||
|             name=name, | ||||
|             actor_nursery=actor_nursery, | ||||
|             subactor=subactor, | ||||
|             errors=errors, | ||||
| 
 | ||||
|             # passed through to actor main | ||||
|             bind_addr=bind_addr, | ||||
|             parent_addr=parent_addr, | ||||
|             _runtime_vars=_runtime_vars, | ||||
|             infect_asyncio=infect_asyncio, | ||||
|             task_status=task_status, | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| async def mp_proc( | ||||
| async def mp_new_proc( | ||||
| 
 | ||||
|     name: str, | ||||
|     actor_nursery: ActorNursery,  # type: ignore  # noqa | ||||
|     actor_nursery: 'ActorNursery',  # type: ignore  # noqa | ||||
|     subactor: Actor, | ||||
|     errors: dict[tuple[str, str], Exception], | ||||
|     # passed through to actor main | ||||
|  | @ -531,7 +468,6 @@ async def mp_proc( | |||
|     assert _ctx | ||||
|     start_method = _ctx.get_start_method() | ||||
|     if start_method == 'forkserver': | ||||
| 
 | ||||
|         from multiprocessing import forkserver  # type: ignore | ||||
|         # XXX do our hackery on the stdlib to avoid multiple | ||||
|         # forkservers (one at each subproc layer). | ||||
|  | @ -544,24 +480,23 @@ async def mp_proc( | |||
|             # forkserver.set_forkserver_preload(enable_modules) | ||||
|             forkserver.ensure_running() | ||||
|             fs_info = ( | ||||
|                 fs._forkserver_address,  # type: ignore  # noqa | ||||
|                 fs._forkserver_alive_fd,  # type: ignore  # noqa | ||||
|                 fs._forkserver_address, | ||||
|                 fs._forkserver_alive_fd, | ||||
|                 getattr(fs, '_forkserver_pid', None), | ||||
|                 getattr( | ||||
|                     resource_tracker._resource_tracker, '_pid', None), | ||||
|                 resource_tracker._resource_tracker._fd, | ||||
|             ) | ||||
|         else:  # request the forkserver to fork a new child | ||||
|         else: | ||||
|             assert curr_actor._forkserver_info | ||||
|             fs_info = ( | ||||
|                 fs._forkserver_address,  # type: ignore  # noqa | ||||
|                 fs._forkserver_alive_fd,  # type: ignore  # noqa | ||||
|                 fs._forkserver_pid,  # type: ignore  # noqa | ||||
|                 fs._forkserver_address, | ||||
|                 fs._forkserver_alive_fd, | ||||
|                 fs._forkserver_pid, | ||||
|                 resource_tracker._resource_tracker._pid, | ||||
|                 resource_tracker._resource_tracker._fd, | ||||
|              ) = curr_actor._forkserver_info | ||||
|     else: | ||||
|         # spawn method | ||||
|         fs_info = (None, None, None, None, None) | ||||
| 
 | ||||
|     proc: mp.Process = _ctx.Process(  # type: ignore | ||||
|  | @ -570,7 +505,7 @@ async def mp_proc( | |||
|             subactor, | ||||
|             bind_addr, | ||||
|             fs_info, | ||||
|             _spawn_method, | ||||
|             start_method, | ||||
|             parent_addr, | ||||
|             infect_asyncio, | ||||
|         ), | ||||
|  | @ -664,16 +599,4 @@ async def mp_proc( | |||
|         log.debug(f"Joined {proc}") | ||||
| 
 | ||||
|         # pop child entry to indicate we are no longer managing subactor | ||||
|         actor_nursery._children.pop(subactor.uid) | ||||
| 
 | ||||
|         # TODO: prolly report to ``mypy`` how this causes all sorts of | ||||
|         # false errors.. | ||||
|         # subactor, proc, portal = actor_nursery._children.pop(subactor.uid) | ||||
| 
 | ||||
| 
 | ||||
| # proc spawning backend target map | ||||
| _methods: dict[SpawnMethodKey, Callable] = { | ||||
|     'trio': trio_proc, | ||||
|     'mp_spawn': mp_proc, | ||||
|     'mp_forkserver': mp_proc, | ||||
| } | ||||
|         subactor, proc, portal = actor_nursery._children.pop(subactor.uid) | ||||
|  |  | |||
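
The largest structural difference in ``_spawn.py`` is dispatch: master narrows the spawn-method name to a ``Literal`` key ('trio', 'mp_spawn', 'mp_forkserver') and routes ``new_proc()`` through a ``_methods`` table, while this branch keeps a single ``new_proc()`` that branches on the ``_spawn_method`` string ('trio', 'spawn', 'forkserver'). A minimal sketch of the table-dispatch pattern (the backend bodies here are illustrative stubs, not the real spawners):

from typing import Callable, Literal

SpawnMethodKey = Literal['trio', 'mp_spawn', 'mp_forkserver']

async def trio_proc(name: str) -> None:
    print(f'would spawn {name} via trio.Process')

async def mp_proc(name: str) -> None:
    print(f'would spawn {name} via multiprocessing')

_methods: dict[SpawnMethodKey, Callable] = {
    'trio': trio_proc,
    'mp_spawn': mp_proc,
    'mp_forkserver': mp_proc,
}

async def new_proc(name: str, method: SpawnMethodKey = 'trio') -> None:
    # look up the configured backend coroutine and delegate to it
    await _methods[method](name)
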
|  | @ -18,10 +18,8 @@ | |||
| Per process state | ||||
| 
 | ||||
| """ | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     Any, | ||||
| ) | ||||
| from typing import Optional, Dict, Any | ||||
| from collections.abc import Mapping | ||||
| 
 | ||||
| import trio | ||||
| 
 | ||||
|  | @ -29,7 +27,7 @@ from ._exceptions import NoRuntime | |||
| 
 | ||||
| 
 | ||||
| _current_actor: Optional['Actor'] = None  # type: ignore # noqa | ||||
| _runtime_vars: dict[str, Any] = { | ||||
| _runtime_vars: Dict[str, Any] = { | ||||
|     '_debug_mode': False, | ||||
|     '_is_root': False, | ||||
|     '_root_mailbox': (None, None) | ||||
|  | @ -45,6 +43,30 @@ def current_actor(err_on_no_runtime: bool = True) -> 'Actor':  # type: ignore # | |||
|     return _current_actor | ||||
| 
 | ||||
| 
 | ||||
| _conc_name_getters = { | ||||
|     'task': trio.lowlevel.current_task, | ||||
|     'actor': current_actor | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| class ActorContextInfo(Mapping): | ||||
|     "Dyanmic lookup for local actor and task names" | ||||
|     _context_keys = ('task', 'actor') | ||||
| 
 | ||||
|     def __len__(self): | ||||
|         return len(self._context_keys) | ||||
| 
 | ||||
|     def __iter__(self): | ||||
|         return iter(self._context_keys) | ||||
| 
 | ||||
|     def __getitem__(self, key: str) -> str: | ||||
|         try: | ||||
|             return _conc_name_getters[key]().name  # type: ignore | ||||
|         except RuntimeError: | ||||
|             # no local actor/task context initialized yet | ||||
|             return f'no {key} context' | ||||
| 
 | ||||
| 
 | ||||
| def is_main_process() -> bool: | ||||
|     """Bool determining if this actor is running in the top-most process. | ||||
|     """ | ||||
|  |  | |||
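
``ActorContextInfo`` (kept on this branch, moved out of ``_state.py`` on master) is a lazy ``Mapping``: each key is resolved at lookup time via ``trio.lowlevel.current_task()`` / ``current_actor()`` so log format fields like ``%(actor)s`` and ``%(task)s`` reflect the emitting context. How ``tractor.log`` actually wires it in is not shown in this diff, but the idea can be illustrated with a plain ``logging.LoggerAdapter`` and a stand-in mapping:

import logging
from collections.abc import Mapping

class ContextInfo(Mapping):
    '''Resolve per-record context fields lazily, at lookup time.'''
    _keys = ('actor', 'task')

    def __len__(self):
        return len(self._keys)

    def __iter__(self):
        return iter(self._keys)

    def __getitem__(self, key):
        # stand-in for the current_actor()/current_task() name lookups
        return f'<current {key} name>'

logging.basicConfig(format='%(actor)s %(task)s: %(message)s')
log = logging.LoggerAdapter(logging.getLogger('demo'), ContextInfo())
log.warning('hello')  # -> "<current actor name> <current task name>: hello"
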
|  | @ -23,10 +23,8 @@ import inspect | |||
| from contextlib import asynccontextmanager | ||||
| from dataclasses import dataclass | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Optional, | ||||
|     Callable, | ||||
|     AsyncGenerator, | ||||
|     Any, Optional, Callable, | ||||
|     AsyncGenerator, Dict, | ||||
|     AsyncIterator | ||||
| ) | ||||
| 
 | ||||
|  | @ -50,13 +48,12 @@ log = get_logger(__name__) | |||
| # - use __slots__ on ``Context``? | ||||
| 
 | ||||
| 
 | ||||
| class MsgStream(trio.abc.Channel): | ||||
| class ReceiveMsgStream(trio.abc.ReceiveChannel): | ||||
|     ''' | ||||
|     A bidirectional message stream for receiving logically sequenced | ||||
|     values over an inter-actor IPC ``Channel``. | ||||
| 
 | ||||
|     This is the type returned to a local task which entered either | ||||
|     ``Portal.open_stream_from()`` or ``Context.open_stream()``. | ||||
|     An IPC message stream for receiving logically sequenced values over | ||||
|     an inter-actor ``Channel``. This is the type returned to a local | ||||
|     task which entered either ``Portal.open_stream_from()`` or | ||||
|     ``Context.open_stream()``. | ||||
| 
 | ||||
|     Termination rules: | ||||
| 
 | ||||
|  | @ -98,9 +95,6 @@ class MsgStream(trio.abc.Channel): | |||
|         if self._eoc: | ||||
|             raise trio.EndOfChannel | ||||
| 
 | ||||
|         if self._closed: | ||||
|             raise trio.ClosedResourceError('This stream was closed') | ||||
| 
 | ||||
|         try: | ||||
|             msg = await self._rx_chan.receive() | ||||
|             return msg['yield'] | ||||
|  | @ -114,9 +108,6 @@ class MsgStream(trio.abc.Channel): | |||
|             # - 'error' | ||||
|             # possibly just handle msg['stop'] here! | ||||
| 
 | ||||
|             if self._closed: | ||||
|                 raise trio.ClosedResourceError('This stream was closed') | ||||
| 
 | ||||
|             if msg.get('stop') or self._eoc: | ||||
|                 log.debug(f"{self} was stopped at remote end") | ||||
| 
 | ||||
|  | @ -196,6 +187,7 @@ class MsgStream(trio.abc.Channel): | |||
|             return | ||||
| 
 | ||||
|         self._eoc = True | ||||
|         self._closed = True | ||||
| 
 | ||||
|         # NOTE: this is super subtle IPC messaging stuff: | ||||
|         # Relay stop iteration to far end **iff** we're | ||||
|  | @ -212,32 +204,29 @@ class MsgStream(trio.abc.Channel): | |||
| 
 | ||||
|         # In the bidirectional case, `Context.open_stream()` will create | ||||
|         # the `Actor._cids2qs` entry from a call to | ||||
|         # `Actor.get_context()` and will call us here to send the stop | ||||
|         # msg in ``__aexit__()`` on teardown. | ||||
|         try: | ||||
|             # NOTE: if this call is cancelled we expect this end to | ||||
|             # handle as though the stop was never sent (though if it | ||||
|             # was it shouldn't matter since it's unlikely a user | ||||
|             # will try to re-use a stream after attempting to close | ||||
|             # it). | ||||
|             with trio.CancelScope(shield=True): | ||||
|                 await self._ctx.send_stop() | ||||
|         # `Actor.get_context()` and will send the stop message in | ||||
|         # ``__aexit__()`` on teardown so it **does not** need to be | ||||
|         # called here. | ||||
|         if not self._ctx._portal: | ||||
|             # Only for 2 way streams can we can send stop from the | ||||
|             # caller side. | ||||
|             try: | ||||
|                 # NOTE: if this call is cancelled we expect this end to | ||||
|                 # handle as though the stop was never sent (though if it | ||||
|                 # was it shouldn't matter since it's unlikely a user | ||||
|                 # will try to re-use a stream after attempting to close | ||||
|                 # it). | ||||
|                 with trio.CancelScope(shield=True): | ||||
|                     await self._ctx.send_stop() | ||||
| 
 | ||||
|         except ( | ||||
|             trio.BrokenResourceError, | ||||
|             trio.ClosedResourceError | ||||
|         ): | ||||
|             # the underlying channel may already have been pulled | ||||
|             # in which case our stop message is meaningless since | ||||
|             # it can't traverse the transport. | ||||
|             ctx = self._ctx | ||||
|             log.warning( | ||||
|                 f'Stream was already destroyed?\n' | ||||
|                 f'actor: {ctx.chan.uid}\n' | ||||
|                 f'ctx id: {ctx.cid}' | ||||
|             ) | ||||
| 
 | ||||
|         self._closed = True | ||||
|             except ( | ||||
|                 trio.BrokenResourceError, | ||||
|                 trio.ClosedResourceError | ||||
|             ): | ||||
|                 # the underlying channel may already have been pulled | ||||
|                 # in which case our stop message is meaningless since | ||||
|                 # it can't traverse the transport. | ||||
|                 log.debug(f'Channel for {self} was already closed') | ||||
| 
 | ||||
|         # Do we close the local mem chan ``self._rx_chan`` ??!? | ||||
| 
 | ||||
|  | @ -280,8 +269,7 @@ class MsgStream(trio.abc.Channel): | |||
|         self, | ||||
| 
 | ||||
|     ) -> AsyncIterator[BroadcastReceiver]: | ||||
|         ''' | ||||
|         Allocate and return a ``BroadcastReceiver`` which delegates | ||||
|         '''Allocate and return a ``BroadcastReceiver`` which delegates | ||||
|         to this message stream. | ||||
| 
 | ||||
|         This allows multiple local tasks to receive each their own copy | ||||
|  | @ -318,15 +306,15 @@ class MsgStream(trio.abc.Channel): | |||
|         async with self._broadcaster.subscribe() as bstream: | ||||
|             assert bstream.key != self._broadcaster.key | ||||
|             assert bstream._recv == self._broadcaster._recv | ||||
| 
 | ||||
|             # NOTE: we patch on a `.send()` to the bcaster so that the | ||||
|             # caller can still conduct 2-way streaming using this | ||||
|             # ``bstream`` handle transparently as though it was the msg | ||||
|             # stream instance. | ||||
|             bstream.send = self.send  # type: ignore | ||||
| 
 | ||||
|             yield bstream | ||||
| 
 | ||||
| 
 | ||||
| class MsgStream(ReceiveMsgStream, trio.abc.Channel): | ||||
|     ''' | ||||
|     Bidirectional message stream for use within an inter-actor | ||||
|     ``Context``. | ||||
| 
 | ||||
|     ''' | ||||
|     async def send( | ||||
|         self, | ||||
|         data: Any | ||||
|  | @ -381,8 +369,6 @@ class Context: | |||
| 
 | ||||
|     # status flags | ||||
|     _cancel_called: bool = False | ||||
|     _cancel_msg: Optional[str] = None | ||||
|     _enter_debugger_on_cancel: bool = True | ||||
|     _started_called: bool = False | ||||
|     _started_received: bool = False | ||||
|     _stream_opened: bool = False | ||||
|  | @ -407,7 +393,7 @@ class Context: | |||
| 
 | ||||
|     async def _maybe_raise_from_remote_msg( | ||||
|         self, | ||||
|         msg: dict[str, Any], | ||||
|         msg: Dict[str, Any], | ||||
| 
 | ||||
|     ) -> None: | ||||
|         ''' | ||||
|  | @ -464,11 +450,7 @@ class Context: | |||
|                 if not self._scope_nursery._closed:  # type: ignore | ||||
|                     self._scope_nursery.start_soon(raiser) | ||||
| 
 | ||||
|     async def cancel( | ||||
|         self, | ||||
|         msg: Optional[str] = None, | ||||
| 
 | ||||
|     ) -> None: | ||||
|     async def cancel(self) -> None: | ||||
|         ''' | ||||
|         Cancel this inter-actor-task context. | ||||
| 
 | ||||
|  | @ -477,8 +459,6 @@ class Context: | |||
| 
 | ||||
|         ''' | ||||
|         side = 'caller' if self._portal else 'callee' | ||||
|         if msg: | ||||
|             assert side == 'callee', 'Only callee side can provide cancel msg' | ||||
| 
 | ||||
|         log.cancel(f'Cancelling {side} side of context to {self.chan.uid}') | ||||
| 
 | ||||
|  | @ -515,10 +495,8 @@ class Context: | |||
|                     log.cancel( | ||||
|                         "Timed out on cancelling remote task " | ||||
|                         f"{cid} for {self._portal.channel.uid}") | ||||
| 
 | ||||
|         # callee side remote task | ||||
|         else: | ||||
|             self._cancel_msg = msg | ||||
|             # callee side remote task | ||||
| 
 | ||||
|             # TODO: should we have an explicit cancel message | ||||
|             # or is relaying the local `trio.Cancelled` as an | ||||
|  | @ -603,23 +581,23 @@ class Context: | |||
|         async with MsgStream( | ||||
|             ctx=self, | ||||
|             rx_chan=ctx._recv_chan, | ||||
|         ) as stream: | ||||
|         ) as rchan: | ||||
| 
 | ||||
|             if self._portal: | ||||
|                 self._portal._streams.add(stream) | ||||
|                 self._portal._streams.add(rchan) | ||||
| 
 | ||||
|             try: | ||||
|                 self._stream_opened = True | ||||
| 
 | ||||
|                 # XXX: do we need this? | ||||
|                 # ensure we aren't cancelled before yielding the stream | ||||
|                 # ensure we aren't cancelled before delivering | ||||
|                 # the stream | ||||
|                 # await trio.lowlevel.checkpoint() | ||||
|                 yield stream | ||||
|                 yield rchan | ||||
| 
 | ||||
|                 # NOTE: Make the stream "one-shot use".  On exit, signal | ||||
|                 # XXX: Make the stream "one-shot use".  On exit, signal | ||||
|                 # ``trio.EndOfChannel``/``StopAsyncIteration`` to the | ||||
|                 # far end. | ||||
|                 await stream.aclose() | ||||
|                 await self.send_stop() | ||||
| 
 | ||||
|             finally: | ||||
|                 if self._portal: | ||||
|  |  | |||
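The hunks above rework the ``MsgStream``/``Context`` streaming layer (one-shot streams, stop-message handling, broadcast subscription). Below is a minimal sketch of the intended two-way usage, assuming the public ``@tractor.context`` and ``Portal.open_context()`` APIs; the names ``echo_server`` and ``'echoer'`` are made up for illustration.

    import trio
    import tractor


    @tractor.context
    async def echo_server(ctx: tractor.Context) -> None:
        # callee side: ack setup, then echo until the caller closes
        await ctx.started()
        async with ctx.open_stream() as stream:
            async for msg in stream:
                await stream.send(msg)


    async def main() -> None:
        async with tractor.open_nursery() as an:
            portal = await an.start_actor('echoer', enable_modules=[__name__])

            async with portal.open_context(echo_server) as (ctx, _first):
                # caller side: streams are "one-shot use"; a stop is
                # relayed to the far end when this block exits.
                async with ctx.open_stream() as stream:
                    await stream.send('ping')
                    assert await stream.receive() == 'ping'

            await portal.cancel_actor()


    if __name__ == '__main__':
        trio.run(main)

Multiple local tasks can each consume their own copy of the same stream via ``stream.subscribe()``, which is what the ``BroadcastReceiver`` delegation shown above provides.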
|  | @ -18,23 +18,19 @@ | |||
| ``trio`` inspired apis and helpers | ||||
| 
 | ||||
| """ | ||||
| from contextlib import asynccontextmanager as acm | ||||
| from functools import partial | ||||
| import inspect | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     TYPE_CHECKING, | ||||
| ) | ||||
| from typing import Tuple, List, Dict, Optional, TYPE_CHECKING | ||||
| import typing | ||||
| import warnings | ||||
| 
 | ||||
| from exceptiongroup import BaseExceptionGroup | ||||
| import trio | ||||
| from async_generator import asynccontextmanager | ||||
| 
 | ||||
| from ._debug import maybe_wait_for_debugger | ||||
| from ._state import current_actor, is_main_process | ||||
| from .log import get_logger, get_loglevel | ||||
| from ._runtime import Actor | ||||
| from ._actor import Actor | ||||
| from ._portal import Portal | ||||
| from ._exceptions import is_multi_cancelled | ||||
| from ._root import open_root_actor | ||||
|  | @ -47,7 +43,7 @@ if TYPE_CHECKING: | |||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| _default_bind_addr: tuple[str, int] = ('127.0.0.1', 0) | ||||
| _default_bind_addr: Tuple[str, int] = ('127.0.0.1', 0) | ||||
| 
 | ||||
| 
 | ||||
| class ActorNursery: | ||||
|  | @ -83,19 +79,15 @@ class ActorNursery: | |||
|         actor: Actor, | ||||
|         ria_nursery: trio.Nursery, | ||||
|         da_nursery: trio.Nursery, | ||||
|         errors: dict[tuple[str, str], BaseException], | ||||
|         errors: Dict[Tuple[str, str], Exception], | ||||
|     ) -> None: | ||||
|         # self.supervisor = supervisor  # TODO | ||||
|         self._actor: Actor = actor | ||||
|         self._ria_nursery = ria_nursery | ||||
|         self._da_nursery = da_nursery | ||||
|         self._children: dict[ | ||||
|             tuple[str, str], | ||||
|             tuple[ | ||||
|                 Actor, | ||||
|                 trio.Process | mp.Process, | ||||
|                 Optional[Portal], | ||||
|             ] | ||||
|         self._children: Dict[ | ||||
|             Tuple[str, str], | ||||
|             Tuple[Actor, mp.Process, Optional[Portal]] | ||||
|         ] = {} | ||||
|         # portals spawned with ``run_in_actor()`` are | ||||
|         # cancelled when their "main" result arrives | ||||
|  | @ -110,12 +102,12 @@ class ActorNursery: | |||
|         self, | ||||
|         name: str, | ||||
|         *, | ||||
|         bind_addr: tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: list[str] | None = None, | ||||
|         enable_modules: list[str] | None = None, | ||||
|         loglevel: str | None = None,  # set log level per subactor | ||||
|         nursery: trio.Nursery | None = None, | ||||
|         debug_mode: Optional[bool] | None = None, | ||||
|         bind_addr: Tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: List[str] = None, | ||||
|         enable_modules: List[str] = None, | ||||
|         loglevel: str = None,  # set log level per subactor | ||||
|         nursery: trio.Nursery = None, | ||||
|         debug_mode: Optional[bool] = None, | ||||
|         infect_asyncio: bool = False, | ||||
|     ) -> Portal: | ||||
|         ''' | ||||
|  | @ -181,10 +173,10 @@ class ActorNursery: | |||
|         *, | ||||
| 
 | ||||
|         name: Optional[str] = None, | ||||
|         bind_addr: tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: list[str] | None = None, | ||||
|         enable_modules: list[str] | None = None, | ||||
|         loglevel: str | None = None,  # set log level per subactor | ||||
|         bind_addr: Tuple[str, int] = _default_bind_addr, | ||||
|         rpc_module_paths: Optional[List[str]] = None, | ||||
|         enable_modules: List[str] = None, | ||||
|         loglevel: str = None,  # set log level per subactor | ||||
|         infect_asyncio: bool = False, | ||||
| 
 | ||||
|         **kwargs,  # explicit args to ``fn`` | ||||
|  | @ -295,17 +287,13 @@ class ActorNursery: | |||
|         self._join_procs.set() | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| @asynccontextmanager | ||||
| async def _open_and_supervise_one_cancels_all_nursery( | ||||
|     actor: Actor, | ||||
| 
 | ||||
| ) -> typing.AsyncGenerator[ActorNursery, None]: | ||||
| 
 | ||||
|     # TODO: yay or nay? | ||||
|     __tracebackhide__ = True | ||||
| 
 | ||||
|     # the collection of errors retrieved from spawned sub-actors | ||||
|     errors: dict[tuple[str, str], BaseException] = {} | ||||
|     errors: Dict[Tuple[str, str], Exception] = {} | ||||
| 
 | ||||
|     # This is the outermost level "daemon actor" nursery. It is awaited | ||||
|     # **after** the below inner "run in actor nursery". This allows for | ||||
|  | @ -338,17 +326,19 @@ async def _open_and_supervise_one_cancels_all_nursery( | |||
|                     # after we yield upwards | ||||
|                     yield anursery | ||||
| 
 | ||||
|                     # When we didn't error in the caller's scope, | ||||
|                     # signal all process-monitor-tasks to conduct | ||||
|                     # the "hard join phase". | ||||
|                     log.runtime( | ||||
|                         f"Waiting on subactors {anursery._children} " | ||||
|                         "to complete" | ||||
|                     ) | ||||
| 
 | ||||
|                     # Last bit before first nursery block ends in the case | ||||
|                     # where we didn't error in the caller's scope | ||||
| 
 | ||||
|                     # signal all process monitor tasks to conduct | ||||
|                     # hard join phase. | ||||
|                     anursery._join_procs.set() | ||||
| 
 | ||||
|                 except BaseException as inner_err: | ||||
|                     errors[actor.uid] = inner_err | ||||
|                 except BaseException as err: | ||||
| 
 | ||||
|                     # If we error in the root but the debugger is | ||||
|                     # engaged we don't want to prematurely kill (and | ||||
|  | @ -365,42 +355,49 @@ async def _open_and_supervise_one_cancels_all_nursery( | |||
|                     # worry more are coming). | ||||
|                     anursery._join_procs.set() | ||||
| 
 | ||||
|                     # XXX: hypothetically an error could be | ||||
|                     # raised and then a cancel signal shows up | ||||
|                     # slightly after in which case the `else:` | ||||
|                     # block here might not complete?  For now, | ||||
|                     # shield both. | ||||
|                     with trio.CancelScope(shield=True): | ||||
|                         etype = type(inner_err) | ||||
|                         if etype in ( | ||||
|                             trio.Cancelled, | ||||
|                             KeyboardInterrupt | ||||
|                         ) or ( | ||||
|                             is_multi_cancelled(inner_err) | ||||
|                         ): | ||||
|                             log.cancel( | ||||
|                                 f"Nursery for {current_actor().uid} " | ||||
|                                 f"was cancelled with {etype}") | ||||
|                         else: | ||||
|                             log.exception( | ||||
|                                 f"Nursery for {current_actor().uid} " | ||||
|                                 f"errored with") | ||||
|                     try: | ||||
|                         # XXX: hypothetically an error could be | ||||
|                         # raised and then a cancel signal shows up | ||||
|                         # slightly after in which case the `else:` | ||||
|                         # block here might not complete?  For now, | ||||
|                         # shield both. | ||||
|                         with trio.CancelScope(shield=True): | ||||
|                             etype = type(err) | ||||
|                             if etype in ( | ||||
|                                 trio.Cancelled, | ||||
|                                 KeyboardInterrupt | ||||
|                             ) or ( | ||||
|                                 is_multi_cancelled(err) | ||||
|                             ): | ||||
|                                 log.cancel( | ||||
|                                     f"Nursery for {current_actor().uid} " | ||||
|                                     f"was cancelled with {etype}") | ||||
|                             else: | ||||
|                                 log.exception( | ||||
|                                     f"Nursery for {current_actor().uid} " | ||||
|                                     f"errored with {err}, ") | ||||
| 
 | ||||
|                         # cancel all subactors | ||||
|                         await anursery.cancel() | ||||
|                             # cancel all subactors | ||||
|                             await anursery.cancel() | ||||
| 
 | ||||
|             # ria_nursery scope end | ||||
|                     except trio.MultiError as merr: | ||||
|                         # If we receive additional errors while waiting on | ||||
|                         # remaining subactors that were cancelled, | ||||
|                         # aggregate those errors with the original error | ||||
|                         # that triggered this teardown. | ||||
|                         if err not in merr.exceptions: | ||||
|                             raise trio.MultiError(merr.exceptions + [err]) | ||||
|                     else: | ||||
|                         raise | ||||
| 
 | ||||
|         # TODO: this is the handler around the ``.run_in_actor()`` | ||||
|         # nursery. Ideally we can drop this entirely in the future as | ||||
|         # the whole ``.run_in_actor()`` API should be built "on top of" | ||||
|         # this lower level spawn-request-cancel "daemon actor" API where | ||||
|         # a local in-actor task nursery is used with one-to-one task | ||||
|         # + `await Portal.run()` calls and the results/errors are | ||||
|         # handled directly (inline) and errors by the local nursery. | ||||
|                 # ria_nursery scope end | ||||
| 
 | ||||
|         # XXX: do we need a `trio.Cancelled` catch here as well? | ||||
|         # this is the catch around the ``.run_in_actor()`` nursery | ||||
|         except ( | ||||
| 
 | ||||
|             Exception, | ||||
|             BaseExceptionGroup, | ||||
|             trio.MultiError, | ||||
|             trio.Cancelled | ||||
| 
 | ||||
|         ) as err: | ||||
|  | @ -432,20 +429,18 @@ async def _open_and_supervise_one_cancels_all_nursery( | |||
|                     with trio.CancelScope(shield=True): | ||||
|                         await anursery.cancel() | ||||
| 
 | ||||
|                 # use `BaseExceptionGroup` as needed | ||||
|                 # use `MultiError` as needed | ||||
|                 if len(errors) > 1: | ||||
|                     raise BaseExceptionGroup( | ||||
|                         'tractor.ActorNursery errored with', | ||||
|                         tuple(errors.values()), | ||||
|                     ) | ||||
|                     raise trio.MultiError(tuple(errors.values())) | ||||
|                 else: | ||||
|                     raise list(errors.values())[0] | ||||
| 
 | ||||
|         # da_nursery scope end - nursery checkpoint | ||||
|     # final exit | ||||
|         # ria_nursery scope end - nursery checkpoint | ||||
| 
 | ||||
|     # after nursery exit | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| @asynccontextmanager | ||||
| async def open_nursery( | ||||
|     **kwargs, | ||||
| 
 | ||||
|  |  | |||
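The nursery changes above implement a "one cancels all" supervision strategy: an error from any ``run_in_actor()`` child cancels the whole actor nursery and the collected errors are re-raised together (as ``trio.MultiError`` on this branch, ``BaseExceptionGroup`` on master). A hedged sketch with made-up child functions:

    import trio
    import tractor


    async def borked() -> None:
        raise RuntimeError('this subactor errors')


    async def sleeper() -> None:
        await trio.sleep_forever()


    async def main() -> None:
        try:
            async with tractor.open_nursery() as an:
                await an.run_in_actor(borked)
                await an.run_in_actor(sleeper)
        except BaseException as err:
            # ``sleeper`` is cancelled and the error from ``borked`` is
            # re-raised here (wrapped in tractor's remote error type).
            print(f'nursery raised: {err!r}')


    if __name__ == '__main__':
        trio.run(main)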
|  | @ -26,10 +26,7 @@ support provided by ``tractor.Context.open_stream()`` and friends. | |||
| from __future__ import annotations | ||||
| import inspect | ||||
| import typing | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Callable, | ||||
| ) | ||||
| from typing import Dict, Any, Set, Callable, List, Tuple | ||||
| from functools import partial | ||||
| from async_generator import aclosing | ||||
| 
 | ||||
|  | @ -47,8 +44,8 @@ log = get_logger('messaging') | |||
| 
 | ||||
| async def fan_out_to_ctxs( | ||||
|     pub_async_gen_func: typing.Callable,  # it's an async gen ... gd mypy | ||||
|     topics2ctxs: dict[str, list], | ||||
|     packetizer: typing.Callable | None = None, | ||||
|     topics2ctxs: Dict[str, list], | ||||
|     packetizer: typing.Callable = None, | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Request and fan out quotes to each subscribed actor channel. | ||||
|  | @ -64,7 +61,7 @@ async def fan_out_to_ctxs( | |||
| 
 | ||||
|         async for published in pub_gen: | ||||
| 
 | ||||
|             ctx_payloads: list[tuple[Context, Any]] = [] | ||||
|             ctx_payloads: List[Tuple[Context, Any]] = [] | ||||
| 
 | ||||
|             for topic, data in published.items(): | ||||
|                 log.debug(f"publishing {topic, data}") | ||||
|  | @ -106,8 +103,8 @@ async def fan_out_to_ctxs( | |||
| 
 | ||||
| def modify_subs( | ||||
| 
 | ||||
|     topics2ctxs: dict[str, list[Context]], | ||||
|     topics: set[str], | ||||
|     topics2ctxs: Dict[str, List[Context]], | ||||
|     topics: Set[str], | ||||
|     ctx: Context, | ||||
| 
 | ||||
| ) -> None: | ||||
|  | @ -139,20 +136,20 @@ def modify_subs( | |||
|             topics2ctxs.pop(topic) | ||||
| 
 | ||||
| 
 | ||||
| _pub_state: dict[str, dict] = {} | ||||
| _pubtask2lock: dict[str, trio.StrictFIFOLock] = {} | ||||
| _pub_state: Dict[str, dict] = {} | ||||
| _pubtask2lock: Dict[str, trio.StrictFIFOLock] = {} | ||||
| 
 | ||||
| 
 | ||||
| def pub( | ||||
|     wrapped: typing.Callable | None = None, | ||||
|     wrapped: typing.Callable = None, | ||||
|     *, | ||||
|     tasks: set[str] = set(), | ||||
|     tasks: Set[str] = set(), | ||||
| ): | ||||
|     """Publisher async generator decorator. | ||||
| 
 | ||||
|     A publisher can be called multiple times from different actors but | ||||
|     will only spawn a finite set of internal tasks to stream values to | ||||
|     each caller. The ``tasks: set[str]`` argument to the decorator | ||||
|     each caller. The ``tasks: Set[str]`` argument to the decorator | ||||
|     specifies the names of the mutex set of publisher tasks.  When the | ||||
|     publisher function is called, an argument ``task_name`` must be | ||||
|     passed to specify which task (of the set named in ``tasks``) should | ||||
|  | @ -161,9 +158,9 @@ def pub( | |||
|     necessary. | ||||
| 
 | ||||
|     Values yielded from the decorated async generator must be | ||||
|     ``Dict[str, Dict[str, Any]]`` where the first level key is the topic | ||||
|     ``Dict[str, Dict[str, Any]]`` where the fist level key is the topic | ||||
|     string and determines which subscription the packet will be | ||||
|     delivered to and the value is a packet ``dict[str, Any]`` by default | ||||
|     delivered to and the value is a packet ``Dict[str, Any]`` by default | ||||
|     of the form: | ||||
| 
 | ||||
|     .. code:: python | ||||
|  | @ -189,7 +186,7 @@ def pub( | |||
| 
 | ||||
| 
 | ||||
|     The publisher must be called passing in the following arguments: | ||||
|     - ``topics: set[str]`` the topic sequence or "subscriptions" | ||||
|     - ``topics: Set[str]`` the topic sequence or "subscriptions" | ||||
|     - ``task_name: str`` the task to use (if ``tasks`` was passed) | ||||
|     - ``ctx: Context`` the tractor context (only needed if calling the | ||||
|       pub func without a nursery, otherwise this is provided implicitly) | ||||
|  | @ -234,7 +231,7 @@ def pub( | |||
|     if wrapped is None: | ||||
|         return partial(pub, tasks=tasks) | ||||
| 
 | ||||
|     task2lock: dict[str, trio.StrictFIFOLock] = {} | ||||
|     task2lock: Dict[str, trio.StrictFIFOLock] = {} | ||||
| 
 | ||||
|     for name in tasks: | ||||
|         task2lock[name] = trio.StrictFIFOLock() | ||||
|  | @ -246,11 +243,11 @@ def pub( | |||
|         # `wrapt` docs | ||||
|         async def _execute( | ||||
|             ctx: Context, | ||||
|             topics: set[str], | ||||
|             topics: Set[str], | ||||
|             *args, | ||||
|             # *, | ||||
|             task_name: str | None = None,  # default: only one task allocated | ||||
|             packetizer: Callable | None = None, | ||||
|             task_name: str = None,  # default: only one task allocated | ||||
|             packetizer: Callable = None, | ||||
|             **kwargs, | ||||
|         ): | ||||
|             if task_name is None: | ||||
|  |  | |||
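Per the ``pub`` docstring above, a decorated publisher yields ``{topic: payload}`` packets and only spawns one internal task per name in ``tasks``. A hedged sketch follows; it assumes the decorator's historical import path ``tractor.msg.pub`` and that the wrapped generator receives a ``get_topics`` callable (both are assumptions, not confirmed by this diff).

    import trio
    from tractor.msg import pub


    @pub(tasks={'quoter'})
    async def stream_quotes(
        get_topics,        # assumed: returns the currently subscribed topics
        rate: float = 1.0,
    ):
        while True:
            await trio.sleep(rate)
            # one packet per currently subscribed topic
            yield {topic: {'price': 42.0} for topic in get_topics()}

A subscriber would then invoke this through a portal, passing ``topics=...`` and ``task_name='quoter'`` as the docstring describes.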
|  | @ -18,14 +18,12 @@ | |||
| Log like a forester! | ||||
| 
 | ||||
| """ | ||||
| from collections.abc import Mapping | ||||
| import sys | ||||
| import logging | ||||
| import colorlog  # type: ignore | ||||
| from typing import Optional | ||||
| 
 | ||||
| import trio | ||||
| 
 | ||||
| from ._state import current_actor | ||||
| from ._state import ActorContextInfo | ||||
| 
 | ||||
| 
 | ||||
| _proj_name: str = 'tractor' | ||||
|  | @ -38,8 +36,7 @@ LOG_FORMAT = ( | |||
|     # "{bold_white}{log_color}{asctime}{reset}" | ||||
|     "{log_color}{asctime}{reset}" | ||||
|     " {bold_white}{thin_white}({reset}" | ||||
|     "{thin_white}{actor_name}[{actor_uid}], " | ||||
|     "{process}, {task}){reset}{bold_white}{thin_white})" | ||||
|     "{thin_white}{actor}, {process}, {task}){reset}{bold_white}{thin_white})" | ||||
|     " {reset}{log_color}[{reset}{bold_log_color}{levelname}{reset}{log_color}]" | ||||
|     " {log_color}{name}" | ||||
|     " {thin_white}{filename}{log_color}:{reset}{thin_white}{lineno}{log_color}" | ||||
|  | @ -139,40 +136,9 @@ class StackLevelAdapter(logging.LoggerAdapter): | |||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| _conc_name_getters = { | ||||
|     'task': lambda: trio.lowlevel.current_task().name, | ||||
|     'actor': lambda: current_actor(), | ||||
|     'actor_name': lambda: current_actor().name, | ||||
|     'actor_uid': lambda: current_actor().uid[1][:6], | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| class ActorContextInfo(Mapping): | ||||
|     "Dynamic lookup for local actor and task names" | ||||
|     _context_keys = ( | ||||
|         'task', | ||||
|         'actor', | ||||
|         'actor_name', | ||||
|         'actor_uid', | ||||
|     ) | ||||
| 
 | ||||
|     def __len__(self): | ||||
|         return len(self._context_keys) | ||||
| 
 | ||||
|     def __iter__(self): | ||||
|         return iter(self._context_keys) | ||||
| 
 | ||||
|     def __getitem__(self, key: str) -> str: | ||||
|         try: | ||||
|             return _conc_name_getters[key]() | ||||
|         except RuntimeError: | ||||
|             # no local actor/task context initialized yet | ||||
|             return f'no {key} context' | ||||
| 
 | ||||
| 
 | ||||
| def get_logger( | ||||
| 
 | ||||
|     name: str | None = None, | ||||
|     name: str = None, | ||||
|     _root_name: str = _proj_name, | ||||
| 
 | ||||
| ) -> StackLevelAdapter: | ||||
|  | @ -207,7 +173,7 @@ def get_logger( | |||
| 
 | ||||
| 
 | ||||
| def get_console_log( | ||||
|     level: str | None = None, | ||||
|     level: str = None, | ||||
|     **kwargs, | ||||
| ) -> logging.LoggerAdapter: | ||||
|     '''Get the package logger and enable a handler which writes to stderr. | ||||
|  |  | |||
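For reference, the logging helpers touched above are used roughly like this (a small sketch; the custom ``.runtime()``/``.cancel()`` level methods come from ``StackLevelAdapter``):

    from tractor.log import get_logger, get_console_log

    log = get_logger(__name__)  # per-module adapter under the 'tractor' root


    def do_work() -> None:
        log.info('starting work')      # stdlib levels work as usual
        log.runtime('runtime detail')  # tractor's custom levels
        log.cancel('task cancelled')


    # in an application entry point, enable stderr output:
    # get_console_log('info')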
|  | @ -24,7 +24,7 @@ Built-in messaging patterns, types, APIs and helpers. | |||
| # ``pkgutil.resolve_name()`` internally uses | ||||
| # ``importlib.import_module()`` which can be filtered by inserting | ||||
| # a ``MetaPathFinder`` into ``sys.meta_path`` (which we could do before | ||||
| # entering the ``_runtime.process_messages()`` loop). | ||||
| # entering the ``Actor._process_messages()`` loop). | ||||
| # - https://github.com/python/cpython/blob/main/Lib/pkgutil.py#L645 | ||||
| # - https://stackoverflow.com/questions/1350466/preventing-python-code-from-importing-certain-modules | ||||
| #   - https://stackoverflow.com/a/63320902 | ||||
|  |  | |||
|  | @ -0,0 +1,17 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| from ._tractor_test import tractor_test | ||||
|  | @ -0,0 +1,104 @@ | |||
| # tractor: structured concurrent "actors". | ||||
| # Copyright 2018-eternity Tyler Goodlet. | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| import inspect | ||||
| import platform | ||||
| from functools import partial, wraps | ||||
| 
 | ||||
| import trio | ||||
| import tractor | ||||
| 
 | ||||
| 
 | ||||
| __all__ = ['tractor_test'] | ||||
| 
 | ||||
| 
 | ||||
| def tractor_test(fn): | ||||
|     """ | ||||
|     Use: | ||||
| 
 | ||||
|     @tractor_test | ||||
|     async def test_whatever(): | ||||
|         await ... | ||||
| 
 | ||||
|     If fixtures: | ||||
| 
 | ||||
|         - ``arb_addr`` (a socket addr tuple where arbiter is listening) | ||||
|         - ``loglevel`` (logging level passed to tractor internals) | ||||
|         - ``start_method`` (subprocess spawning backend) | ||||
| 
 | ||||
|     are defined in the `pytest` fixture space, they will be automatically | ||||
|     injected into tests declaring these funcargs. | ||||
|     """ | ||||
|     @wraps(fn) | ||||
|     def wrapper( | ||||
|         *args, | ||||
|         loglevel=None, | ||||
|         arb_addr=None, | ||||
|         start_method=None, | ||||
|         **kwargs | ||||
|     ): | ||||
|         # __tracebackhide__ = True | ||||
| 
 | ||||
|         if 'arb_addr' in inspect.signature(fn).parameters: | ||||
|             # injects test suite fixture value to test as well | ||||
|             # as `run()` | ||||
|             kwargs['arb_addr'] = arb_addr | ||||
| 
 | ||||
|         if 'loglevel' in inspect.signature(fn).parameters: | ||||
|             # allows test suites to define a 'loglevel' fixture | ||||
|             # that activates the internal logging | ||||
|             kwargs['loglevel'] = loglevel | ||||
| 
 | ||||
|         if start_method is None: | ||||
|             if platform.system() == "Windows": | ||||
|                 start_method = 'spawn' | ||||
|             else: | ||||
|                 start_method = 'trio' | ||||
| 
 | ||||
|         if 'start_method' in inspect.signature(fn).parameters: | ||||
|             # set of subprocess spawning backends | ||||
|             kwargs['start_method'] = start_method | ||||
| 
 | ||||
|         if kwargs: | ||||
| 
 | ||||
|             # use explicit root actor start | ||||
| 
 | ||||
|             async def _main(): | ||||
|                 async with tractor.open_root_actor( | ||||
|                     # **kwargs, | ||||
|                     arbiter_addr=arb_addr, | ||||
|                     loglevel=loglevel, | ||||
|                     start_method=start_method, | ||||
| 
 | ||||
|                     # TODO: only enable when pytest is passed --pdb | ||||
|                     # debug_mode=True, | ||||
| 
 | ||||
|                 ) as actor: | ||||
|                     await fn(*args, **kwargs) | ||||
| 
 | ||||
|             main = _main | ||||
| 
 | ||||
|         else: | ||||
|             # use implicit root actor start | ||||
|             main = partial(fn, *args, **kwargs) | ||||
| 
 | ||||
|         return trio.run(main) | ||||
|             # arbiter_addr=arb_addr, | ||||
|             # loglevel=loglevel, | ||||
|             # start_method=start_method, | ||||
|         # ) | ||||
| 
 | ||||
|     return wrapper | ||||
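A usage sketch for the decorator above: an async test function whose parameters name the fixtures the wrapper special-cases. The ``arb_addr``/``start_method`` fixtures themselves are assumed to be defined in the test suite's ``conftest.py``.

    import tractor
    from tractor.testing import tractor_test


    @tractor_test
    async def test_spawn_one_child(arb_addr, start_method):
        # runs under ``trio.run()`` with a root actor opened for us
        async with tractor.open_nursery() as an:
            portal = await an.start_actor('child', enable_modules=[__name__])
            await portal.cancel_actor()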
|  | @ -466,11 +466,11 @@ async def open_channel_from( | |||
|         ): | ||||
|             # sync to a "started()"-like first delivered value from the | ||||
|             # ``asyncio`` task. | ||||
|             first = await chan.receive() | ||||
| 
 | ||||
|             # deliver stream handle upward | ||||
|             try: | ||||
|                 with chan._trio_cs: | ||||
|                     first = await chan.receive() | ||||
| 
 | ||||
|                     # deliver stream handle upward | ||||
|                     yield first, chan | ||||
|             finally: | ||||
|                 chan._trio_exited = True | ||||
|  | @ -491,18 +491,16 @@ def run_as_asyncio_guest( | |||
|     SC semantics. | ||||
| 
 | ||||
|     ''' | ||||
|     # Uh, oh. | ||||
|     # | ||||
|     # :o | ||||
|     # Uh, oh. :o | ||||
| 
 | ||||
|     # It looks like your event loop has caught a case of the ``trio``s. | ||||
| 
 | ||||
|     # :() | ||||
| 
 | ||||
|     # Don't worry, we've heard you'll barely notice. You might | ||||
|     # hallucinate a few more propagating errors and feel like your | ||||
|     # digestion has slowed but if anything gets too bad your parents | ||||
|     # will know about it. | ||||
|     # Don't worry, we've heard you'll barely notice. You might hallucinate | ||||
|     # a few more propagating errors and feel like your digestion has | ||||
|     # slowed but if anything gets too bad your parents will know about | ||||
|     # it. | ||||
| 
 | ||||
|     # :) | ||||
| 
 | ||||
|  |  | |||
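The ``open_channel_from()`` change above moves the first-value sync inside the guarded scope. A hedged sketch of the calling convention (to actually run, this must execute inside an actor spawned with ``infect_asyncio=True``; the echo task is made up):

    import asyncio

    import trio
    from tractor import to_asyncio


    async def aio_echo(
        to_trio: trio.MemorySendChannel,
        from_trio: asyncio.Queue,
    ) -> None:
        # runs on the asyncio loop; the first send acts like ``started()``
        to_trio.send_nowait('started')
        while True:
            msg = await from_trio.get()
            to_trio.send_nowait(msg)


    async def trio_side() -> None:
        async with to_asyncio.open_channel_from(aio_echo) as (first, chan):
            assert first == 'started'
            await chan.send('ping')
            assert await chan.receive() == 'ping'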
|  | @ -21,7 +21,6 @@ Sugary patterns for trio + tractor designs. | |||
| from ._mngrs import ( | ||||
|     gather_contexts, | ||||
|     maybe_open_context, | ||||
|     maybe_open_nursery, | ||||
| ) | ||||
| from ._broadcast import ( | ||||
|     broadcast_receiver, | ||||
|  | @ -36,5 +35,4 @@ __all__ = [ | |||
|     'BroadcastReceiver', | ||||
|     'Lagged', | ||||
|     'maybe_open_context', | ||||
|     'maybe_open_nursery', | ||||
| ] | ||||
|  |  | |||
|  | @ -23,6 +23,7 @@ from __future__ import annotations | |||
| from abc import abstractmethod | ||||
| from collections import deque | ||||
| from contextlib import asynccontextmanager | ||||
| from dataclasses import dataclass | ||||
| from functools import partial | ||||
| from operator import ne | ||||
| from typing import Optional, Callable, Awaitable, Any, AsyncIterator, Protocol | ||||
|  | @ -32,10 +33,7 @@ import trio | |||
| from trio._core._run import Task | ||||
| from trio.abc import ReceiveChannel | ||||
| from trio.lowlevel import current_task | ||||
| from msgspec import Struct | ||||
| from tractor.log import get_logger | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| # A regular invariant generic type | ||||
| T = TypeVar("T") | ||||
|  | @ -88,7 +86,8 @@ class Lagged(trio.TooSlowError): | |||
|     ''' | ||||
| 
 | ||||
| 
 | ||||
| class BroadcastState(Struct): | ||||
| @dataclass | ||||
| class BroadcastState: | ||||
|     ''' | ||||
|     Common state to all receivers of a broadcast. | ||||
| 
 | ||||
|  | @ -111,35 +110,7 @@ class BroadcastState(Struct): | |||
|     eoc: bool = False | ||||
| 
 | ||||
|     # If the broadcaster was cancelled, we might as well track it | ||||
|     cancelled: dict[int, Task] = {} | ||||
| 
 | ||||
|     def statistics(self) -> dict[str, Any]: | ||||
|         ''' | ||||
|         Return broadcast receiver group "statistics" like many of | ||||
|         ``trio``'s internal task-sync primitives. | ||||
| 
 | ||||
|         ''' | ||||
|         key: int | None | ||||
|         ev: trio.Event | None | ||||
| 
 | ||||
|         subs = self.subs | ||||
|         if self.recv_ready is not None: | ||||
|             key, ev = self.recv_ready | ||||
|         else: | ||||
|             key = ev = None | ||||
| 
 | ||||
|         qlens: dict[int, int] = {} | ||||
|         for tid, sz in subs.items(): | ||||
|             qlens[tid] = sz if sz != -1 else 0 | ||||
| 
 | ||||
|         return { | ||||
|             'open_consumers': len(subs), | ||||
|             'queued_len_by_task': qlens, | ||||
|             'max_buffer_size': self.maxlen, | ||||
|             'tasks_waiting': ev.statistics().tasks_waiting if ev else 0, | ||||
|             'tasks_cancelled': self.cancelled, | ||||
|             'next_value_receiver_id': key, | ||||
|         } | ||||
|     cancelled: bool = False | ||||
| 
 | ||||
| 
 | ||||
| class BroadcastReceiver(ReceiveChannel): | ||||
|  | @ -157,40 +128,23 @@ class BroadcastReceiver(ReceiveChannel): | |||
|         rx_chan: AsyncReceiver, | ||||
|         state: BroadcastState, | ||||
|         receive_afunc: Optional[Callable[[], Awaitable[Any]]] = None, | ||||
|         raise_on_lag: bool = True, | ||||
| 
 | ||||
|     ) -> None: | ||||
| 
 | ||||
|         # register the original underlying (clone) | ||||
|         self.key = id(self) | ||||
|         self._state = state | ||||
| 
 | ||||
|         # each consumer has an int count which indicates | ||||
|         # which index contains the next value that the task has not yet | ||||
|         # consumed and thus should read. In the "up-to-date" case the | ||||
|         # consumer task must wait for a new value from the underlying | ||||
|         # receiver and we use ``-1`` as the sentinel for this state. | ||||
|         state.subs[self.key] = -1 | ||||
| 
 | ||||
|         # underlying for this receiver | ||||
|         self._rx = rx_chan | ||||
|         self._recv = receive_afunc or rx_chan.receive | ||||
|         self._closed: bool = False | ||||
|         self._raise_on_lag = raise_on_lag | ||||
| 
 | ||||
|     def receive_nowait( | ||||
|         self, | ||||
|         _key: int | None = None, | ||||
|         _state: BroadcastState | None = None, | ||||
|     async def receive(self) -> ReceiveType: | ||||
| 
 | ||||
|     ) -> Any: | ||||
|         ''' | ||||
|         Sync version of `.receive()` which does all the low level work | ||||
|         of receiving from the underlying/wrapped receive channel. | ||||
| 
 | ||||
|         ''' | ||||
|         key = _key or self.key | ||||
|         state = _state or self._state | ||||
|         key = self.key | ||||
|         state = self._state | ||||
| 
 | ||||
|         # TODO: ideally we can make some way to "lock out" the | ||||
|         # underlying receive channel in some way such that if some task | ||||
|  | @ -223,173 +177,128 @@ class BroadcastReceiver(ReceiveChannel): | |||
|                 # return this value." | ||||
|                 # https://docs.rs/tokio/1.11.0/tokio/sync/broadcast/index.html#lagging | ||||
| 
 | ||||
|                 mxln = state.maxlen | ||||
|                 lost = seq - mxln | ||||
| 
 | ||||
|                 # decrement to the last value and expect | ||||
|                 # consumer to either handle the ``Lagged`` and come back | ||||
|                 # or bail out on its own (thus un-subscribing) | ||||
|                 state.subs[key] = mxln - 1 | ||||
|                 state.subs[key] = state.maxlen - 1 | ||||
| 
 | ||||
|                 # this task was overrun by the producer side | ||||
|                 task: Task = current_task() | ||||
|                 msg = f'Task `{task.name}` overrun and dropped `{lost}` values' | ||||
| 
 | ||||
|                 if self._raise_on_lag: | ||||
|                     raise Lagged(msg) | ||||
|                 else: | ||||
|                     log.warning(msg) | ||||
|                     return self.receive_nowait(_key, _state) | ||||
|                 raise Lagged(f'Task {task.name} was overrun') | ||||
| 
 | ||||
|             state.subs[key] -= 1 | ||||
|             return value | ||||
| 
 | ||||
|         raise trio.WouldBlock | ||||
|         # current task already has the latest value **and** is the | ||||
|         # first task to begin waiting for a new one | ||||
|         if state.recv_ready is None: | ||||
| 
 | ||||
|     async def _receive_from_underlying( | ||||
|         self, | ||||
|         key: int, | ||||
|         state: BroadcastState, | ||||
|             if self._closed: | ||||
|                 raise trio.ClosedResourceError | ||||
| 
 | ||||
|     ) -> ReceiveType: | ||||
|             event = trio.Event() | ||||
|             state.recv_ready = key, event | ||||
| 
 | ||||
|         if self._closed: | ||||
|             raise trio.ClosedResourceError | ||||
| 
 | ||||
|         event = trio.Event() | ||||
|         assert state.recv_ready is None | ||||
|         state.recv_ready = key, event | ||||
| 
 | ||||
|         try: | ||||
|             # if we're cancelled here it should be | ||||
|             # fine to bail without affecting any other consumers | ||||
|             # right? | ||||
|             value = await self._recv() | ||||
|             try: | ||||
|                 value = await self._recv() | ||||
| 
 | ||||
|             # items with lower indices are "newer" | ||||
|             # NOTE: ``collections.deque`` implicitly takes care of | ||||
|             # truncating values outside our ``state.maxlen``. In the | ||||
|             # alt-backend-array-case we'll need to make sure this is | ||||
|             # implemented in similar ringer-buffer-ish style. | ||||
|             state.queue.appendleft(value) | ||||
|                 # items with lower indices are "newer" | ||||
|                 # NOTE: ``collections.deque`` implicitly takes care of | ||||
|                 # trucating values outside our ``state.maxlen``. In the | ||||
|                 # truncating values outside our ``state.maxlen``. In the | ||||
|                 # implemented in similar ringer-buffer-ish style. | ||||
|                 state.queue.appendleft(value) | ||||
| 
 | ||||
|             # broadcast new value to all subscribers by increasing | ||||
|             # all sequence numbers that will point in the queue to | ||||
|             # their latest available value. | ||||
|                 # broadcast new value to all subscribers by increasing | ||||
|                 # all sequence numbers that will point in the queue to | ||||
|                 # their latest available value. | ||||
| 
 | ||||
|             # don't decrement the sequence for this task since we | ||||
|             # already retrieved the last value | ||||
|                 # don't decrement the sequence for this task since we | ||||
|                 # already retrieved the last value | ||||
| 
 | ||||
|             # XXX: which of these impls is fastest? | ||||
|             # subs = state.subs.copy() | ||||
|             # subs.pop(key) | ||||
|                 # XXX: which of these impls is fastest? | ||||
| 
 | ||||
|             for sub_key in filter( | ||||
|                 # lambda k: k != key, state.subs, | ||||
|                 partial(ne, key), state.subs, | ||||
|                 # subs = state.subs.copy() | ||||
|                 # subs.pop(key) | ||||
| 
 | ||||
|                 for sub_key in filter( | ||||
|                     # lambda k: k != key, state.subs, | ||||
|                     partial(ne, key), state.subs, | ||||
|                 ): | ||||
|                     state.subs[sub_key] += 1 | ||||
| 
 | ||||
|                 # NOTE: this should ONLY be set if the above task was *NOT* | ||||
|                 # cancelled on the `._recv()` call. | ||||
|                 event.set() | ||||
|                 return value | ||||
| 
 | ||||
|             except trio.EndOfChannel: | ||||
|                 # if any one consumer gets an EOC from the underlying | ||||
|                 # receiver we need to unblock and send that signal to | ||||
|                 # all other consumers. | ||||
|                 self._state.eoc = True | ||||
|                 if event.statistics().tasks_waiting: | ||||
|                     event.set() | ||||
|                 raise | ||||
| 
 | ||||
|             except ( | ||||
|                 trio.Cancelled, | ||||
|             ): | ||||
|                 state.subs[sub_key] += 1 | ||||
|                 # handle cancelled specially otherwise sibling | ||||
|                 # consumers will be awoken with a sequence of -1 | ||||
|                 # and will potentially try to rewait the underlying | ||||
|                 # receiver instead of just cancelling immediately. | ||||
|                 self._state.cancelled = True | ||||
|                 if event.statistics().tasks_waiting: | ||||
|                     event.set() | ||||
|                 raise | ||||
| 
 | ||||
|             # NOTE: this should ONLY be set if the above task was *NOT* | ||||
|             # cancelled on the `._recv()` call. | ||||
|             event.set() | ||||
|             return value | ||||
|             finally: | ||||
| 
 | ||||
|         except trio.EndOfChannel: | ||||
|             # if any one consumer gets an EOC from the underlying | ||||
|             # receiver we need to unblock and send that signal to | ||||
|             # all other consumers. | ||||
|             self._state.eoc = True | ||||
|             if event.statistics().tasks_waiting: | ||||
|                 event.set() | ||||
|             raise | ||||
| 
 | ||||
|         except ( | ||||
|             trio.Cancelled, | ||||
|         ): | ||||
|             # handle cancelled specially otherwise sibling | ||||
|             # consumers will be awoken with a sequence of -1 | ||||
|             # and will potentially try to rewait the underlying | ||||
|             # receiver instead of just cancelling immediately. | ||||
|             self._state.cancelled[key] = current_task() | ||||
|             if event.statistics().tasks_waiting: | ||||
|                 event.set() | ||||
|             raise | ||||
| 
 | ||||
|         finally: | ||||
|             # Reset receiver waiter task event for next blocking condition. | ||||
|             # this MUST be reset even if the above ``.recv()`` call | ||||
|             # was cancelled to avoid the next consumer from blocking on | ||||
|             # an event that won't be set! | ||||
|             state.recv_ready = None | ||||
| 
 | ||||
|     async def receive(self) -> ReceiveType: | ||||
|         key = self.key | ||||
|         state = self._state | ||||
| 
 | ||||
|         try: | ||||
|             return self.receive_nowait( | ||||
|                 _key=key, | ||||
|                 _state=state, | ||||
|             ) | ||||
|         except trio.WouldBlock: | ||||
|             pass | ||||
| 
 | ||||
|         # current task already has the latest value **and** is the | ||||
|         # first task to begin waiting for a new one so we begin blocking | ||||
|         # until rescheduled with the a new value from the underlying. | ||||
|         if state.recv_ready is None: | ||||
|             return await self._receive_from_underlying(key, state) | ||||
|                 # Reset receiver waiter task event for next blocking condition. | ||||
|                 # this MUST be reset even if the above ``.recv()`` call | ||||
|                 # was cancelled to avoid the next consumer from blocking on | ||||
|                 # an event that won't be set! | ||||
|                 state.recv_ready = None | ||||
| 
 | ||||
|         # This task is all caught up and ready to receive the latest | ||||
|         # value, so queue/schedule it to be woken on the next internal | ||||
|         # event. | ||||
|         # value, so queue sched it on the internal event. | ||||
|         else: | ||||
|             while state.recv_ready is not None: | ||||
|                 # seq = state.subs[key] | ||||
|                 # assert seq == -1  # sanity | ||||
|                 _, ev = state.recv_ready | ||||
|                 await ev.wait() | ||||
|                 try: | ||||
|                     return self.receive_nowait( | ||||
|                         _key=key, | ||||
|                         _state=state, | ||||
|                     ) | ||||
|                 except trio.WouldBlock: | ||||
|                     if self._closed: | ||||
|                         raise trio.ClosedResourceError | ||||
|             seq = state.subs[key] | ||||
|             assert seq == -1  # sanity | ||||
|             _, ev = state.recv_ready | ||||
|             await ev.wait() | ||||
| 
 | ||||
|                     subs = state.subs | ||||
|                     if ( | ||||
|                         len(subs) == 1 | ||||
|                         and key in subs | ||||
|                         # or cancelled | ||||
|                     ): | ||||
|                         # XXX: we are the last and only user of this BR so | ||||
|                         # likely it makes sense to unwind back to the | ||||
|                         # underlying? | ||||
|                         # import tractor | ||||
|                         # await tractor.breakpoint() | ||||
|                         log.warning( | ||||
|                             f'Only one sub left for {self}?\n' | ||||
|                             'We can probably unwind from breceiver?' | ||||
|                         ) | ||||
|             # NOTE: if we ever would like the behaviour where if the | ||||
|             # first task to recv on the underlying is cancelled but it | ||||
|             # still DOES trigger the ``.recv_ready``, event we'll likely need | ||||
|             # this logic: | ||||
| 
 | ||||
|                     # XXX: In the case where the first task to allocate the | ||||
|                     # ``.recv_ready`` event is cancelled we will be woken | ||||
|                     # with a non-incremented sequence number (the ``-1`` | ||||
|                     # sentinel) and thus will read the oldest value if we | ||||
|                     # use that. Instead we need to detect if we have not | ||||
|                     # been incremented and then receive again. | ||||
|                     # return await self.receive() | ||||
|             if seq > -1: | ||||
|                 # stuff from above.. | ||||
|                 seq = state.subs[key] | ||||
| 
 | ||||
|             return await self._receive_from_underlying(key, state) | ||||
|                 value = state.queue[seq] | ||||
|                 state.subs[key] -= 1 | ||||
|                 return value | ||||
| 
 | ||||
|             elif seq == -1: | ||||
|                 # XXX: In the case where the first task to allocate the | ||||
|                 # ``.recv_ready`` event is cancelled we will be woken with | ||||
|                 # a non-incremented sequence number and thus will read the | ||||
|                 # oldest value if we use that. Instead we need to detect if | ||||
|                 # we have not been incremented and then receive again. | ||||
|                 return await self.receive() | ||||
| 
 | ||||
|             else: | ||||
|                 raise ValueError(f'Invalid sequence {seq}!?') | ||||
| 
 | ||||
|     @asynccontextmanager | ||||
|     async def subscribe( | ||||
|         self, | ||||
|         raise_on_lag: bool = True, | ||||
| 
 | ||||
|     ) -> AsyncIterator[BroadcastReceiver]: | ||||
|         ''' | ||||
|         Subscribe for values from this broadcast receiver. | ||||
|  | @ -407,7 +316,6 @@ class BroadcastReceiver(ReceiveChannel): | |||
|             rx_chan=self._rx, | ||||
|             state=state, | ||||
|             receive_afunc=self._recv, | ||||
|             raise_on_lag=raise_on_lag, | ||||
|         ) | ||||
|         # assert clone in state.subs | ||||
|         assert br.key in state.subs | ||||
|  | @ -444,8 +352,7 @@ def broadcast_receiver( | |||
| 
 | ||||
|     recv_chan: AsyncReceiver, | ||||
|     max_buffer_size: int, | ||||
|     receive_afunc: Optional[Callable[[], Awaitable[Any]]] = None, | ||||
|     raise_on_lag: bool = True, | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> BroadcastReceiver: | ||||
| 
 | ||||
|  | @ -456,6 +363,5 @@ def broadcast_receiver( | |||
|             maxlen=max_buffer_size, | ||||
|             subs={}, | ||||
|         ), | ||||
|         receive_afunc=receive_afunc, | ||||
|         raise_on_lag=raise_on_lag, | ||||
|         **kwargs, | ||||
|     ) | ||||
|  |  | |||
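The broadcast receiver semantics changed above, in brief: every subscriber sees every value, and a consumer that falls more than ``max_buffer_size`` values behind is overrun and (by default) gets ``Lagged``. A hedged sketch of plain-``trio`` usage:

    import trio
    from tractor.trionics import broadcast_receiver, Lagged


    async def consumer(name: str, brx) -> None:
        # each subscription gets its own copy of every published value
        async with brx.subscribe() as sub:
            try:
                async for value in sub:
                    print(name, value)
            except Lagged:
                # this task was overrun by the producer side
                print(f'{name} lagged and was overrun')


    async def main() -> None:
        tx, rx = trio.open_memory_channel(0)
        brx = broadcast_receiver(rx, max_buffer_size=8)

        async with trio.open_nursery() as n:
            n.start_soon(consumer, 'one', brx)
            n.start_soon(consumer, 'two', brx)
            for i in range(20):
                await tx.send(i)
            await tx.aclose()


    if __name__ == '__main__':
        trio.run(main)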
|  | @ -19,7 +19,6 @@ Async context manager primitives with hard ``trio``-aware semantics | |||
| 
 | ||||
| ''' | ||||
| from contextlib import asynccontextmanager as acm | ||||
| import inspect | ||||
| from typing import ( | ||||
|     Any, | ||||
|     AsyncContextManager, | ||||
|  | @ -35,8 +34,8 @@ from typing import ( | |||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| 
 | ||||
| from .._state import current_actor | ||||
| from ..log import get_logger | ||||
| from .._state import current_actor | ||||
| 
 | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
|  | @ -45,25 +44,6 @@ log = get_logger(__name__) | |||
| T = TypeVar("T") | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def maybe_open_nursery( | ||||
|     nursery: trio.Nursery | None = None, | ||||
|     shield: bool = False, | ||||
| ) -> AsyncGenerator[trio.Nursery, Any]: | ||||
|     ''' | ||||
|     Create a new nursery if None provided. | ||||
| 
 | ||||
|     Blocks on exit as expected if no input nursery is provided. | ||||
| 
 | ||||
|     ''' | ||||
|     if nursery is not None: | ||||
|         yield nursery | ||||
|     else: | ||||
|         async with trio.open_nursery() as nursery: | ||||
|             nursery.cancel_scope.shield = shield | ||||
|             yield nursery | ||||
| 
 | ||||
| 
 | ||||
| async def _enter_and_wait( | ||||
| 
 | ||||
|     mngr: AsyncContextManager[T], | ||||
|  | @ -101,7 +81,7 @@ async def gather_contexts( | |||
|     This function is somewhat similar to common usage of | ||||
|     ``contextlib.AsyncExitStack.enter_async_context()`` (in a loop) in | ||||
|     combo with ``asyncio.gather()`` except the managers are concurrently | ||||
|     entered and exited, and cancellation just works. | ||||
|     entered and exited; cancellation just works. | ||||
| 
 | ||||
|     ''' | ||||
|     unwrapped: dict[int, Optional[T]] = {}.fromkeys(id(mngr) for mngr in mngrs) | ||||
|  | @ -109,17 +89,6 @@ async def gather_contexts( | |||
|     all_entered = trio.Event() | ||||
|     parent_exit = trio.Event() | ||||
| 
 | ||||
|     # XXX: ensure greedy sequence of manager instances | ||||
|     # since a lazy inline generator doesn't seem to work | ||||
|     # with `async with` syntax. | ||||
|     mngrs = list(mngrs) | ||||
| 
 | ||||
|     if not mngrs: | ||||
|         raise ValueError( | ||||
|             'input mngrs is empty?\n' | ||||
|             'Did try to use inline generator syntax?' | ||||
|         ) | ||||
| 
 | ||||
|     async with trio.open_nursery() as n: | ||||
|         for mngr in mngrs: | ||||
|             n.start_soon( | ||||
|  | @ -133,13 +102,11 @@ async def gather_contexts( | |||
|         # deliver control once all managers have started up | ||||
|         await all_entered.wait() | ||||
| 
 | ||||
|         try: | ||||
|             yield tuple(unwrapped.values()) | ||||
|         finally: | ||||
|             # NOTE: this is ABSOLUTELY REQUIRED to avoid | ||||
|             # the following wacky bug: | ||||
|             # <tractorbugurlhere> | ||||
|             parent_exit.set() | ||||
|         yield tuple(unwrapped.values()) | ||||
| 
 | ||||
|         # we don't need a try/finally since cancellation will be triggered | ||||
|         # by the surrounding nursery on error. | ||||
|         parent_exit.set() | ||||
| 
 | ||||
| 
 | ||||
| # Per actor task caching helpers. | ||||
|  | @ -152,15 +119,13 @@ class _Cache: | |||
|     a kept-alive-while-in-use async resource. | ||||
| 
 | ||||
|     ''' | ||||
|     service_n: Optional[trio.Nursery] = None | ||||
|     locks: dict[Hashable, trio.Lock] = {} | ||||
|     lock = trio.Lock() | ||||
|     users: int = 0 | ||||
|     values: dict[Any,  Any] = {} | ||||
|     resources: dict[ | ||||
|         Hashable, | ||||
|         tuple[trio.Nursery, trio.Event] | ||||
|     ] = {} | ||||
|     # nurseries: dict[int, trio.Nursery] = {} | ||||
|     no_more_users: Optional[trio.Event] = None | ||||
| 
 | ||||
|     @classmethod | ||||
|  | @ -191,7 +156,7 @@ async def maybe_open_context( | |||
|     # XXX: used as cache key after conversion to tuple | ||||
|     # and all embedded values must also be hashable | ||||
|     kwargs: dict = {}, | ||||
|     key: Hashable | Callable[..., Hashable] = None, | ||||
|     key: Hashable = None, | ||||
| 
 | ||||
| ) -> AsyncIterator[tuple[bool, T]]: | ||||
|     ''' | ||||
|  | @ -200,69 +165,51 @@ async def maybe_open_context( | |||
|     _Cached instance on a _Cache hit. | ||||
| 
 | ||||
|     ''' | ||||
|     fid = id(acm_func) | ||||
|     # lock resource acquisition around task racing  / ``trio``'s | ||||
|     # scheduler protocol | ||||
|     await _Cache.lock.acquire() | ||||
| 
 | ||||
|     if inspect.isfunction(key): | ||||
|         ctx_key = (fid, key(**kwargs)) | ||||
|     else: | ||||
|         ctx_key = (fid, key or tuple(kwargs.items())) | ||||
| 
 | ||||
|     # yielded output | ||||
|     yielded: Any = None | ||||
| 
 | ||||
|     # Lock resource acquisition around task racing  / ``trio``'s | ||||
|     # scheduler protocol. | ||||
|     # NOTE: the lock is target context manager func specific in order | ||||
|     # to allow re-entrant use cases where one `maybe_open_context()` | ||||
|     # wrapped factor may want to call into another. | ||||
|     lock = _Cache.locks.setdefault(fid, trio.Lock()) | ||||
|     await lock.acquire() | ||||
| 
 | ||||
|     # XXX: one singleton nursery per actor and we want to | ||||
|     # have it not be closed until all consumers have exited (which is | ||||
|     # currently difficult to implement any other way besides using our | ||||
|     # pre-allocated runtime instance..) | ||||
|     service_n: trio.Nursery = current_actor()._service_n | ||||
| 
 | ||||
|     # TODO: is there any way to allocate | ||||
|     # a 'stays-open-till-last-task-finished' nursery? | ||||
|     # service_n: trio.Nursery | ||||
|     # async with maybe_open_nursery(_Cache.service_n) as service_n: | ||||
|     #     _Cache.service_n = service_n | ||||
|     ctx_key = (id(acm_func), key or tuple(kwargs.items())) | ||||
|     value = None | ||||
| 
 | ||||
|     try: | ||||
|         # **critical section** that should prevent other tasks from | ||||
|         # checking the _Cache until complete otherwise the scheduler | ||||
|         # may switch and by accident we create more than one resource. | ||||
|         yielded = _Cache.values[ctx_key] | ||||
|         value = _Cache.values[ctx_key] | ||||
| 
 | ||||
|     except KeyError: | ||||
|         log.info(f'Allocating new {acm_func} for {ctx_key}') | ||||
| 
 | ||||
|         mngr = acm_func(**kwargs) | ||||
|         # TODO: avoid pulling from ``tractor`` internals and | ||||
|         # instead offer a "root nursery" in piker actors? | ||||
|         service_n = current_actor()._service_n | ||||
| 
 | ||||
|         # TODO: does this need to be a tractor "root nursery"? | ||||
|         resources = _Cache.resources | ||||
|         assert not resources.get(ctx_key), f'Resource exists? {ctx_key}' | ||||
|         resources[ctx_key] = (service_n, trio.Event()) | ||||
|         ln, _ = resources[ctx_key] = (service_n, trio.Event()) | ||||
| 
 | ||||
|         # sync up to the mngr's yielded value | ||||
|         yielded = await service_n.start( | ||||
|         value = await ln.start( | ||||
|             _Cache.run_ctx, | ||||
|             mngr, | ||||
|             ctx_key, | ||||
|         ) | ||||
|         _Cache.users += 1 | ||||
|         lock.release() | ||||
|         yield False, yielded | ||||
|         _Cache.lock.release() | ||||
|         yield False, value | ||||
| 
 | ||||
|     else: | ||||
|         log.info(f'Reusing _Cached resource for {ctx_key}') | ||||
|         _Cache.users += 1 | ||||
|         lock.release() | ||||
|         yield True, yielded | ||||
|         _Cache.lock.release() | ||||
|         yield True, value | ||||
| 
 | ||||
|     finally: | ||||
|         _Cache.users -= 1 | ||||
| 
 | ||||
|         if yielded is not None: | ||||
|         if value is not None: | ||||
|             # if no more consumers, teardown the client | ||||
|             if _Cache.users <= 0: | ||||
|                 log.info(f'De-allocating resource for {ctx_key}') | ||||
|  | @ -274,5 +221,3 @@ async def maybe_open_context( | |||
|                 if entry: | ||||
|                     _, no_more_users = entry | ||||
|                     no_more_users.set() | ||||
| 
 | ||||
|                 _Cache.locks.pop(fid) | ||||
|  |  | |||
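The two helpers changed above, in a hedged sketch: ``gather_contexts()`` enters several async context managers concurrently and yields their values as a tuple, while ``maybe_open_context()`` caches a manager's yielded value per actor so concurrent callers share one instance (it relies on the actor's service nursery, so it must run inside a tractor actor). ``open_client()`` is made up for illustration.

    from contextlib import asynccontextmanager as acm

    import trio
    import tractor
    from tractor.trionics import gather_contexts, maybe_open_context


    @acm
    async def open_client(host: str):
        # stand-in for an expensive connection setup
        yield f'client-for-{host}'


    async def main() -> None:
        # enter many managers concurrently; values come back as a tuple
        async with gather_contexts(
            [open_client(host=h) for h in ('a', 'b', 'c')]
        ) as clients:
            print(clients)

        # cached-per-actor entry; requires a running (root) actor
        async with tractor.open_root_actor():
            async with maybe_open_context(
                acm_func=open_client,
                kwargs={'host': 'a'},
            ) as (cache_hit, client):
                print(cache_hit, client)  # False on the first use


    if __name__ == '__main__':
        trio.run(main)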