Compare commits

72 Commits

master...transport_
	| Author | SHA1 | Date | 
|---|---|---|
|  | acd955714f | |
|  | 7f84d9f048 | |
|  | fc831dbfd6 | |
|  | 084becc62f | |
|  | f37d46585c | |
|  | 669559380d | |
|  | a5d27ebcf5 | |
|  | 1084ca99bf | |
|  | e4d6810623 | |
|  | 2556a568e7 | |
|  | 6d72a4ef45 | |
|  | f57cc66a7f | |
|  | d3e508b7f7 | |
|  | 8663805042 | |
|  | 9ddb636452 | |
|  | 210dfdf70f | |
|  | 31733b80f0 | |
|  | 5983c08218 | |
|  | 93dedefd1f | |
|  | 5936e8e2b9 | |
|  | 378c8cee52 | |
|  | 2f804a977c | |
|  | 6aa390f714 | |
|  | 2bcab8b228 | |
|  | dc6fe3137c | |
|  | b02731ca75 | |
|  | e51ba27d01 | |
|  | 9f0fedea14 | |
|  | e74e93f857 | |
|  | c92fc33b7c | |
|  | 23dabb9502 | |
|  | 9469a4b1d8 | |
|  | e2a556a0f7 | |
|  | 11d471a4cd | |
|  | b63ccf0007 | |
|  | d896d84b28 | |
|  | fc8d02f963 | |
|  | 3b48f89056 | |
|  | a146034cb7 | |
|  | 0e6f017929 | |
|  | 0d8a5506da | |
|  | fed927d00f | |
|  | 1c7c9da99c | |
|  | b6dd58b1cf | |
|  | 9b70f5d312 | |
|  | b92ed701bd | |
|  | 4c712a8e47 | |
|  | 6bec0ddf0c | |
|  | 7192654d51 | |
|  | 9d5f193dfd | |
|  | 8b13dc4967 | |
|  | 6f62277c82 | |
|  | 207a88e3a8 | |
|  | 2905127dc7 | |
|  | 7d9a551f77 | |
|  | 66d18be2ec | |
|  | bc689427ef | |
|  | 490cc8ac53 | |
|  | cdafdde092 | |
|  | 2870828c34 | |
|  | 76f07898d9 | |
|  | 1e49066b16 | |
|  | b1de90b175 | |
|  | 2bd6bbc1b7 | |
|  | 700f09ce9b | |
|  | d8dcee3713 | |
|  | 6463aa1559 | |
|  | 39453e43e0 | |
|  | d89e632a16 | |
|  | b38b4fe188 | |
|  | e6aecf2ae5 | |
|  | b44652c5d5 | 
|  | @ -0,0 +1,53 @@ | ||||||
|  | ''' | ||||||
|  | fast fail test with a context. | ||||||
|  | ensure the partially initialized sub-actor process | ||||||
|  | doesn't cause a hang on error/cancel of the parent | ||||||
|  | nursery. | ||||||
|  | 
 | ||||||
|  | ''' | ||||||
|  | import trio | ||||||
|  | import tractor | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor.context | ||||||
|  | async def sleep( | ||||||
|  |     ctx: tractor.Context, | ||||||
|  | ): | ||||||
|  |     await trio.sleep(0.5) | ||||||
|  |     await ctx.started() | ||||||
|  |     await trio.sleep_forever() | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | async def open_ctx( | ||||||
|  |     n: tractor._trionics.ActorNursery | ||||||
|  | ): | ||||||
|  | 
 | ||||||
|  |     # spawn both actors | ||||||
|  |     portal = await n.start_actor( | ||||||
|  |         name='sleeper', | ||||||
|  |         enable_modules=[__name__], | ||||||
|  |     ) | ||||||
|  | 
 | ||||||
|  |     async with portal.open_context( | ||||||
|  |         sleep, | ||||||
|  |     ) as (ctx, first): | ||||||
|  |         assert first is None | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | async def main(): | ||||||
|  | 
 | ||||||
|  |     async with tractor.open_nursery( | ||||||
|  |         debug_mode=True, | ||||||
|  |         loglevel='runtime', | ||||||
|  |     ) as an: | ||||||
|  | 
 | ||||||
|  |         async with trio.open_nursery() as n: | ||||||
|  |             n.start_soon(open_ctx, an) | ||||||
|  | 
 | ||||||
|  |             await trio.sleep(0.2) | ||||||
|  |             await trio.sleep(0.1) | ||||||
|  |             assert 0 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     trio.run(main) | ||||||
|  | @ -0,0 +1,31 @@ | ||||||
|  | 
 | ||||||
|  | import trio | ||||||
|  | import tractor | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | async def key_error(): | ||||||
|  |     "Raise a ``NameError``" | ||||||
|  |     return {}['doggy'] | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | async def main(): | ||||||
|  |     """Root dies  | ||||||
|  | 
 | ||||||
|  |     """ | ||||||
|  |     async with tractor.open_nursery( | ||||||
|  |         debug_mode=True, | ||||||
|  |         loglevel='debug' | ||||||
|  |     ) as n: | ||||||
|  | 
 | ||||||
|  |         # spawn both actors | ||||||
|  |         portal = await n.run_in_actor(key_error) | ||||||
|  | 
 | ||||||
|  |         # XXX: originally a bug caused by this | ||||||
|  |         # where root would enter debugger even | ||||||
|  |         # though child should have it locked. | ||||||
|  |         with trio.fail_after(1): | ||||||
|  |             await trio.Event().wait() | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     trio.run(main) | ||||||
							
								
								
									
setup.py (18 changed lines)
							|  | @ -38,14 +38,28 @@ setup( | ||||||
|         'tractor.testing', |         'tractor.testing', | ||||||
|     ], |     ], | ||||||
|     install_requires=[ |     install_requires=[ | ||||||
|  | 
 | ||||||
|  |         # trio related | ||||||
|         'trio>0.8', |         'trio>0.8', | ||||||
|         'msgpack', |  | ||||||
|         'async_generator', |         'async_generator', | ||||||
|  |         'tricycle', | ||||||
|  |         'trio_typing', | ||||||
|  | 
 | ||||||
|  |         # tooling | ||||||
|         'colorlog', |         'colorlog', | ||||||
|         'wrapt', |         'wrapt', | ||||||
|         'trio_typing', |  | ||||||
|         'pdbpp', |         'pdbpp', | ||||||
|  | 
 | ||||||
|  |         # serialization | ||||||
|  |         'msgpack', | ||||||
|  | 
 | ||||||
|     ], |     ], | ||||||
|  |     extras_require={ | ||||||
|  | 
 | ||||||
|  |         # serialization | ||||||
|  |         'msgspec': ["msgspec; python_version >= '3.9'"], | ||||||
|  | 
 | ||||||
|  |     }, | ||||||
|     tests_require=['pytest'], |     tests_require=['pytest'], | ||||||
|     python_requires=">=3.7", |     python_requires=">=3.7", | ||||||
|     keywords=[ |     keywords=[ | ||||||
|  |  | ||||||
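The new `extras_require` entry makes `msgspec` an opt-in serialization backend, gated to Python 3.9+ by its environment marker, while `msgpack` stays in `install_requires`. A rough, hypothetical sketch of how downstream code could detect whether the extra was installed; this is not tractor's actual backend-selection logic:

```python
# hypothetical feature-detection helper, not part of this changeset
try:
    import msgspec  # only present after e.g.: pip install "tractor[msgspec]"
except ImportError:
    msgspec = None

import msgpack  # always available per install_requires above


def serializer_name() -> str:
    # prefer the optional msgspec backend when the extra is installed
    return 'msgspec' if msgspec is not None else 'msgpack'
```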
|  | @ -0,0 +1,498 @@ | ||||||
|  | """ | ||||||
|  | Bidirectional streaming and context API. | ||||||
|  | 
 | ||||||
|  | """ | ||||||
|  | import pytest | ||||||
|  | import trio | ||||||
|  | import tractor | ||||||
|  | 
 | ||||||
|  | from conftest import tractor_test | ||||||
|  | 
 | ||||||
|  | # the general stream semantics are | ||||||
|  | # - normal termination: far end relays a stop message which | ||||||
|  | # terminates an ongoing ``MsgStream`` iteration | ||||||
|  | # - cancel termination: context is cancelled on either side cancelling | ||||||
|  | #  the "linked" inter-actor task context | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | _state: bool = False | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor.context | ||||||
|  | async def simple_setup_teardown( | ||||||
|  | 
 | ||||||
|  |     ctx: tractor.Context, | ||||||
|  |     data: int, | ||||||
|  |     block_forever: bool = False, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  | 
 | ||||||
|  |     # startup phase | ||||||
|  |     global _state | ||||||
|  |     _state = True | ||||||
|  | 
 | ||||||
|  |     # signal to parent that we're up | ||||||
|  |     await ctx.started(data + 1) | ||||||
|  | 
 | ||||||
|  |     try: | ||||||
|  |         if block_forever: | ||||||
|  |             # block until cancelled | ||||||
|  |             await trio.sleep_forever() | ||||||
|  |         else: | ||||||
|  |             return 'yo' | ||||||
|  |     finally: | ||||||
|  |         _state = False | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | async def assert_state(value: bool): | ||||||
|  |     global _state | ||||||
|  |     assert _state == value | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @pytest.mark.parametrize( | ||||||
|  |     'error_parent', | ||||||
|  |     [False, True], | ||||||
|  | ) | ||||||
|  | @pytest.mark.parametrize( | ||||||
|  |     'callee_blocks_forever', | ||||||
|  |     [False, True], | ||||||
|  | ) | ||||||
|  | def test_simple_context( | ||||||
|  |     error_parent, | ||||||
|  |     callee_blocks_forever, | ||||||
|  | ): | ||||||
|  | 
 | ||||||
|  |     async def main(): | ||||||
|  | 
 | ||||||
|  |         async with tractor.open_nursery() as n: | ||||||
|  | 
 | ||||||
|  |             portal = await n.start_actor( | ||||||
|  |                 'simple_context', | ||||||
|  |                 enable_modules=[__name__], | ||||||
|  |             ) | ||||||
|  | 
 | ||||||
|  |             async with portal.open_context( | ||||||
|  |                 simple_setup_teardown, | ||||||
|  |                 data=10, | ||||||
|  |                 block_forever=callee_blocks_forever, | ||||||
|  |             ) as (ctx, sent): | ||||||
|  | 
 | ||||||
|  |                 assert sent == 11 | ||||||
|  | 
 | ||||||
|  |                 if callee_blocks_forever: | ||||||
|  |                     await portal.run(assert_state, value=True) | ||||||
|  |                     await ctx.cancel() | ||||||
|  |                 else: | ||||||
|  |                     assert await ctx.result() == 'yo' | ||||||
|  | 
 | ||||||
|  |             # after cancellation | ||||||
|  |             await portal.run(assert_state, value=False) | ||||||
|  | 
 | ||||||
|  |             if error_parent: | ||||||
|  |                 raise ValueError | ||||||
|  | 
 | ||||||
|  |             # shut down daemon | ||||||
|  |             await portal.cancel_actor() | ||||||
|  | 
 | ||||||
|  |     if error_parent: | ||||||
|  |         try: | ||||||
|  |             trio.run(main) | ||||||
|  |         except ValueError: | ||||||
|  |             pass | ||||||
|  |     else: | ||||||
|  |         trio.run(main) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # basic stream terminations: | ||||||
|  | # - callee context closes without using stream | ||||||
|  | # - caller context closes without using stream | ||||||
|  | # - caller context calls `Context.cancel()` while streaming | ||||||
|  | #   is ongoing resulting in callee being cancelled | ||||||
|  | # - callee calls `Context.cancel()` while streaming and caller | ||||||
|  | #   sees stream terminated in `RemoteActorError` | ||||||
|  | 
 | ||||||
|  | # TODO: future possible features | ||||||
|  | # - restart request: far end raises `ContextRestart` | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor.context | ||||||
|  | async def close_ctx_immediately( | ||||||
|  | 
 | ||||||
|  |     ctx: tractor.Context, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  | 
 | ||||||
|  |     await ctx.started() | ||||||
|  |     global _state | ||||||
|  | 
 | ||||||
|  |     async with ctx.open_stream(): | ||||||
|  |         pass | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor_test | ||||||
|  | async def test_callee_closes_ctx_after_stream_open(): | ||||||
|  |     'callee context closes without using stream' | ||||||
|  | 
 | ||||||
|  |     async with tractor.open_nursery() as n: | ||||||
|  | 
 | ||||||
|  |         portal = await n.start_actor( | ||||||
|  |             'fast_stream_closer', | ||||||
|  |             enable_modules=[__name__], | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |         async with portal.open_context( | ||||||
|  |             close_ctx_immediately, | ||||||
|  | 
 | ||||||
|  |             # flag to avoid waiting the final result | ||||||
|  |             # cancel_on_exit=True, | ||||||
|  | 
 | ||||||
|  |         ) as (ctx, sent): | ||||||
|  | 
 | ||||||
|  |             assert sent is None | ||||||
|  | 
 | ||||||
|  |             with trio.fail_after(0.5): | ||||||
|  |                 async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |                     # should fall through since ``StopAsyncIteration`` | ||||||
|  |                     # should be raised through translation of | ||||||
|  |                     # a ``trio.EndOfChannel`` by | ||||||
|  |                     # ``trio.abc.ReceiveChannel.__anext__()`` | ||||||
|  |                     async for _ in stream: | ||||||
|  |                         assert 0 | ||||||
|  |                     else: | ||||||
|  | 
 | ||||||
|  |                         # verify stream is now closed | ||||||
|  |                         try: | ||||||
|  |                             await stream.receive() | ||||||
|  |                         except trio.EndOfChannel: | ||||||
|  |                             pass | ||||||
|  | 
 | ||||||
|  |             # TODO: should be just raise the closed resource err | ||||||
|  |             # directly here to enforce not allowing a re-open | ||||||
|  |             # of a stream to the context (at least until a time of | ||||||
|  |             # if/when we decide that's a good idea?) | ||||||
|  |             try: | ||||||
|  |                 async with ctx.open_stream() as stream: | ||||||
|  |                     pass | ||||||
|  |             except trio.ClosedResourceError: | ||||||
|  |                 pass | ||||||
|  | 
 | ||||||
|  |         await portal.cancel_actor() | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor.context | ||||||
|  | async def expect_cancelled( | ||||||
|  | 
 | ||||||
|  |     ctx: tractor.Context, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  |     global _state | ||||||
|  |     _state = True | ||||||
|  | 
 | ||||||
|  |     await ctx.started() | ||||||
|  | 
 | ||||||
|  |     try: | ||||||
|  |         async with ctx.open_stream() as stream: | ||||||
|  |             async for msg in stream: | ||||||
|  |                 await stream.send(msg)  # echo server | ||||||
|  | 
 | ||||||
|  |     except trio.Cancelled: | ||||||
|  |         # expected case | ||||||
|  |         _state = False | ||||||
|  |         raise | ||||||
|  | 
 | ||||||
|  |     else: | ||||||
|  |         assert 0, "Wasn't cancelled!?" | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @pytest.mark.parametrize( | ||||||
|  |     'use_ctx_cancel_method', | ||||||
|  |     [False, True], | ||||||
|  | ) | ||||||
|  | @tractor_test | ||||||
|  | async def test_caller_closes_ctx_after_callee_opens_stream( | ||||||
|  |     use_ctx_cancel_method: bool, | ||||||
|  | ): | ||||||
|  |     'caller context closes without using stream' | ||||||
|  | 
 | ||||||
|  |     async with tractor.open_nursery() as n: | ||||||
|  | 
 | ||||||
|  |         portal = await n.start_actor( | ||||||
|  |             'ctx_cancelled', | ||||||
|  |             enable_modules=[__name__], | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |         async with portal.open_context( | ||||||
|  |             expect_cancelled, | ||||||
|  |         ) as (ctx, sent): | ||||||
|  |             await portal.run(assert_state, value=True) | ||||||
|  | 
 | ||||||
|  |             assert sent is None | ||||||
|  | 
 | ||||||
|  |             # call cancel explicitly | ||||||
|  |             if use_ctx_cancel_method: | ||||||
|  | 
 | ||||||
|  |                 await ctx.cancel() | ||||||
|  | 
 | ||||||
|  |                 try: | ||||||
|  |                     async with ctx.open_stream() as stream: | ||||||
|  |                         async for msg in stream: | ||||||
|  |                             pass | ||||||
|  | 
 | ||||||
|  |                 except tractor.ContextCancelled: | ||||||
|  |                     raise  # XXX: must be propagated to __aexit__ | ||||||
|  | 
 | ||||||
|  |                 else: | ||||||
|  |                     assert 0, "Should have context cancelled?" | ||||||
|  | 
 | ||||||
|  |                 # channel should still be up | ||||||
|  |                 assert portal.channel.connected() | ||||||
|  | 
 | ||||||
|  |                 # ctx is closed here | ||||||
|  |                 await portal.run(assert_state, value=False) | ||||||
|  | 
 | ||||||
|  |             else: | ||||||
|  |                 try: | ||||||
|  |                     with trio.fail_after(0.2): | ||||||
|  |                         await ctx.result() | ||||||
|  |                         assert 0, "Callee should have blocked!?" | ||||||
|  |                 except trio.TooSlowError: | ||||||
|  |                     await ctx.cancel() | ||||||
|  |         try: | ||||||
|  |             async with ctx.open_stream() as stream: | ||||||
|  |                 async for msg in stream: | ||||||
|  |                     pass | ||||||
|  |         except tractor.ContextCancelled: | ||||||
|  |             pass | ||||||
|  |         else: | ||||||
|  |             assert 0, "Should have received closed resource error?" | ||||||
|  | 
 | ||||||
|  |         # ctx is closed here | ||||||
|  |         await portal.run(assert_state, value=False) | ||||||
|  | 
 | ||||||
|  |         # channel should not have been destroyed yet, only the | ||||||
|  |         # inter-actor-task context | ||||||
|  |         assert portal.channel.connected() | ||||||
|  | 
 | ||||||
|  |         # teardown the actor | ||||||
|  |         await portal.cancel_actor() | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor_test | ||||||
|  | async def test_multitask_caller_cancels_from_nonroot_task(): | ||||||
|  | 
 | ||||||
|  |     async with tractor.open_nursery() as n: | ||||||
|  | 
 | ||||||
|  |         portal = await n.start_actor( | ||||||
|  |             'ctx_cancelled', | ||||||
|  |             enable_modules=[__name__], | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |         async with portal.open_context( | ||||||
|  |             expect_cancelled, | ||||||
|  |         ) as (ctx, sent): | ||||||
|  | 
 | ||||||
|  |             await portal.run(assert_state, value=True) | ||||||
|  |             assert sent is None | ||||||
|  | 
 | ||||||
|  |             async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |                 async def send_msg_then_cancel(): | ||||||
|  |                     await stream.send('yo') | ||||||
|  |                     await portal.run(assert_state, value=True) | ||||||
|  |                     await ctx.cancel() | ||||||
|  |                     await portal.run(assert_state, value=False) | ||||||
|  | 
 | ||||||
|  |                 async with trio.open_nursery() as n: | ||||||
|  |                     n.start_soon(send_msg_then_cancel) | ||||||
|  | 
 | ||||||
|  |                     try: | ||||||
|  |                         async for msg in stream: | ||||||
|  |                             assert msg == 'yo' | ||||||
|  | 
 | ||||||
|  |                     except tractor.ContextCancelled: | ||||||
|  |                         raise  # XXX: must be propagated to __aexit__ | ||||||
|  | 
 | ||||||
|  |                 # channel should still be up | ||||||
|  |                 assert portal.channel.connected() | ||||||
|  | 
 | ||||||
|  |                 # ctx is closed here | ||||||
|  |                 await portal.run(assert_state, value=False) | ||||||
|  | 
 | ||||||
|  |         # channel should not have been destroyed yet, only the | ||||||
|  |         # inter-actor-task context | ||||||
|  |         assert portal.channel.connected() | ||||||
|  | 
 | ||||||
|  |         # teardown the actor | ||||||
|  |         await portal.cancel_actor() | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor.context | ||||||
|  | async def cancel_self( | ||||||
|  | 
 | ||||||
|  |     ctx: tractor.Context, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  |     global _state | ||||||
|  |     _state = True | ||||||
|  | 
 | ||||||
|  |     await ctx.cancel() | ||||||
|  |     try: | ||||||
|  |         with trio.fail_after(0.1): | ||||||
|  |             await trio.sleep_forever() | ||||||
|  | 
 | ||||||
|  |     except trio.Cancelled: | ||||||
|  |         raise | ||||||
|  | 
 | ||||||
|  |     except trio.TooSlowError: | ||||||
|  |         # should never get here | ||||||
|  |         assert 0 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor_test | ||||||
|  | async def test_callee_cancels_before_started(): | ||||||
|  |     '''callee calls `Context.cancel()` before calling `started()` and the | ||||||
|  |     caller sees the context terminate with a `ContextCancelled`. | ||||||
|  | 
 | ||||||
|  |     ''' | ||||||
|  |     async with tractor.open_nursery() as n: | ||||||
|  | 
 | ||||||
|  |         portal = await n.start_actor( | ||||||
|  |             'cancels_self', | ||||||
|  |             enable_modules=[__name__], | ||||||
|  |         ) | ||||||
|  |         try: | ||||||
|  | 
 | ||||||
|  |             async with portal.open_context( | ||||||
|  |                 cancel_self, | ||||||
|  |             ) as (ctx, sent): | ||||||
|  |                 async with ctx.open_stream(): | ||||||
|  | 
 | ||||||
|  |                     await trio.sleep_forever() | ||||||
|  | 
 | ||||||
|  |         # raises a special cancel signal | ||||||
|  |         except tractor.ContextCancelled as ce: | ||||||
|  |             assert ce.type == trio.Cancelled | ||||||
|  | 
 | ||||||
|  |         # teardown the actor | ||||||
|  |         await portal.cancel_actor() | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor.context | ||||||
|  | async def simple_rpc( | ||||||
|  | 
 | ||||||
|  |     ctx: tractor.Context, | ||||||
|  |     data: int, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  |     """Test a small ping-pong server. | ||||||
|  | 
 | ||||||
|  |     """ | ||||||
|  |     # signal to parent that we're up | ||||||
|  |     await ctx.started(data + 1) | ||||||
|  | 
 | ||||||
|  |     print('opening stream in callee') | ||||||
|  |     async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |         count = 0 | ||||||
|  |         while True: | ||||||
|  |             try: | ||||||
|  |                 assert await stream.receive() == 'ping' | ||||||
|  |             except trio.EndOfChannel: | ||||||
|  |                 assert count == 10 | ||||||
|  |                 break | ||||||
|  |             else: | ||||||
|  |                 print('pong') | ||||||
|  |                 await stream.send('pong') | ||||||
|  |                 count += 1 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor.context | ||||||
|  | async def simple_rpc_with_forloop( | ||||||
|  | 
 | ||||||
|  |     ctx: tractor.Context, | ||||||
|  |     data: int, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  |     """Same as previous test but using ``async for`` syntax/api. | ||||||
|  | 
 | ||||||
|  |     """ | ||||||
|  | 
 | ||||||
|  |     # signal to parent that we're up | ||||||
|  |     await ctx.started(data + 1) | ||||||
|  | 
 | ||||||
|  |     print('opening stream in callee') | ||||||
|  |     async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |         count = 0 | ||||||
|  |         async for msg in stream: | ||||||
|  | 
 | ||||||
|  |             assert msg == 'ping' | ||||||
|  |             print('pong') | ||||||
|  |             await stream.send('pong') | ||||||
|  |             count += 1 | ||||||
|  | 
 | ||||||
|  |         else: | ||||||
|  |             assert count == 10 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @pytest.mark.parametrize( | ||||||
|  |     'use_async_for', | ||||||
|  |     [True, False], | ||||||
|  | ) | ||||||
|  | @pytest.mark.parametrize( | ||||||
|  |     'server_func', | ||||||
|  |     [simple_rpc, simple_rpc_with_forloop], | ||||||
|  | ) | ||||||
|  | def test_simple_rpc(server_func, use_async_for): | ||||||
|  |     """The simplest request response pattern. | ||||||
|  | 
 | ||||||
|  |     """ | ||||||
|  |     async def main(): | ||||||
|  |         async with tractor.open_nursery() as n: | ||||||
|  | 
 | ||||||
|  |             portal = await n.start_actor( | ||||||
|  |                 'rpc_server', | ||||||
|  |                 enable_modules=[__name__], | ||||||
|  |             ) | ||||||
|  | 
 | ||||||
|  |             async with portal.open_context( | ||||||
|  |                 server_func,  # taken from pytest parameterization | ||||||
|  |                 data=10, | ||||||
|  |             ) as (ctx, sent): | ||||||
|  | 
 | ||||||
|  |                 assert sent == 11 | ||||||
|  | 
 | ||||||
|  |                 async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |                     if use_async_for: | ||||||
|  | 
 | ||||||
|  |                         count = 0 | ||||||
|  |                         # receive msgs using async for style | ||||||
|  |                         print('ping') | ||||||
|  |                         await stream.send('ping') | ||||||
|  | 
 | ||||||
|  |                         async for msg in stream: | ||||||
|  |                             assert msg == 'pong' | ||||||
|  |                             print('ping') | ||||||
|  |                             await stream.send('ping') | ||||||
|  |                             count += 1 | ||||||
|  | 
 | ||||||
|  |                             if count >= 9: | ||||||
|  |                                 break | ||||||
|  | 
 | ||||||
|  |                     else: | ||||||
|  |                         # classic send/receive style | ||||||
|  |                         for _ in range(10): | ||||||
|  | 
 | ||||||
|  |                             print('ping') | ||||||
|  |                             await stream.send('ping') | ||||||
|  |                             assert await stream.receive() == 'pong' | ||||||
|  | 
 | ||||||
|  |                 # stream should terminate here | ||||||
|  | 
 | ||||||
|  |             # final context result(s) should be consumed here in __aexit__() | ||||||
|  | 
 | ||||||
|  |             await portal.cancel_actor() | ||||||
|  | 
 | ||||||
|  |     trio.run(main) | ||||||
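To make review easier, here is the caller/callee API from the tests above condensed into one self-contained sketch. It only uses calls exercised in this file (`@tractor.context`, `ctx.started()`, `ctx.open_stream()`, `portal.open_context()`, `ctx.cancel()`); the actor name and echo behavior are illustrative, not part of the changeset:

```python
import trio
import tractor


@tractor.context
async def echo(
    ctx: tractor.Context,
) -> None:
    # handshake: unblocks the caller's ``open_context()`` entry
    await ctx.started()

    async with ctx.open_stream() as stream:
        # echo until the far end closes or cancels the linked context
        async for msg in stream:
            await stream.send(msg)


async def main():
    async with tractor.open_nursery() as n:

        portal = await n.start_actor(
            'echoer',
            enable_modules=[__name__],
        )

        async with portal.open_context(echo) as (ctx, first):
            assert first is None

            async with ctx.open_stream() as stream:
                await stream.send('hello')
                assert await stream.receive() == 'hello'

            # caller-initiated cancel of the linked callee task; per the
            # tests above this is absorbed by ``open_context()``'s exit
            await ctx.cancel()

        # tear down the daemon actor
        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)
```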
|  | @ -0,0 +1,220 @@ | ||||||
|  | """ | ||||||
|  | Advanced streaming patterns using bidirectional streams and contexts. | ||||||
|  | 
 | ||||||
|  | """ | ||||||
|  | import itertools | ||||||
|  | from typing import Set, Dict, List | ||||||
|  | 
 | ||||||
|  | import trio | ||||||
|  | import tractor | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | _registry: Dict[str, Set[tractor.ReceiveMsgStream]] = { | ||||||
|  |     'even': set(), | ||||||
|  |     'odd': set(), | ||||||
|  | } | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | async def publisher( | ||||||
|  | 
 | ||||||
|  |     seed: int = 0, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  | 
 | ||||||
|  |     global _registry | ||||||
|  | 
 | ||||||
|  |     def is_even(i): | ||||||
|  |         return i % 2 == 0 | ||||||
|  | 
 | ||||||
|  |     for val in itertools.count(seed): | ||||||
|  | 
 | ||||||
|  |         sub = 'even' if is_even(val) else 'odd' | ||||||
|  | 
 | ||||||
|  |         for sub_stream in _registry[sub].copy(): | ||||||
|  |             await sub_stream.send(val) | ||||||
|  | 
 | ||||||
|  |         # throttle send rate to ~1kHz | ||||||
|  |         # making it readable to a human user | ||||||
|  |         await trio.sleep(1/1000) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor.context | ||||||
|  | async def subscribe( | ||||||
|  | 
 | ||||||
|  |     ctx: tractor.Context, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  | 
 | ||||||
|  |     global _registry | ||||||
|  | 
 | ||||||
|  |     # sync with the caller | ||||||
|  |     await ctx.started(None) | ||||||
|  | 
 | ||||||
|  |     async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |         # update subs list as consumer requests | ||||||
|  |         async for new_subs in stream: | ||||||
|  | 
 | ||||||
|  |             new_subs = set(new_subs) | ||||||
|  |             remove = _registry.keys() - new_subs | ||||||
|  | 
 | ||||||
|  |             print(f'setting sub to {new_subs} for {ctx.chan.uid}') | ||||||
|  | 
 | ||||||
|  |             # remove old subs | ||||||
|  |             for sub in remove: | ||||||
|  |                 _registry[sub].discard(stream) | ||||||
|  | 
 | ||||||
|  |             # add new subs for consumer | ||||||
|  |             for sub in new_subs: | ||||||
|  |                 _registry[sub].add(stream) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | async def consumer( | ||||||
|  | 
 | ||||||
|  |     subs: List[str], | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  | 
 | ||||||
|  |     uid = tractor.current_actor().uid | ||||||
|  | 
 | ||||||
|  |     async with tractor.wait_for_actor('publisher') as portal: | ||||||
|  |         async with portal.open_context(subscribe) as (ctx, first): | ||||||
|  |             async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |                 # flip between the provided subs dynamically | ||||||
|  |                 if len(subs) > 1: | ||||||
|  | 
 | ||||||
|  |                     for sub in itertools.cycle(subs): | ||||||
|  |                         print(f'setting dynamic sub to {sub}') | ||||||
|  |                         await stream.send([sub]) | ||||||
|  | 
 | ||||||
|  |                         count = 0 | ||||||
|  |                         async for value in stream: | ||||||
|  |                             print(f'{uid} got: {value}') | ||||||
|  |                             if count > 5: | ||||||
|  |                                 break | ||||||
|  |                             count += 1 | ||||||
|  | 
 | ||||||
|  |                 else:  # static sub | ||||||
|  | 
 | ||||||
|  |                     await stream.send(subs) | ||||||
|  |                     async for value in stream: | ||||||
|  |                         print(f'{uid} got: {value}') | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def test_dynamic_pub_sub(): | ||||||
|  | 
 | ||||||
|  |     global _registry | ||||||
|  | 
 | ||||||
|  |     from multiprocessing import cpu_count | ||||||
|  |     cpus = cpu_count() | ||||||
|  | 
 | ||||||
|  |     async def main(): | ||||||
|  |         async with tractor.open_nursery() as n: | ||||||
|  | 
 | ||||||
|  |             # name of this actor will be same as target func | ||||||
|  |             await n.run_in_actor(publisher) | ||||||
|  | 
 | ||||||
|  |             for i, sub in zip( | ||||||
|  |                 range(cpus - 2), | ||||||
|  |                 itertools.cycle(_registry.keys()) | ||||||
|  |             ): | ||||||
|  |                 await n.run_in_actor( | ||||||
|  |                     consumer, | ||||||
|  |                     name=f'consumer_{sub}', | ||||||
|  |                     subs=[sub], | ||||||
|  |                 ) | ||||||
|  | 
 | ||||||
|  |             # make one dynamic subscriber | ||||||
|  |             await n.run_in_actor( | ||||||
|  |                 consumer, | ||||||
|  |                 name='consumer_dynamic', | ||||||
|  |                 subs=list(_registry.keys()), | ||||||
|  |             ) | ||||||
|  | 
 | ||||||
|  |             # run until the timeout below tears everything down | ||||||
|  |             with trio.fail_after(3): | ||||||
|  |                 await trio.sleep_forever() | ||||||
|  | 
 | ||||||
|  |     try: | ||||||
|  |         trio.run(main) | ||||||
|  |     except trio.TooSlowError: | ||||||
|  |         pass | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | @tractor.context | ||||||
|  | async def one_task_streams_and_one_handles_reqresp( | ||||||
|  | 
 | ||||||
|  |     ctx: tractor.Context, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  | 
 | ||||||
|  |     await ctx.started() | ||||||
|  | 
 | ||||||
|  |     async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |         async def pingpong(): | ||||||
|  |             '''Run a simple req/response service. | ||||||
|  | 
 | ||||||
|  |             ''' | ||||||
|  |             async for msg in stream: | ||||||
|  |                 print('rpc server ping') | ||||||
|  |                 assert msg == 'ping' | ||||||
|  |                 print('rpc server pong') | ||||||
|  |                 await stream.send('pong') | ||||||
|  | 
 | ||||||
|  |         async with trio.open_nursery() as n: | ||||||
|  |             n.start_soon(pingpong) | ||||||
|  | 
 | ||||||
|  |             for _ in itertools.count(): | ||||||
|  |                 await stream.send('yo') | ||||||
|  |                 await trio.sleep(0.01) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def test_reqresp_ontopof_streaming(): | ||||||
|  |     '''Test a subactor that both streams with one task and | ||||||
|  |     spawns another which handles a small request-response | ||||||
|  |     dialogue over the same bidir-stream. | ||||||
|  | 
 | ||||||
|  |     ''' | ||||||
|  |     async def main(): | ||||||
|  | 
 | ||||||
|  |         with trio.move_on_after(2): | ||||||
|  |             async with tractor.open_nursery() as n: | ||||||
|  | 
 | ||||||
|  |                 # spawn a subactor exposing this test module | ||||||
|  |                 portal = await n.start_actor( | ||||||
|  |                     'dual_tasks', | ||||||
|  |                     enable_modules=[__name__] | ||||||
|  |                 ) | ||||||
|  | 
 | ||||||
|  |                 # flag to make sure we get at least one pong | ||||||
|  |                 got_pong: bool = False | ||||||
|  | 
 | ||||||
|  |                 async with portal.open_context( | ||||||
|  |                     one_task_streams_and_one_handles_reqresp, | ||||||
|  | 
 | ||||||
|  |                 ) as (ctx, first): | ||||||
|  | 
 | ||||||
|  |                     assert first is None | ||||||
|  | 
 | ||||||
|  |                     async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |                         await stream.send('ping') | ||||||
|  | 
 | ||||||
|  |                         async for msg in stream: | ||||||
|  |                             print(f'client received: {msg}') | ||||||
|  | 
 | ||||||
|  |                             assert msg in {'pong', 'yo'} | ||||||
|  | 
 | ||||||
|  |                             if msg == 'pong': | ||||||
|  |                                 got_pong = True | ||||||
|  |                                 await stream.send('ping') | ||||||
|  |                                 print('client sent ping') | ||||||
|  | 
 | ||||||
|  |         assert got_pong | ||||||
|  | 
 | ||||||
|  |     try: | ||||||
|  |         trio.run(main) | ||||||
|  |     except trio.TooSlowError: | ||||||
|  |         pass | ||||||
|  | @ -123,8 +123,15 @@ def test_multierror_fast_nursery(arb_addr, start_method, num_subactors, delay): | ||||||
| 
 | 
 | ||||||
|     assert exc_info.type == tractor.MultiError |     assert exc_info.type == tractor.MultiError | ||||||
|     err = exc_info.value |     err = exc_info.value | ||||||
|     assert len(err.exceptions) == num_subactors |     exceptions = err.exceptions | ||||||
|     for exc in err.exceptions: | 
 | ||||||
|  |     if len(exceptions) == 2: | ||||||
|  |         # sometimes oddly now there's an embedded BrokenResourceError ? | ||||||
|  |         exceptions = exceptions[1].exceptions | ||||||
|  | 
 | ||||||
|  |     assert len(exceptions) == num_subactors | ||||||
|  | 
 | ||||||
|  |     for exc in exceptions: | ||||||
|         assert isinstance(exc, tractor.RemoteActorError) |         assert isinstance(exc, tractor.RemoteActorError) | ||||||
|         assert exc.type == AssertionError |         assert exc.type == AssertionError | ||||||
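The workaround above special-cases a possible extra `MultiError` nesting layer (the stray `BrokenResourceError` wrapper). A hypothetical helper, not part of this changeset, that would make the assertion indifferent to nesting depth:

```python
import tractor


def collect_remote_errors(err: BaseException) -> list:
    # recursively flatten (possibly nested) MultiErrors and keep only the
    # RemoteActorErrors the test actually wants to assert on
    if isinstance(err, tractor.MultiError):
        flat = []
        for sub in err.exceptions:
            flat.extend(collect_remote_errors(sub))
        return flat
    return [err] if isinstance(err, tractor.RemoteActorError) else []
```

With such a helper the test could likely just assert `len(collect_remote_errors(err)) == num_subactors` regardless of whether the wrapper layer shows up.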
| 
 | 
 | ||||||
|  |  | ||||||
|  | @ -307,19 +307,46 @@ def test_multi_daemon_subactors(spawn, loglevel): | ||||||
|     before = str(child.before.decode()) |     before = str(child.before.decode()) | ||||||
|     assert "NameError" in before |     assert "NameError" in before | ||||||
| 
 | 
 | ||||||
|     child.sendline('c') |     # XXX: hooray, the root clobbering the child here was fixed! | ||||||
| 
 | 
 | ||||||
|  |     # now the root actor won't clobber the bp_forever child | ||||||
|  |     # during its first access to the debug lock, but will instead | ||||||
|  |     # wait for the lock to release, by the edge triggered | ||||||
|  |     # ``_debug._no_remote_has_tty`` event before sending cancel messages | ||||||
|  |     # (via portals) to its underlings B) | ||||||
|  | 
 | ||||||
|  |     # IMO, this demonstrates the true power of SC system design. | ||||||
|  |     child.sendline('c') | ||||||
|  |     child.expect(r"\(Pdb\+\+\)") | ||||||
|  |     before = str(child.before.decode()) | ||||||
|  |     assert "Attaching pdb to actor: ('bp_forever'," in before | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  |     child.sendline('c') | ||||||
|  |     child.expect(r"\(Pdb\+\+\)") | ||||||
|  |     before = str(child.before.decode()) | ||||||
|  | 
 | ||||||
|  |     try: | ||||||
|  |         # final error in root | ||||||
|  |         assert "tractor._exceptions.RemoteActorError: ('name_error'" in before | ||||||
|  | 
 | ||||||
|  |     except AssertionError: | ||||||
|  |     # except pexpect.exceptions.TIMEOUT: | ||||||
|  | 
 | ||||||
|  |         # one last entry in the root | ||||||
|  |         child.sendline('c') | ||||||
|         child.expect(r"\(Pdb\+\+\)") |         child.expect(r"\(Pdb\+\+\)") | ||||||
|         before = str(child.before.decode()) |         before = str(child.before.decode()) | ||||||
|         assert "tractor._exceptions.RemoteActorError: ('name_error'" in before |         assert "tractor._exceptions.RemoteActorError: ('name_error'" in before | ||||||
| 
 | 
 | ||||||
|     try: |         # in theory there should have been some msg like this from | ||||||
|         child.sendline('c') |         # root announcing it avoided a clobber of the child's lock, | ||||||
|         child.expect(pexpect.EOF) |         # but it seems unreliable in testing here to gnab it. | ||||||
|     except pexpect.exceptions.TIMEOUT: |         # but it seems unreliable in testing here to grab it. | ||||||
|         # Failed to exit using continue..? |  | ||||||
| 
 | 
 | ||||||
|         child.sendline('q') |     child.sendline('c') | ||||||
|  |     # final error in root | ||||||
|  |     assert "tractor._exceptions.RemoteActorError: ('name_error'" in before | ||||||
|     child.expect(pexpect.EOF) |     child.expect(pexpect.EOF) | ||||||
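The comments in the hunk above describe the fix's ordering: the root now waits on the edge-triggered `_debug._no_remote_has_tty` event before firing cancel requests at its children. A generic trio-only sketch of that ordering, with illustrative names that do not reflect tractor's actual `_debug` internals:

```python
import trio

# illustrative stand-in for ``_debug._no_remote_has_tty``: set once no child
# holds the debugger/tty lock any more
no_child_has_tty = trio.Event()


async def child_in_debugger():
    # pretend the child sits in pdb for a while...
    await trio.sleep(1)
    # ...then releases the tty lock and signals the root
    no_child_has_tty.set()


async def root_cancels(cancel_scope: trio.CancelScope):
    # the fix: wait for the event *before* cancelling, instead of
    # clobbering the child's debugger session immediately
    await no_child_has_tty.wait()
    cancel_scope.cancel()


async def main():
    async with trio.open_nursery() as n:
        n.start_soon(child_in_debugger)
        n.start_soon(root_cancels, n.cancel_scope)
        # only ends once ``root_cancels`` fires the cancel
        await trio.sleep_forever()


if __name__ == '__main__':
    trio.run(main)
```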
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -372,7 +399,7 @@ def test_multi_nested_subactors_error_through_nurseries(spawn): | ||||||
|     child = spawn('multi_nested_subactors_error_up_through_nurseries') |     child = spawn('multi_nested_subactors_error_up_through_nurseries') | ||||||
| 
 | 
 | ||||||
|     # startup time can be iffy |     # startup time can be iffy | ||||||
|     time.sleep(1) |     # time.sleep(1) | ||||||
| 
 | 
 | ||||||
|     for i in range(12): |     for i in range(12): | ||||||
|         try: |         try: | ||||||
|  | @ -454,3 +481,21 @@ def test_root_nursery_cancels_before_child_releases_tty_lock( | ||||||
|         assert "tractor._exceptions.RemoteActorError: ('spawner0'" in before |         assert "tractor._exceptions.RemoteActorError: ('spawner0'" in before | ||||||
|         assert "tractor._exceptions.RemoteActorError: ('name_error'" in before |         assert "tractor._exceptions.RemoteActorError: ('name_error'" in before | ||||||
|         assert "NameError: name 'doggypants' is not defined" in before |         assert "NameError: name 'doggypants' is not defined" in before | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def test_root_cancels_child_context_during_startup( | ||||||
|  |     spawn, | ||||||
|  | ): | ||||||
|  |     '''Verify a fast fail in the root doesn't lock up the child reaping | ||||||
|  |     and all while using the new context api. | ||||||
|  | 
 | ||||||
|  |     ''' | ||||||
|  |     child = spawn('fast_error_in_root_after_spawn') | ||||||
|  | 
 | ||||||
|  |     child.expect(r"\(Pdb\+\+\)") | ||||||
|  | 
 | ||||||
|  |     before = str(child.before.decode()) | ||||||
|  |     assert "AssertionError" in before | ||||||
|  | 
 | ||||||
|  |     child.sendline('c') | ||||||
|  |     child.expect(pexpect.EOF) | ||||||
|  |  | ||||||
|  | @ -32,13 +32,16 @@ async def async_gen_stream(sequence): | ||||||
| 
 | 
 | ||||||
|     # block indefinitely waiting to be cancelled by ``aclose()`` call |     # block indefinitely waiting to be cancelled by ``aclose()`` call | ||||||
|     with trio.CancelScope() as cs: |     with trio.CancelScope() as cs: | ||||||
|         await trio.sleep(float('inf')) |         await trio.sleep_forever() | ||||||
|         assert 0 |         assert 0 | ||||||
|     assert cs.cancelled_caught |     assert cs.cancelled_caught | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @tractor.stream | @tractor.stream | ||||||
| async def context_stream(ctx, sequence): | async def context_stream( | ||||||
|  |     ctx: tractor.Context, | ||||||
|  |     sequence | ||||||
|  | ): | ||||||
|     for i in sequence: |     for i in sequence: | ||||||
|         await ctx.send_yield(i) |         await ctx.send_yield(i) | ||||||
|         await trio.sleep(0.1) |         await trio.sleep(0.1) | ||||||
|  | @ -338,6 +341,8 @@ async def test_respawn_consumer_task( | ||||||
|                         print("all values streamed, BREAKING") |                         print("all values streamed, BREAKING") | ||||||
|                         break |                         break | ||||||
| 
 | 
 | ||||||
|  |                 cs.cancel() | ||||||
|  | 
 | ||||||
|         # TODO: this is justification for a |         # TODO: this is justification for a | ||||||
|         # ``ActorNursery.stream_from_actor()`` helper? |         # ``ActorNursery.stream_from_actor()`` helper? | ||||||
|         await portal.cancel_actor() |         await portal.cancel_actor() | ||||||
|  |  | ||||||
|  | @ -5,11 +5,21 @@ tractor: An actor model micro-framework built on | ||||||
| from trio import MultiError | from trio import MultiError | ||||||
| 
 | 
 | ||||||
| from ._ipc import Channel | from ._ipc import Channel | ||||||
| from ._streaming import Context, stream | from ._streaming import ( | ||||||
|  |     Context, | ||||||
|  |     ReceiveMsgStream, | ||||||
|  |     MsgStream, | ||||||
|  |     stream, | ||||||
|  |     context, | ||||||
|  | ) | ||||||
| from ._discovery import get_arbiter, find_actor, wait_for_actor | from ._discovery import get_arbiter, find_actor, wait_for_actor | ||||||
| from ._trionics import open_nursery | from ._trionics import open_nursery | ||||||
| from ._state import current_actor, is_root_process | from ._state import current_actor, is_root_process | ||||||
| from ._exceptions import RemoteActorError, ModuleNotExposed | from ._exceptions import ( | ||||||
|  |     RemoteActorError, | ||||||
|  |     ModuleNotExposed, | ||||||
|  |     ContextCancelled, | ||||||
|  | ) | ||||||
| from ._debug import breakpoint, post_mortem | from ._debug import breakpoint, post_mortem | ||||||
| from . import msg | from . import msg | ||||||
| from ._root import run, run_daemon, open_root_actor | from ._root import run, run_daemon, open_root_actor | ||||||
|  | @ -21,6 +31,7 @@ __all__ = [ | ||||||
|     'ModuleNotExposed', |     'ModuleNotExposed', | ||||||
|     'MultiError', |     'MultiError', | ||||||
|     'RemoteActorError', |     'RemoteActorError', | ||||||
|  |     'ContextCancelled', | ||||||
|     'breakpoint', |     'breakpoint', | ||||||
|     'current_actor', |     'current_actor', | ||||||
|     'find_actor', |     'find_actor', | ||||||
|  | @ -33,7 +44,9 @@ __all__ = [ | ||||||
|     'run', |     'run', | ||||||
|     'run_daemon', |     'run_daemon', | ||||||
|     'stream', |     'stream', | ||||||
|     'wait_for_actor', |     'context', | ||||||
|  |     'ReceiveMsgStream', | ||||||
|  |     'MsgStream', | ||||||
|     'to_asyncio', |     'to_asyncio', | ||||||
|     'wait_for_actor', |     'wait_for_actor', | ||||||
| ] | ] | ||||||
|  |  | ||||||
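With the `__init__.py` changes above, the bidirectional-streaming names become part of the public namespace. A small sketch of the resulting downstream imports; the short descriptions in the comments are my reading of the tests earlier in this compare view, not docstrings from the branch:

```python
import tractor
from tractor import (
    Context,           # the linked inter-actor task context
    MsgStream,         # send/receive message stream
    ReceiveMsgStream,  # receive-only message stream
    ContextCancelled,  # raised when the linked context is cancelled
    context,           # decorator for context/bidir-stream endpoints
)


@tractor.context
async def endpoint(ctx: Context) -> None:
    # minimal context endpoint: sync with the caller then finish
    await ctx.started()
```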
|  | @ -1,5 +1,6 @@ | ||||||
| """ | """ | ||||||
| Actor primitives and helpers | Actor primitives and helpers | ||||||
|  | 
 | ||||||
| """ | """ | ||||||
| from collections import defaultdict | from collections import defaultdict | ||||||
| from functools import partial | from functools import partial | ||||||
|  | @ -11,9 +12,11 @@ import uuid | ||||||
| import typing | import typing | ||||||
| from typing import Dict, List, Tuple, Any, Optional, Union | from typing import Dict, List, Tuple, Any, Optional, Union | ||||||
| from types import ModuleType | from types import ModuleType | ||||||
|  | import signal | ||||||
| import sys | import sys | ||||||
| import os | import os | ||||||
| from contextlib import ExitStack | from contextlib import ExitStack | ||||||
|  | import warnings | ||||||
| 
 | 
 | ||||||
| import trio  # type: ignore | import trio  # type: ignore | ||||||
| from trio_typing import TaskStatus | from trio_typing import TaskStatus | ||||||
|  | @ -27,6 +30,8 @@ from ._exceptions import ( | ||||||
|     unpack_error, |     unpack_error, | ||||||
|     ModuleNotExposed, |     ModuleNotExposed, | ||||||
|     is_multi_cancelled, |     is_multi_cancelled, | ||||||
|  |     ContextCancelled, | ||||||
|  |     TransportClosed, | ||||||
| ) | ) | ||||||
| from . import _debug | from . import _debug | ||||||
| from ._discovery import get_arbiter | from ._discovery import get_arbiter | ||||||
|  | @ -43,6 +48,7 @@ class ActorFailure(Exception): | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def _invoke( | async def _invoke( | ||||||
|  | 
 | ||||||
|     actor: 'Actor', |     actor: 'Actor', | ||||||
|     cid: str, |     cid: str, | ||||||
|     chan: Channel, |     chan: Channel, | ||||||
|  | @ -55,15 +61,44 @@ async def _invoke( | ||||||
|     """Invoke local func and deliver result(s) over provided channel. |     """Invoke local func and deliver result(s) over provided channel. | ||||||
|     """ |     """ | ||||||
|     treat_as_gen = False |     treat_as_gen = False | ||||||
|     cs = None | 
 | ||||||
|  |     # possibly a traceback (not sure what the typing is for this..) | ||||||
|  |     tb = None | ||||||
|  | 
 | ||||||
|     cancel_scope = trio.CancelScope() |     cancel_scope = trio.CancelScope() | ||||||
|     ctx = Context(chan, cid, cancel_scope) |     cs: trio.CancelScope = None | ||||||
|  | 
 | ||||||
|  |     ctx = Context(chan, cid) | ||||||
|  |     context: bool = False | ||||||
| 
 | 
 | ||||||
|     if getattr(func, '_tractor_stream_function', False): |     if getattr(func, '_tractor_stream_function', False): | ||||||
|         # handle decorated ``@tractor.stream`` async functions |         # handle decorated ``@tractor.stream`` async functions | ||||||
|  |         sig = inspect.signature(func) | ||||||
|  |         params = sig.parameters | ||||||
|  | 
 | ||||||
|  |         # compat with old api | ||||||
|         kwargs['ctx'] = ctx |         kwargs['ctx'] = ctx | ||||||
|  | 
 | ||||||
|  |         if 'ctx' in params: | ||||||
|  |             warnings.warn( | ||||||
|  |                 "`@tractor.stream decorated funcs should now declare " | ||||||
|  |                 "a `stream`  arg, `ctx` is now designated for use with " | ||||||
|  |                 "@tractor.context", | ||||||
|  |                 DeprecationWarning, | ||||||
|  |                 stacklevel=2, | ||||||
|  |             ) | ||||||
|  | 
 | ||||||
|  |         elif 'stream' in params: | ||||||
|  |             assert 'stream' in params | ||||||
|  |             kwargs['stream'] = ctx | ||||||
|  | 
 | ||||||
|         treat_as_gen = True |         treat_as_gen = True | ||||||
| 
 | 
 | ||||||
|  |     elif getattr(func, '_tractor_context_function', False): | ||||||
|  |         # handle decorated ``@tractor.context`` async function | ||||||
|  |         kwargs['ctx'] = ctx | ||||||
|  |         context = True | ||||||
|  | 
 | ||||||
|     # errors raised inside this block are propagated back to caller |     # errors raised inside this block are propagated back to caller | ||||||
|     try: |     try: | ||||||
|         if not ( |         if not ( | ||||||
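The dispatch above keys off marker attributes (`_tractor_stream_function`, `_tractor_context_function`) that the decorators are expected to set. The decorators themselves aren't shown in this compare view, so the following is only a guessed sketch of the marker-attribute pattern `_invoke()` relies on, not the branch's actual `_streaming.py` code:

```python
from typing import Callable


def context(func: Callable) -> Callable:
    # hypothetical: tag the endpoint so ``_invoke()`` picks the bi-directional
    # "context" calling convention (sends a ``{'functype': 'context'}`` msg)
    func._tractor_context_function = True  # type: ignore
    return func


def stream(func: Callable) -> Callable:
    # hypothetical: tag the endpoint as a one-way streaming function
    func._tractor_stream_function = True  # type: ignore
    return func
```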
|  | @ -101,8 +136,9 @@ async def _invoke( | ||||||
|             # `StopAsyncIteration` system here for returning a final |             # `StopAsyncIteration` system here for returning a final | ||||||
|             # value if desired |             # value if desired | ||||||
|             await chan.send({'stop': True, 'cid': cid}) |             await chan.send({'stop': True, 'cid': cid}) | ||||||
|         else: | 
 | ||||||
|             if treat_as_gen: |         # one way @stream func that gets treated like an async gen | ||||||
|  |         elif treat_as_gen: | ||||||
|             await chan.send({'functype': 'asyncgen', 'cid': cid}) |             await chan.send({'functype': 'asyncgen', 'cid': cid}) | ||||||
|             # XXX: the async-func may spawn further tasks which push |             # XXX: the async-func may spawn further tasks which push | ||||||
|             # back values like an async-generator would but must |             # back values like an async-generator would but must | ||||||
|  | @ -111,10 +147,47 @@ async def _invoke( | ||||||
|             with cancel_scope as cs: |             with cancel_scope as cs: | ||||||
|                 task_status.started(cs) |                 task_status.started(cs) | ||||||
|                 await coro |                 await coro | ||||||
|  | 
 | ||||||
|             if not cs.cancelled_caught: |             if not cs.cancelled_caught: | ||||||
|                 # task was not cancelled so we can instruct the |                 # task was not cancelled so we can instruct the | ||||||
|                 # far end async gen to tear down |                 # far end async gen to tear down | ||||||
|                 await chan.send({'stop': True, 'cid': cid}) |                 await chan.send({'stop': True, 'cid': cid}) | ||||||
|  | 
 | ||||||
|  |         elif context: | ||||||
|  |             # context func with support for bi-dir streaming | ||||||
|  |             await chan.send({'functype': 'context', 'cid': cid}) | ||||||
|  | 
 | ||||||
|  |             async with trio.open_nursery() as scope_nursery: | ||||||
|  |                 ctx._scope_nursery = scope_nursery | ||||||
|  |                 cs = scope_nursery.cancel_scope | ||||||
|  |                 task_status.started(cs) | ||||||
|  |                 try: | ||||||
|  |                     await chan.send({'return': await coro, 'cid': cid}) | ||||||
|  |                 except trio.Cancelled as err: | ||||||
|  |                     tb = err.__traceback__ | ||||||
|  | 
 | ||||||
|  |             if cs.cancelled_caught: | ||||||
|  | 
 | ||||||
|  |                 # TODO: pack in ``trio.Cancelled.__traceback__`` here | ||||||
|  |                 # so they can be unwrapped and displayed on the caller | ||||||
|  |                 # side! | ||||||
|  | 
 | ||||||
|  |                 fname = func.__name__ | ||||||
|  |                 if ctx._cancel_called: | ||||||
|  |                     msg = f'{fname} cancelled itself' | ||||||
|  | 
 | ||||||
|  |                 elif cs.cancel_called: | ||||||
|  |                     msg = ( | ||||||
|  |                         f'{fname} was remotely cancelled by its caller ' | ||||||
|  |                         f'{ctx.chan.uid}' | ||||||
|  |                     ) | ||||||
|  | 
 | ||||||
|  |                 # task-context was cancelled so relay the cancel to the caller | ||||||
|  |                 raise ContextCancelled( | ||||||
|  |                     msg, | ||||||
|  |                     suberror_type=trio.Cancelled, | ||||||
|  |                 ) | ||||||
|  | 
 | ||||||
|         else: |         else: | ||||||
|             # regular async function |             # regular async function | ||||||
|             await chan.send({'functype': 'asyncfunc', 'cid': cid}) |             await chan.send({'functype': 'asyncfunc', 'cid': cid}) | ||||||
|  | @ -124,29 +197,45 @@ async def _invoke( | ||||||
| 
 | 
 | ||||||
|     except (Exception, trio.MultiError) as err: |     except (Exception, trio.MultiError) as err: | ||||||
| 
 | 
 | ||||||
|         # TODO: maybe we'll want differnet "levels" of debugging |         if not is_multi_cancelled(err): | ||||||
|  | 
 | ||||||
|  |             log.exception("Actor crashed:") | ||||||
|  | 
 | ||||||
|  |             # TODO: maybe we'll want different "levels" of debugging | ||||||
|             # eventually such as ('app', 'supervisory', 'runtime') ? |             # eventually such as ('app', 'supervisory', 'runtime') ? | ||||||
|         if not isinstance(err, trio.ClosedResourceError) and ( | 
 | ||||||
|             not is_multi_cancelled(err) |             # if not isinstance(err, trio.ClosedResourceError) and ( | ||||||
|  |             # if not is_multi_cancelled(err) and ( | ||||||
|  | 
 | ||||||
|  |             entered_debug: bool = False | ||||||
|  |             if not isinstance(err, ContextCancelled) or ( | ||||||
|  |                 isinstance(err, ContextCancelled) and ctx._cancel_called | ||||||
|             ): |             ): | ||||||
|                 # XXX: is there any case where we'll want to debug IPC |                 # XXX: is there any case where we'll want to debug IPC | ||||||
|             # disconnects? I can't think of a reason that inspecting |                 # disconnects as a default? | ||||||
|  |                 # | ||||||
|  |                 # I can't think of a reason that inspecting | ||||||
|                 # this type of failure will be useful for respawns or |                 # this type of failure will be useful for respawns or | ||||||
|                 # recovery logic - the only case is some kind of strange bug |                 # recovery logic - the only case is some kind of strange bug | ||||||
|             # in `trio` itself? |                 # in our transport layer itself? Going to keep this | ||||||
|             entered = await _debug._maybe_enter_pm(err) |                 # open ended for now. | ||||||
|             if not entered: | 
 | ||||||
|  |                 entered_debug = await _debug._maybe_enter_pm(err) | ||||||
|  | 
 | ||||||
|  |             if not entered_debug: | ||||||
|                 log.exception("Actor crashed:") |                 log.exception("Actor crashed:") | ||||||
| 
 | 
 | ||||||
|         # always ship errors back to caller |         # always ship errors back to caller | ||||||
|         err_msg = pack_error(err) |         err_msg = pack_error(err, tb=tb) | ||||||
|         err_msg['cid'] = cid |         err_msg['cid'] = cid | ||||||
|         try: |         try: | ||||||
|             await chan.send(err_msg) |             await chan.send(err_msg) | ||||||
| 
 | 
 | ||||||
|         except trio.ClosedResourceError: |         except trio.ClosedResourceError: | ||||||
|             log.warning( |             # if we can't propagate the error that's a big boo boo | ||||||
|                 f"Failed to ship error to caller @ {chan.uid}") |             log.error( | ||||||
|  |                 f"Failed to ship error to caller @ {chan.uid} !?" | ||||||
|  |             ) | ||||||
| 
 | 
 | ||||||
|         if cs is None: |         if cs is None: | ||||||
|             # error is from above code not from rpc invocation |             # error is from above code not from rpc invocation | ||||||
|  | @ -164,7 +253,7 @@ async def _invoke( | ||||||
|                 f"Task {func} likely errored or cancelled before it started") |                 f"Task {func} likely errored or cancelled before it started") | ||||||
|         finally: |         finally: | ||||||
|             if not actor._rpc_tasks: |             if not actor._rpc_tasks: | ||||||
|                 log.info("All RPC tasks have completed") |                 log.runtime("All RPC tasks have completed") | ||||||
|                 actor._ongoing_rpc_tasks.set() |                 actor._ongoing_rpc_tasks.set() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -179,10 +268,10 @@ _lifetime_stack: ExitStack = ExitStack() | ||||||
| class Actor: | class Actor: | ||||||
|     """The fundamental concurrency primitive. |     """The fundamental concurrency primitive. | ||||||
| 
 | 
 | ||||||
|     An *actor* is the combination of a regular Python or |     An *actor* is the combination of a regular Python process | ||||||
|     ``multiprocessing.Process`` executing a ``trio`` task tree, communicating |     executing a ``trio`` task tree, communicating | ||||||
|     with other actors through "portals" which provide a native async API |     with other actors through "portals" which provide a native async API | ||||||
|     around "channels". |     around various IPC transport "channels". | ||||||
|     """ |     """ | ||||||
|     is_arbiter: bool = False |     is_arbiter: bool = False | ||||||
| 
 | 
 | ||||||
|  | @ -202,7 +291,7 @@ class Actor: | ||||||
|         enable_modules: List[str] = [], |         enable_modules: List[str] = [], | ||||||
|         uid: str = None, |         uid: str = None, | ||||||
|         loglevel: str = None, |         loglevel: str = None, | ||||||
|         arbiter_addr: Optional[Tuple[str, int]] = None, |         arbiter_addr: Optional[Tuple[str, int]] = (None, None), | ||||||
|         spawn_method: Optional[str] = None |         spawn_method: Optional[str] = None | ||||||
|     ) -> None: |     ) -> None: | ||||||
|         """This constructor is called in the parent actor **before** the spawning |         """This constructor is called in the parent actor **before** the spawning | ||||||
|  | @ -232,7 +321,7 @@ class Actor: | ||||||
|         # TODO: consider making this a dynamically defined |         # TODO: consider making this a dynamically defined | ||||||
|         # @dataclass once we get py3.7 |         # @dataclass once we get py3.7 | ||||||
|         self.loglevel = loglevel |         self.loglevel = loglevel | ||||||
|         self._arb_addr = arbiter_addr |         self._arb_addr = tuple(arbiter_addr) | ||||||
| 
 | 
 | ||||||
|         # marked by the process spawning backend at startup |         # marked by the process spawning backend at startup | ||||||
|         # will be None for the parent most process started manually |         # will be None for the parent most process started manually | ||||||
|  | @ -262,7 +351,7 @@ class Actor: | ||||||
|         self._parent_chan: Optional[Channel] = None |         self._parent_chan: Optional[Channel] = None | ||||||
|         self._forkserver_info: Optional[ |         self._forkserver_info: Optional[ | ||||||
|             Tuple[Any, Any, Any, Any, Any]] = None |             Tuple[Any, Any, Any, Any, Any]] = None | ||||||
|         self._actoruid2nursery: Dict[str, 'ActorNursery'] = {}  # type: ignore |         self._actoruid2nursery: Dict[str, 'ActorNursery'] = {}  # type: ignore  # noqa | ||||||
| 
 | 
 | ||||||
|     async def wait_for_peer( |     async def wait_for_peer( | ||||||
|         self, uid: Tuple[str, str] |         self, uid: Tuple[str, str] | ||||||
|  | @ -326,19 +415,34 @@ class Actor: | ||||||
|             raise mne |             raise mne | ||||||
| 
 | 
 | ||||||
|     async def _stream_handler( |     async def _stream_handler( | ||||||
|  | 
 | ||||||
|         self, |         self, | ||||||
|         stream: trio.SocketStream, |         stream: trio.SocketStream, | ||||||
|  | 
 | ||||||
|     ) -> None: |     ) -> None: | ||||||
|         """Entry point for new inbound connections to the channel server. |         """Entry point for new inbound connections to the channel server. | ||||||
|  | 
 | ||||||
|         """ |         """ | ||||||
|         self._no_more_peers = trio.Event()  # unset |         self._no_more_peers = trio.Event()  # unset | ||||||
|  | 
 | ||||||
|         chan = Channel(stream=stream) |         chan = Channel(stream=stream) | ||||||
|         log.info(f"New connection to us {chan}") |         log.runtime(f"New connection to us {chan}") | ||||||
| 
 | 
 | ||||||
|         # send/receive initial handshake response |         # send/receive initial handshake response | ||||||
|         try: |         try: | ||||||
|             uid = await self._do_handshake(chan) |             uid = await self._do_handshake(chan) | ||||||
|         except StopAsyncIteration: | 
 | ||||||
|  |         except ( | ||||||
|  |             trio.BrokenResourceError, | ||||||
|  |             trio.ClosedResourceError, | ||||||
|  |             TransportClosed, | ||||||
|  |         ): | ||||||
|  |             # XXX: This may propagate up from ``Channel._aiter_recv()`` | ||||||
|  |             # and ``MsgpackStream._inter_packets()`` on a read from the | ||||||
|  |             # stream particularly when the runtime is first starting up | ||||||
|  |             # inside ``open_root_actor()`` where there is a check for | ||||||
|  |             # a bound listener on the "arbiter" addr. The reset happens | ||||||
|  |             # because the handshake was never meant to take place. | ||||||
|             log.warning(f"Channel {chan} failed to handshake") |             log.warning(f"Channel {chan} failed to handshake") | ||||||
|             return |             return | ||||||
| 
 | 
 | ||||||
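The tolerated handshake failure above shows up, for example, when ``open_root_actor()`` probes whether an arbiter is already bound by connecting and then dropping the connection; the accept side sees the reset before any handshake bytes arrive. A plain-``trio`` sketch of such a probe (the helper name and probe-then-close behaviour are illustrative assumptions, not the actual ``tractor`` internals):

    import trio

    async def arbiter_already_bound(addr) -> bool:
        # connect-then-close "is anyone listening?" probe; the listener's
        # first read fails (broken/closed transport) since we never
        # complete a handshake.
        try:
            stream = await trio.open_tcp_stream(*addr)
        except OSError:
            return False  # nothing bound on that address
        await stream.aclose()
        return True

    # e.g.: trio.run(arbiter_already_bound, ('127.0.0.1', 1616))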
|  | @ -353,11 +457,16 @@ class Actor: | ||||||
|             event.set() |             event.set() | ||||||
| 
 | 
 | ||||||
|         chans = self._peers[uid] |         chans = self._peers[uid] | ||||||
|  | 
 | ||||||
|  |         # TODO: re-use channels for new connections instead  | ||||||
|  |         # of always new ones; will require changing all the | ||||||
|  |         # discovery funcs | ||||||
|         if chans: |         if chans: | ||||||
|             log.warning( |             log.runtime( | ||||||
|                 f"already have channel(s) for {uid}:{chans}?" |                 f"already have channel(s) for {uid}:{chans}?" | ||||||
|             ) |             ) | ||||||
|         log.trace(f"Registered {chan} for {uid}")  # type: ignore | 
 | ||||||
|  |         log.runtime(f"Registered {chan} for {uid}")  # type: ignore | ||||||
|         # append new channel |         # append new channel | ||||||
|         self._peers[uid].append(chan) |         self._peers[uid].append(chan) | ||||||
| 
 | 
 | ||||||
|  | @ -366,10 +475,24 @@ class Actor: | ||||||
|         try: |         try: | ||||||
|             await self._process_messages(chan) |             await self._process_messages(chan) | ||||||
|         finally: |         finally: | ||||||
|  | 
 | ||||||
|  |             # channel cleanup sequence | ||||||
|  | 
 | ||||||
|  |             # for (channel, cid) in self._rpc_tasks.copy(): | ||||||
|  |             #     if channel is chan: | ||||||
|  |             #         with trio.CancelScope(shield=True): | ||||||
|  |             #             await self._cancel_task(cid, channel) | ||||||
|  | 
 | ||||||
|  |             #             # close all consumer side task mem chans | ||||||
|  |             #             send_chan, _ = self._cids2qs[(chan.uid, cid)] | ||||||
|  |             #             assert send_chan.cid == cid  # type: ignore | ||||||
|  |             #             await send_chan.aclose() | ||||||
|  | 
 | ||||||
|             # Drop ref to channel so it can be gc-ed and disconnected |             # Drop ref to channel so it can be gc-ed and disconnected | ||||||
|             log.debug(f"Releasing channel {chan} from {chan.uid}") |             log.debug(f"Releasing channel {chan} from {chan.uid}") | ||||||
|             chans = self._peers.get(chan.uid) |             chans = self._peers.get(chan.uid) | ||||||
|             chans.remove(chan) |             chans.remove(chan) | ||||||
|  | 
 | ||||||
|             if not chans: |             if not chans: | ||||||
|                 log.debug(f"No more channels for {chan.uid}") |                 log.debug(f"No more channels for {chan.uid}") | ||||||
|                 self._peers.pop(chan.uid, None) |                 self._peers.pop(chan.uid, None) | ||||||
|  | @ -382,14 +505,22 @@ class Actor: | ||||||
| 
 | 
 | ||||||
|             # # XXX: is this necessary (GC should do it?) |             # # XXX: is this necessary (GC should do it?) | ||||||
|             if chan.connected(): |             if chan.connected(): | ||||||
|  |                 # if the channel is still connected it may mean the far | ||||||
|  |                 # end has not closed and we may have gotten here due to | ||||||
|  |                 # an error and so we should at least try to terminate | ||||||
|  |                 # the channel from this end gracefully. | ||||||
|  | 
 | ||||||
|                 log.debug(f"Disconnecting channel {chan}") |                 log.debug(f"Disconnecting channel {chan}") | ||||||
|                 try: |                 try: | ||||||
|                     # send our msg loop terminate sentinel |                     # send a msg loop terminate sentinel | ||||||
|                     await chan.send(None) |                     await chan.send(None) | ||||||
|  | 
 | ||||||
|  |                     # XXX: do we want this? | ||||||
|  |                     # causes "[104] connection reset by peer" on other end | ||||||
|                     # await chan.aclose() |                     # await chan.aclose() | ||||||
|  | 
 | ||||||
|                 except trio.BrokenResourceError: |                 except trio.BrokenResourceError: | ||||||
|                     log.exception( |                     log.warning(f"Channel for {chan.uid} was already closed") | ||||||
|                         f"Channel for {chan.uid} was already zonked..") |  | ||||||
| 
 | 
 | ||||||
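The "graceful disconnect" attempt above boils down to: try to tell the far end to stop its msg loop, but tolerate it having already vanished. A stripped-down sketch with a plain ``trio`` stream standing in for the real ``Channel`` (the byte sentinel is an illustrative stand-in for the msgpack-encoded ``None``):

    import trio

    async def graceful_disconnect(stream: trio.abc.SendStream) -> None:
        try:
            # stand-in for ``await chan.send(None)``, the msg-loop
            # terminate sentinel
            await stream.send_all(b'\x00')
        except trio.BrokenResourceError:
            # far end already dropped the connection; nothing left to do
            print('channel was already closed by the far end')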
|     async def _push_result( |     async def _push_result( | ||||||
|         self, |         self, | ||||||
|  | @ -399,22 +530,32 @@ class Actor: | ||||||
|     ) -> None: |     ) -> None: | ||||||
|         """Push an RPC result to the local consumer's queue. |         """Push an RPC result to the local consumer's queue. | ||||||
|         """ |         """ | ||||||
|         actorid = chan.uid |         # actorid = chan.uid | ||||||
|         assert actorid, f"`actorid` can't be {actorid}" |         assert chan.uid, f"`chan.uid` can't be {chan.uid}" | ||||||
|         send_chan, recv_chan = self._cids2qs[(actorid, cid)] |         send_chan, recv_chan = self._cids2qs[(chan.uid, cid)] | ||||||
|         assert send_chan.cid == cid  # type: ignore |         assert send_chan.cid == cid  # type: ignore | ||||||
| 
 | 
 | ||||||
|         if 'stop' in msg: |         if 'error' in msg: | ||||||
|             log.debug(f"{send_chan} was terminated at remote end") |             ctx = getattr(recv_chan, '_ctx', None) | ||||||
|             # indicate to consumer that far end has stopped |             # if ctx: | ||||||
|             return await send_chan.aclose() |             #     ctx._error_from_remote_msg(msg) | ||||||
|  | 
 | ||||||
|  |         #     log.debug(f"{send_chan} was terminated at remote end") | ||||||
|  |         #     # indicate to consumer that far end has stopped | ||||||
|  |         #     return await send_chan.aclose() | ||||||
| 
 | 
 | ||||||
|         try: |         try: | ||||||
|             log.debug(f"Delivering {msg} from {actorid} to caller {cid}") |             log.debug(f"Delivering {msg} from {chan.uid} to caller {cid}") | ||||||
|             # maintain backpressure |             # maintain backpressure | ||||||
|             await send_chan.send(msg) |             await send_chan.send(msg) | ||||||
| 
 | 
 | ||||||
|         except trio.BrokenResourceError: |         except trio.BrokenResourceError: | ||||||
|  |             # TODO: what is the right way to handle the case where the | ||||||
|  |             # local task has already sent a 'stop' / StopAsyncIteration | ||||||
|  |             # to the other side and possibly has closed the local | ||||||
|  |             # feeder mem chan? Do we wait for some kind of ack or just | ||||||
|  |             # let this fail silently and bubble up (currently)? | ||||||
|  | 
 | ||||||
|             # XXX: local consumer has closed their side |             # XXX: local consumer has closed their side | ||||||
|             # so cancel the far end streaming task |             # so cancel the far end streaming task | ||||||
|             log.warning(f"{send_chan} consumer is already closed") |             log.warning(f"{send_chan} consumer is already closed") | ||||||
|  | @ -423,7 +564,9 @@ class Actor: | ||||||
|         self, |         self, | ||||||
|         actorid: Tuple[str, str], |         actorid: Tuple[str, str], | ||||||
|         cid: str |         cid: str | ||||||
|  | 
 | ||||||
|     ) -> Tuple[trio.abc.SendChannel, trio.abc.ReceiveChannel]: |     ) -> Tuple[trio.abc.SendChannel, trio.abc.ReceiveChannel]: | ||||||
|  | 
 | ||||||
|         log.debug(f"Getting result queue for {actorid} cid {cid}") |         log.debug(f"Getting result queue for {actorid} cid {cid}") | ||||||
|         try: |         try: | ||||||
|             send_chan, recv_chan = self._cids2qs[(actorid, cid)] |             send_chan, recv_chan = self._cids2qs[(actorid, cid)] | ||||||
|  | @ -476,24 +619,35 @@ class Actor: | ||||||
|                 # ``scope = Nursery.start()`` |                 # ``scope = Nursery.start()`` | ||||||
|                 task_status.started(loop_cs) |                 task_status.started(loop_cs) | ||||||
|                 async for msg in chan: |                 async for msg in chan: | ||||||
|  | 
 | ||||||
|                     if msg is None:  # loop terminate sentinel |                     if msg is None:  # loop terminate sentinel | ||||||
|  | 
 | ||||||
|                         log.debug( |                         log.debug( | ||||||
|                             f"Cancelling all tasks for {chan} from {chan.uid}") |                             f"Cancelling all tasks for {chan} from {chan.uid}") | ||||||
|                         for (channel, cid) in self._rpc_tasks: | 
 | ||||||
|  |                         for (channel, cid) in self._rpc_tasks.copy(): | ||||||
|                             if channel is chan: |                             if channel is chan: | ||||||
|                                 await self._cancel_task(cid, channel) |                                 await self._cancel_task(cid, channel) | ||||||
|  | 
 | ||||||
|  |                                 # close all consumer side task mem chans | ||||||
|  |                                 # send_chan, _ = self._cids2qs[(chan.uid, cid)] | ||||||
|  |                                 # assert send_chan.cid == cid  # type: ignore | ||||||
|  |                                 # await send_chan.aclose() | ||||||
|  | 
 | ||||||
|                         log.debug( |                         log.debug( | ||||||
|                                 f"Msg loop signalled to terminate for" |                                 f"Msg loop signalled to terminate for" | ||||||
|                                 f" {chan} from {chan.uid}") |                                 f" {chan} from {chan.uid}") | ||||||
|  | 
 | ||||||
|                         break |                         break | ||||||
| 
 | 
 | ||||||
|                     log.trace(   # type: ignore |                     log.transport(   # type: ignore | ||||||
|                         f"Received msg {msg} from {chan.uid}") |                         f"Received msg {msg} from {chan.uid}") | ||||||
| 
 | 
 | ||||||
|                     cid = msg.get('cid') |                     cid = msg.get('cid') | ||||||
|                     if cid: |                     if cid: | ||||||
|                         # deliver response to local caller/waiter |                         # deliver response to local caller/waiter | ||||||
|                         await self._push_result(chan, cid, msg) |                         await self._push_result(chan, cid, msg) | ||||||
|  | 
 | ||||||
|                         log.debug( |                         log.debug( | ||||||
|                             f"Waiting on next msg for {chan} from {chan.uid}") |                             f"Waiting on next msg for {chan} from {chan.uid}") | ||||||
|                         continue |                         continue | ||||||
|  | @ -554,7 +708,7 @@ class Actor: | ||||||
|                         else: |                         else: | ||||||
|                             # mark that we have ongoing rpc tasks |                             # mark that we have ongoing rpc tasks | ||||||
|                             self._ongoing_rpc_tasks = trio.Event() |                             self._ongoing_rpc_tasks = trio.Event() | ||||||
|                             log.info(f"RPC func is {func}") |                             log.runtime(f"RPC func is {func}") | ||||||
|                             # store cancel scope such that the rpc task can be |                             # store cancel scope such that the rpc task can be | ||||||
|                             # cancelled gracefully if requested |                             # cancelled gracefully if requested | ||||||
|                             self._rpc_tasks[(chan, cid)] = ( |                             self._rpc_tasks[(chan, cid)] = ( | ||||||
|  | @ -563,7 +717,7 @@ class Actor: | ||||||
|                         # self.cancel() was called so kill this msg loop |                         # self.cancel() was called so kill this msg loop | ||||||
|                         # and break out into ``_async_main()`` |                         # and break out into ``_async_main()`` | ||||||
|                         log.warning( |                         log.warning( | ||||||
|                             f"{self.uid} was remotely cancelled; " |                             f"Actor {self.uid} was remotely cancelled; " | ||||||
|                             "waiting on cancellation completion..") |                             "waiting on cancellation completion..") | ||||||
|                         await self._cancel_complete.wait() |                         await self._cancel_complete.wait() | ||||||
|                         loop_cs.cancel() |                         loop_cs.cancel() | ||||||
|  | @ -578,22 +732,35 @@ class Actor: | ||||||
|                     ) |                     ) | ||||||
|                     await self.cancel_rpc_tasks(chan) |                     await self.cancel_rpc_tasks(chan) | ||||||
| 
 | 
 | ||||||
|         except trio.ClosedResourceError: |         except ( | ||||||
|             log.error(f"{chan} form {chan.uid} broke") |             TransportClosed, | ||||||
|  |             trio.BrokenResourceError, | ||||||
|  |             # trio.ClosedResourceError | ||||||
|  |         ): | ||||||
|  |             # channels "breaking" is ok since we don't have a teardown | ||||||
|  |             # handshake for them (yet) and instead we simply bail out | ||||||
|  |             # of the message loop and expect the surrounding | ||||||
|  |             # caller's teardown sequence to clean up. | ||||||
|  |             log.warning(f"Channel from {chan.uid} closed abruptly") | ||||||
|  | 
 | ||||||
|         except (Exception, trio.MultiError) as err: |         except (Exception, trio.MultiError) as err: | ||||||
|             # ship any "internal" exception (i.e. one from internal machinery |             # ship any "internal" exception (i.e. one from internal machinery | ||||||
|             # not from an rpc task) to parent |             # not from an rpc task) to parent | ||||||
|             log.exception("Actor errored:") |             log.exception("Actor errored:") | ||||||
|             if self._parent_chan: |             if self._parent_chan: | ||||||
|                 await self._parent_chan.send(pack_error(err)) |                 await self._parent_chan.send(pack_error(err)) | ||||||
|             raise | 
 | ||||||
|             # if this is the `MainProcess` we expect the error broadcasting |             # if this is the `MainProcess` we expect the error broadcasting | ||||||
|             # above to trigger an error at consuming portal "checkpoints" |             # above to trigger an error at consuming portal "checkpoints" | ||||||
|  |             raise | ||||||
|  | 
 | ||||||
|         except trio.Cancelled: |         except trio.Cancelled: | ||||||
|             # debugging only |             # debugging only | ||||||
|             log.debug(f"Msg loop was cancelled for {chan}") |             log.debug(f"Msg loop was cancelled for {chan}") | ||||||
|             raise |             raise | ||||||
|  | 
 | ||||||
|         finally: |         finally: | ||||||
|  |             # msg debugging for when the machinery is brokey | ||||||
|             log.debug( |             log.debug( | ||||||
|                 f"Exiting msg loop for {chan} from {chan.uid} " |                 f"Exiting msg loop for {chan} from {chan.uid} " | ||||||
|                 f"with last msg:\n{msg}") |                 f"with last msg:\n{msg}") | ||||||
|  | @ -633,8 +800,22 @@ class Actor: | ||||||
|                 _state._runtime_vars.update(rvs) |                 _state._runtime_vars.update(rvs) | ||||||
| 
 | 
 | ||||||
|                 for attr, value in parent_data.items(): |                 for attr, value in parent_data.items(): | ||||||
|  | 
 | ||||||
|  |                     if attr == '_arb_addr': | ||||||
|  |                         # XXX: msgspec doesn't support serializing tuples | ||||||
|  |                         # so just cast manually here since it's what our | ||||||
|  |                         # internals expect. | ||||||
|  |                         self._arb_addr = tuple(value) | ||||||
|  | 
 | ||||||
|  |                     else: | ||||||
|                         setattr(self, attr, value) |                         setattr(self, attr, value) | ||||||
| 
 | 
 | ||||||
|  |                 # Disable sigint handling in children if NOT running in | ||||||
|  |                 # debug mode; we shouldn't need it thanks to our | ||||||
|  |                 # cancellation machinery. | ||||||
|  |                 # if '_debug_mode' not in rvs: | ||||||
|  |                 # signal.signal(signal.SIGINT, signal.SIG_IGN) | ||||||
|  | 
 | ||||||
|             return chan, accept_addr |             return chan, accept_addr | ||||||
| 
 | 
 | ||||||
|         except OSError:  # failed to connect |         except OSError:  # failed to connect | ||||||
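The ``tuple(value)`` cast above exists because common wire formats (msgpack/json style) round-trip Python tuples as lists, while the runtime uses the address as-is, including as a hashable registry key. A tiny stdlib-only illustration of the same effect, using ``json`` as a stand-in for the actual codec:

    import json

    arb_addr = ('127.0.0.1', 1616)

    decoded = json.loads(json.dumps({'_arb_addr': arb_addr}))['_arb_addr']
    assert decoded == ['127.0.0.1', 1616]  # came back as a list, not a tuple
    assert tuple(decoded) == arb_addr      # the cast restores what internals expect

    registry = {tuple(decoded): ('127.0.0.1', 0)}  # usable as a dict key again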
|  | @ -1012,13 +1193,14 @@ class Actor: | ||||||
|         parlance. |         parlance. | ||||||
|         """ |         """ | ||||||
|         await chan.send(self.uid) |         await chan.send(self.uid) | ||||||
|         uid: Tuple[str, str] = await chan.recv() |         # breakpoint() | ||||||
|  |         uid: Tuple[str, str] = tuple(await chan.recv()) | ||||||
| 
 | 
 | ||||||
|         if not isinstance(uid, tuple): |         # if not isinstance(uid, tuple): | ||||||
|             raise ValueError(f"{uid} is not a valid uid?!") |         #     raise ValueError(f"{uid} is not a valid uid?!") | ||||||
| 
 | 
 | ||||||
|         chan.uid = uid |         chan.uid = uid | ||||||
|         log.info(f"Handshake with actor {uid}@{chan.raddr} complete") |         log.runtime(f"Handshake with actor {uid}@{chan.raddr} complete") | ||||||
|         return uid |         return uid | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -1082,8 +1264,9 @@ class Arbiter(Actor): | ||||||
|     async def register_actor( |     async def register_actor( | ||||||
|         self, uid: Tuple[str, str], sockaddr: Tuple[str, int] |         self, uid: Tuple[str, str], sockaddr: Tuple[str, int] | ||||||
|     ) -> None: |     ) -> None: | ||||||
|  |         uid = tuple(uid) | ||||||
|         name, uuid = uid |         name, uuid = uid | ||||||
|         self._registry[uid] = sockaddr |         self._registry[uid] = tuple(sockaddr) | ||||||
| 
 | 
 | ||||||
|         # pop and signal all waiter events |         # pop and signal all waiter events | ||||||
|         events = self._waiters.pop(name, ()) |         events = self._waiters.pop(name, ()) | ||||||
|  | @ -1093,4 +1276,4 @@ class Arbiter(Actor): | ||||||
|                 event.set() |                 event.set() | ||||||
| 
 | 
 | ||||||
|     async def unregister_actor(self, uid: Tuple[str, str]) -> None: |     async def unregister_actor(self, uid: Tuple[str, str]) -> None: | ||||||
|         self._registry.pop(uid) |         self._registry.pop(tuple(uid)) | ||||||
|  |  | ||||||
|  | @ -1,13 +1,13 @@ | ||||||
| """ | """ | ||||||
| Multi-core debugging for da peeps! | Multi-core debugging for da peeps! | ||||||
|  | 
 | ||||||
| """ | """ | ||||||
| import bdb | import bdb | ||||||
| import sys | import sys | ||||||
| from functools import partial | from functools import partial | ||||||
| from contextlib import asynccontextmanager | from contextlib import asynccontextmanager | ||||||
| from typing import Awaitable, Tuple, Optional, Callable, AsyncIterator | from typing import Tuple, Optional, Callable, AsyncIterator | ||||||
| 
 | 
 | ||||||
| from async_generator import aclosing |  | ||||||
| import tractor | import tractor | ||||||
| import trio | import trio | ||||||
| 
 | 
 | ||||||
|  | @ -31,14 +31,22 @@ log = get_logger(__name__) | ||||||
| 
 | 
 | ||||||
| __all__ = ['breakpoint', 'post_mortem'] | __all__ = ['breakpoint', 'post_mortem'] | ||||||
| 
 | 
 | ||||||
|  | 
 | ||||||
|  | # TODO: wrap all these in a static global class: ``DebugLock`` maybe? | ||||||
|  | 
 | ||||||
| # placeholder for function to set a ``trio.Event`` on debugger exit | # placeholder for function to set a ``trio.Event`` on debugger exit | ||||||
| _pdb_release_hook: Optional[Callable] = None | _pdb_release_hook: Optional[Callable] = None | ||||||
| 
 | 
 | ||||||
| # actor-wide variable pointing to current task name using debugger | # actor-wide variable pointing to current task name using debugger | ||||||
| _in_debug = False | _local_task_in_debug: Optional[str] = None | ||||||
|  | 
 | ||||||
|  | # actor tree-wide actor uid that supposedly has the tty lock | ||||||
|  | _global_actor_in_debug: Optional[Tuple[str, str]] = None | ||||||
| 
 | 
 | ||||||
| # lock in root actor preventing multi-access to local tty | # lock in root actor preventing multi-access to local tty | ||||||
| _debug_lock = trio.StrictFIFOLock() | _debug_lock: trio.StrictFIFOLock = trio.StrictFIFOLock() | ||||||
|  | _local_pdb_complete: Optional[trio.Event] = None | ||||||
|  | _no_remote_has_tty: Optional[trio.Event] = None | ||||||
| 
 | 
 | ||||||
| # XXX: set by the current task waiting on the root tty lock | # XXX: set by the current task waiting on the root tty lock | ||||||
| # and must be cancelled if this actor is cancelled via message | # and must be cancelled if this actor is cancelled via message | ||||||
|  | @ -61,19 +69,19 @@ class PdbwTeardown(pdbpp.Pdb): | ||||||
|     # TODO: figure out how to disallow recursive .set_trace() entry |     # TODO: figure out how to disallow recursive .set_trace() entry | ||||||
|     # since that'll cause deadlock for us. |     # since that'll cause deadlock for us. | ||||||
|     def set_continue(self): |     def set_continue(self): | ||||||
|         global _in_debug |  | ||||||
|         try: |         try: | ||||||
|             super().set_continue() |             super().set_continue() | ||||||
|         finally: |         finally: | ||||||
|             _in_debug = False |             global _local_task_in_debug | ||||||
|  |             _local_task_in_debug = None | ||||||
|             _pdb_release_hook() |             _pdb_release_hook() | ||||||
| 
 | 
 | ||||||
|     def set_quit(self): |     def set_quit(self): | ||||||
|         global _in_debug |  | ||||||
|         try: |         try: | ||||||
|             super().set_quit() |             super().set_quit() | ||||||
|         finally: |         finally: | ||||||
|             _in_debug = False |             global _local_task_in_debug | ||||||
|  |             _local_task_in_debug = None | ||||||
|             _pdb_release_hook() |             _pdb_release_hook() | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
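The ``set_continue()``/``set_quit()`` overrides above exist so a release hook always runs when the user leaves the debugger. The same idea against stdlib ``pdb.Pdb`` (instead of ``pdbpp.Pdb``), as a minimal sketch:

    import pdb

    def release_tty_lock() -> None:
        # stand-in for the module level ``_pdb_release_hook``
        print('tty lock released')

    class PdbWithTeardown(pdb.Pdb):

        def set_continue(self):
            try:
                super().set_continue()
            finally:
                release_tty_lock()

        def set_quit(self):
            try:
                super().set_quit()
            finally:
                release_tty_lock()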
|  | @ -102,7 +110,7 @@ class PdbwTeardown(pdbpp.Pdb): | ||||||
| 
 | 
 | ||||||
| #     async with aclosing(async_stdin): | #     async with aclosing(async_stdin): | ||||||
| #         async for msg in async_stdin: | #         async for msg in async_stdin: | ||||||
| #             log.trace(f"Stdin input:\n{msg}") | #             log.runtime(f"Stdin input:\n{msg}") | ||||||
| #             # encode to bytes | #             # encode to bytes | ||||||
| #             bmsg = str.encode(msg) | #             bmsg = str.encode(msg) | ||||||
| 
 | 
 | ||||||
|  | @ -116,20 +124,72 @@ class PdbwTeardown(pdbpp.Pdb): | ||||||
| 
 | 
 | ||||||
| @asynccontextmanager | @asynccontextmanager | ||||||
| async def _acquire_debug_lock(uid: Tuple[str, str]) -> AsyncIterator[None]: | async def _acquire_debug_lock(uid: Tuple[str, str]) -> AsyncIterator[None]: | ||||||
|     """Acquire a actor local FIFO lock meant to mutex entry to a local |     '''Acquire a actor local FIFO lock meant to mutex entry to a local | ||||||
|     debugger entry point to avoid tty clobbering by multiple processes. |     debugger entry point to avoid tty clobbering a global root process. | ||||||
|     """ | 
 | ||||||
|  |     ''' | ||||||
|  |     global _debug_lock, _global_actor_in_debug, _no_remote_has_tty | ||||||
|  | 
 | ||||||
|     task_name = trio.lowlevel.current_task().name |     task_name = trio.lowlevel.current_task().name | ||||||
|  | 
 | ||||||
|  |     log.pdb( | ||||||
|  |         f"Attempting to acquire TTY lock, remote task: {task_name}:{uid}" | ||||||
|  |     ) | ||||||
|  | 
 | ||||||
|  |     we_acquired = False | ||||||
|  | 
 | ||||||
|  |     if _no_remote_has_tty is None: | ||||||
|  |         # mark the tty lock as being in use so that the runtime | ||||||
|  |         # can try to avoid clobbering any connection from a child | ||||||
|  |         # that's currently relying on it. | ||||||
|  |         _no_remote_has_tty = trio.Event() | ||||||
|  | 
 | ||||||
|     try: |     try: | ||||||
|         log.debug( |         log.debug( | ||||||
|             f"Attempting to acquire TTY lock, remote task: {task_name}:{uid}") |             f"entering lock checkpoint, remote task: {task_name}:{uid}" | ||||||
|  |         ) | ||||||
|  |         # with trio.CancelScope(shield=True): | ||||||
|  |         we_acquired = True | ||||||
|         await _debug_lock.acquire() |         await _debug_lock.acquire() | ||||||
| 
 | 
 | ||||||
|  |         # we_acquired = True | ||||||
|  | 
 | ||||||
|  |         _global_actor_in_debug = uid | ||||||
|         log.debug(f"TTY lock acquired, remote task: {task_name}:{uid}") |         log.debug(f"TTY lock acquired, remote task: {task_name}:{uid}") | ||||||
|         yield | 
 | ||||||
|  |         # NOTE: critical section! | ||||||
|  |         # this yield is unshielded. | ||||||
|  |         # IF we received a cancel during the shielded lock | ||||||
|  |         # entry of some next-in-queue requesting task, | ||||||
|  |         # then the resumption here will result in that | ||||||
|  |         # Cancelled being raised to our caller below! | ||||||
|  | 
 | ||||||
|  |         # in this case the finally below should trigger | ||||||
|  |         # and the surrounding callee side context should cancel | ||||||
|  |         # normally relaying back to the caller. | ||||||
|  | 
 | ||||||
|  |         yield _debug_lock | ||||||
| 
 | 
 | ||||||
|     finally: |     finally: | ||||||
|  |         # if _global_actor_in_debug == uid: | ||||||
|  |         if we_acquired and _debug_lock.locked(): | ||||||
|             _debug_lock.release() |             _debug_lock.release() | ||||||
|  | 
 | ||||||
|  |         # IFF there are no more requesting tasks queued up, fire the | ||||||
|  |         # "tty-unlocked" event thereby alerting any monitors of the lock that | ||||||
|  |         # we are now back in the "tty unlocked" state. This is basically | ||||||
|  |         # an edge triggered signal around an empty queue of sub-actor | ||||||
|  |         # tasks that may have tried to acquire the lock. | ||||||
|  |         stats = _debug_lock.statistics() | ||||||
|  |         if ( | ||||||
|  |             not stats.owner | ||||||
|  |         ): | ||||||
|  |             log.pdb(f"No more tasks waiting on tty lock! says {uid}") | ||||||
|  |             _no_remote_has_tty.set() | ||||||
|  |             _no_remote_has_tty = None | ||||||
|  | 
 | ||||||
|  |         _global_actor_in_debug = None | ||||||
|  | 
 | ||||||
|         log.debug(f"TTY lock released, remote task: {task_name}:{uid}") |         log.debug(f"TTY lock released, remote task: {task_name}:{uid}") | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
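Stripped of the tractor plumbing, the bookkeeping above is a FIFO lock plus an ``Event`` that only fires once neither an owner nor any waiters remain (the empty-queue "edge"). A plain ``trio`` sketch of that pattern (names are illustrative):

    import trio

    _tty_lock = trio.StrictFIFOLock()
    _no_one_has_tty = trio.Event()

    async def use_tty(uid: str) -> None:
        global _no_one_has_tty
        await _tty_lock.acquire()
        try:
            await trio.sleep(0.1)  # "debugger session" placeholder
        finally:
            _tty_lock.release()
            stats = _tty_lock.statistics()
            if not stats.owner and not stats.tasks_waiting:
                # last one out: fire the edge-triggered signal and re-arm
                print(f'no more tasks waiting on tty lock! says {uid}')
                _no_one_has_tty.set()
                _no_one_has_tty = trio.Event()

    async def main() -> None:
        async with trio.open_nursery() as n:
            for uid in ('sub1', 'sub2', 'sub3'):
                n.start_soon(use_tty, uid)
            await _no_one_has_tty.wait()

    trio.run(main)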
|  | @ -144,78 +204,142 @@ async def _acquire_debug_lock(uid: Tuple[str, str]) -> AsyncIterator[None]: | ||||||
| #         signal.signal(signal.SIGINT, prior_handler) | #         signal.signal(signal.SIGINT, prior_handler) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @tractor.context | ||||||
| async def _hijack_stdin_relay_to_child( | async def _hijack_stdin_relay_to_child( | ||||||
|     subactor_uid: Tuple[str, str] |  | ||||||
| ) -> AsyncIterator[str]: |  | ||||||
|     # TODO: when we get to true remote debugging |  | ||||||
|     # this will deliver stdin data |  | ||||||
|     log.warning(f"Actor {subactor_uid} is WAITING on stdin hijack lock") |  | ||||||
|     async with _acquire_debug_lock(subactor_uid): |  | ||||||
|         log.warning(f"Actor {subactor_uid} ACQUIRED stdin hijack lock") |  | ||||||
| 
 | 
 | ||||||
|         # with _disable_sigint(): |     ctx: tractor.Context, | ||||||
|  |     subactor_uid: Tuple[str, str] | ||||||
|  | 
 | ||||||
|  | ) -> str: | ||||||
|  |     '''Hijack the tty in the root process of an actor tree such that | ||||||
|  |     the pdbpp debugger console can be allocated to a sub-actor for repl | ||||||
|  |     bossing. | ||||||
|  | 
 | ||||||
|  |     ''' | ||||||
|  |     task_name = trio.lowlevel.current_task().name | ||||||
|  | 
 | ||||||
|  |     # TODO: when we get to true remote debugging | ||||||
|  |     # this will deliver stdin data? | ||||||
|  | 
 | ||||||
|  |     log.debug( | ||||||
|  |         "Attempting to acquire TTY lock\n" | ||||||
|  |         f"remote task: {task_name}:{subactor_uid}" | ||||||
|  |     ) | ||||||
|  | 
 | ||||||
|  |     log.debug(f"Actor {subactor_uid} is WAITING on stdin hijack lock") | ||||||
|  | 
 | ||||||
|  |     with trio.CancelScope(shield=True): | ||||||
|  | 
 | ||||||
|  |         async with _acquire_debug_lock(subactor_uid): | ||||||
| 
 | 
 | ||||||
|             # indicate to child that we've locked stdio |             # indicate to child that we've locked stdio | ||||||
|         yield 'Locked' |             await ctx.started('Locked') | ||||||
|  |             log.pdb(  # type: ignore | ||||||
|  |                 f"Actor {subactor_uid} ACQUIRED stdin hijack lock") | ||||||
| 
 | 
 | ||||||
|         # wait for cancellation of stream by child |             # wait for unlock pdb by child | ||||||
|         # indicating debugger is dis-engaged |             async with ctx.open_stream() as stream: | ||||||
|         await trio.sleep_forever() |                 try: | ||||||
|  |                     assert await stream.receive() == 'pdb_unlock' | ||||||
| 
 | 
 | ||||||
|     log.debug(f"Actor {subactor_uid} RELEASED stdin hijack lock") |                 except trio.BrokenResourceError: | ||||||
|  |                     # XXX: there may be a race with the portal teardown | ||||||
|  |                     # with the calling actor which we can safely ignore; | ||||||
|  |                     # the alternative would be sending an ack message | ||||||
|  |                     # and allowing the client to wait for us to teardown | ||||||
|  |                     # first? | ||||||
|  |                     pass | ||||||
|  | 
 | ||||||
|  |     log.debug( | ||||||
|  |         f"TTY lock released, remote task: {task_name}:{subactor_uid}") | ||||||
|  | 
 | ||||||
|  |     return "pdb_unlock_complete" | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
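The locking callee above plus ``wait_for_parent_stdin_hijack()`` below form a simple two-sided protocol over a ``tractor`` context: the callee signals readiness via ``ctx.started('Locked')``, holds the lock while the stream is open, and releases on a ``'pdb_unlock'`` message. A condensed, hedged sketch of just that protocol (actor and function names are illustrative; it mirrors the APIs used in this diff rather than documenting them authoritatively):

    import trio
    import tractor


    @tractor.context
    async def locker(ctx: tractor.Context) -> str:
        # callee side: announce the lock is held, then wait to be released
        await ctx.started('Locked')
        async with ctx.open_stream() as stream:
            assert await stream.receive() == 'pdb_unlock'
        return 'pdb_unlock_complete'


    async def main() -> None:
        async with tractor.open_nursery() as an:
            portal = await an.start_actor('locker', enable_modules=[__name__])

            # caller side: sync on ``started()``, then release via the stream
            async with portal.open_context(locker) as (ctx, first):
                assert first == 'Locked'
                async with ctx.open_stream() as stream:
                    await stream.send('pdb_unlock')
                assert await ctx.result() == 'pdb_unlock_complete'

            await portal.cancel_actor()


    if __name__ == '__main__':
        trio.run(main)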
| # XXX: We only make this sync in case someone wants to | async def _breakpoint( | ||||||
| # overload the ``breakpoint()`` built-in. | 
 | ||||||
| def _breakpoint(debug_func) -> Awaitable[None]: |     debug_func, | ||||||
|     """``tractor`` breakpoint entry for engaging pdb machinery | 
 | ||||||
|     in subactors. |     # TODO: | ||||||
|     """ |     # shield: bool = False | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  |     '''``tractor`` breakpoint entry for engaging pdb machinery | ||||||
|  |     in the root or a subactor. | ||||||
|  | 
 | ||||||
|  |     ''' | ||||||
|  |     # TODO: is it possible to debug a trio.Cancelled except block? | ||||||
|  |     # right now it seems like we can kinda do it by shielding | ||||||
|  |     # around ``tractor.breakpoint()`` but not if we move the shielded | ||||||
|  |     # scope here??? | ||||||
|  |     # with trio.CancelScope(shield=shield): | ||||||
|  | 
 | ||||||
|     actor = tractor.current_actor() |     actor = tractor.current_actor() | ||||||
|     do_unlock = trio.Event() |     task_name = trio.lowlevel.current_task().name | ||||||
|  | 
 | ||||||
|  |     global _local_pdb_complete, _pdb_release_hook | ||||||
|  |     global _local_task_in_debug, _global_actor_in_debug | ||||||
|  | 
 | ||||||
|  |     await trio.lowlevel.checkpoint() | ||||||
| 
 | 
 | ||||||
|     async def wait_for_parent_stdin_hijack( |     async def wait_for_parent_stdin_hijack( | ||||||
|         task_status=trio.TASK_STATUS_IGNORED |         task_status=trio.TASK_STATUS_IGNORED | ||||||
|     ): |     ): | ||||||
|         global _debugger_request_cs |         global _debugger_request_cs | ||||||
|         with trio.CancelScope() as cs: | 
 | ||||||
|  |         with trio.CancelScope(shield=True) as cs: | ||||||
|             _debugger_request_cs = cs |             _debugger_request_cs = cs | ||||||
|  | 
 | ||||||
|             try: |             try: | ||||||
|                 async with get_root() as portal: |                 async with get_root() as portal: | ||||||
|                         async with portal.open_stream_from( | 
 | ||||||
|  |                     log.error('got portal') | ||||||
|  | 
 | ||||||
|  |                     # this syncs to child's ``Context.started()`` call. | ||||||
|  |                     async with portal.open_context( | ||||||
|  | 
 | ||||||
|                         tractor._debug._hijack_stdin_relay_to_child, |                         tractor._debug._hijack_stdin_relay_to_child, | ||||||
|                         subactor_uid=actor.uid, |                         subactor_uid=actor.uid, | ||||||
|                         ) as stream: |  | ||||||
| 
 | 
 | ||||||
|                                 # block until first yield above |                     ) as (ctx, val): | ||||||
|                                 async for val in stream: |  | ||||||
| 
 | 
 | ||||||
|  |                         log.error('locked context') | ||||||
|                         assert val == 'Locked' |                         assert val == 'Locked' | ||||||
|  | 
 | ||||||
|  |                         async with ctx.open_stream() as stream: | ||||||
|  | 
 | ||||||
|  |                             log.error('opened stream') | ||||||
|  |                             # unblock local caller | ||||||
|                             task_status.started() |                             task_status.started() | ||||||
| 
 | 
 | ||||||
|                                     # with trio.CancelScope(shield=True): |                             try: | ||||||
|                                     await do_unlock.wait() |                                 await _local_pdb_complete.wait() | ||||||
|  | 
 | ||||||
|  |                             finally: | ||||||
|  |                                 # TODO: shielding currently can cause hangs... | ||||||
|  |                                 with trio.CancelScope(shield=True): | ||||||
|  |                                     await stream.send('pdb_unlock') | ||||||
|  | 
 | ||||||
|  |                             # sync with callee termination | ||||||
|  |                             assert await ctx.result() == "pdb_unlock_complete" | ||||||
|  | 
 | ||||||
|  |             except tractor.ContextCancelled: | ||||||
|  |                 log.warning('Root actor cancelled debug lock') | ||||||
| 
 | 
 | ||||||
|                                     # trigger cancellation of remote stream |  | ||||||
|                                     break |  | ||||||
|             finally: |             finally: | ||||||
|                 log.debug(f"Exiting debugger for actor {actor}") |                 log.debug(f"Exiting debugger for actor {actor}") | ||||||
|                 global _in_debug |                 global _local_task_in_debug | ||||||
|                 _in_debug = False |                 _local_task_in_debug = None | ||||||
|                 log.debug(f"Child {actor} released parent stdio lock") |                 log.debug(f"Child {actor} released parent stdio lock") | ||||||
| 
 | 
 | ||||||
|     async def _bp(): |     if not _local_pdb_complete or _local_pdb_complete.is_set(): | ||||||
|         """Async breakpoint which schedules a parent stdio lock, and once complete |         _local_pdb_complete = trio.Event() | ||||||
|         enters the ``pdbpp`` debugging console. |  | ||||||
|         """ |  | ||||||
|         task_name = trio.lowlevel.current_task().name |  | ||||||
| 
 |  | ||||||
|         global _in_debug |  | ||||||
| 
 | 
 | ||||||
|     # TODO: need a more robust check for the "root" actor |     # TODO: need a more robust check for the "root" actor | ||||||
|     if actor._parent_chan and not is_root_process(): |     if actor._parent_chan and not is_root_process(): | ||||||
|             if _in_debug: | 
 | ||||||
|                 if _in_debug == task_name: |         if _local_task_in_debug: | ||||||
|  |             if _local_task_in_debug == task_name: | ||||||
|                 # this task already has the lock and is |                 # this task already has the lock and is | ||||||
|                 # likely recurrently entering a breakpoint |                 # likely recurrently entering a breakpoint | ||||||
|                 return |                 return | ||||||
|  | @ -223,40 +347,72 @@ def _breakpoint(debug_func) -> Awaitable[None]: | ||||||
|             # if **this** actor is already in debug mode block here |             # if **this** actor is already in debug mode block here | ||||||
|             # waiting for the control to be released - this allows |             # waiting for the control to be released - this allows | ||||||
|             # support for recursive entries to `tractor.breakpoint()` |             # support for recursive entries to `tractor.breakpoint()` | ||||||
|                 log.warning( |             log.warning(f"{actor.uid} already has a debug lock, waiting...") | ||||||
|                     f"Actor {actor.uid} already has a debug lock, waiting...") |  | ||||||
|                 await do_unlock.wait() |  | ||||||
|                 await trio.sleep(0.1) |  | ||||||
| 
 | 
 | ||||||
|             # assign unlock callback for debugger teardown hooks |             await _local_pdb_complete.wait() | ||||||
|             global _pdb_release_hook |             await trio.sleep(0.1) | ||||||
|             _pdb_release_hook = do_unlock.set |  | ||||||
| 
 | 
 | ||||||
|         # mark local actor as "in debug mode" to avoid recurrent |         # mark local actor as "in debug mode" to avoid recurrent | ||||||
|         # entries/requests to the root process |         # entries/requests to the root process | ||||||
|             _in_debug = task_name |         _local_task_in_debug = task_name | ||||||
|  | 
 | ||||||
|  |         # assign unlock callback for debugger teardown hooks | ||||||
|  |         _pdb_release_hook = _local_pdb_complete.set | ||||||
| 
 | 
 | ||||||
|         # this **must** be awaited by the caller and is done using the |         # this **must** be awaited by the caller and is done using the | ||||||
|         # root nursery so that the debugger can continue to run without |         # root nursery so that the debugger can continue to run without | ||||||
|         # being restricted by the scope of a new task nursery. |         # being restricted by the scope of a new task nursery. | ||||||
|  | 
 | ||||||
|  |         # NOTE: if we want to debug a trio.Cancelled triggered exception | ||||||
|  |         # we have to figure out how to avoid having the service nursery | ||||||
|  |         # cancel on this task start? I *think* this works below? | ||||||
|  |         # actor._service_n.cancel_scope.shield = shield | ||||||
|  |         with trio.CancelScope(shield=True): | ||||||
|             await actor._service_n.start(wait_for_parent_stdin_hijack) |             await actor._service_n.start(wait_for_parent_stdin_hijack) | ||||||
|  |         # actor._service_n.cancel_scope.shield = False | ||||||
| 
 | 
 | ||||||
|     elif is_root_process(): |     elif is_root_process(): | ||||||
|  | 
 | ||||||
|         # we also wait in the root-parent for any child that |         # we also wait in the root-parent for any child that | ||||||
|         # may have the tty locked prior |         # may have the tty locked prior | ||||||
|             if _debug_lock.locked():  # root process already has it; ignore |         global _debug_lock | ||||||
|  | 
 | ||||||
|  |         # TODO: wait, what about multiple root tasks acquiring | ||||||
|  |         # it though.. shrug? | ||||||
|  |         # root process (us) already has it; ignore | ||||||
|  |         if _global_actor_in_debug == actor.uid: | ||||||
|             return |             return | ||||||
|  | 
 | ||||||
|  |         # XXX: since we need to enter pdb synchronously below, | ||||||
|  |         # we have to release the lock manually from pdb completion | ||||||
|  |         # callbacks. Can't think of a nicer way than this atm. | ||||||
|  |         if _debug_lock.locked(): | ||||||
|  |             log.warning( | ||||||
|  |                 'Root actor attempting to acquire active tty lock' | ||||||
|  |                 f' owned by {_global_actor_in_debug}') | ||||||
|  | 
 | ||||||
|         await _debug_lock.acquire() |         await _debug_lock.acquire() | ||||||
|             _pdb_release_hook = _debug_lock.release | 
 | ||||||
|  |         _global_actor_in_debug = actor.uid | ||||||
|  |         _local_task_in_debug = task_name | ||||||
|  | 
 | ||||||
|  |         # the lock must be released on pdb completion | ||||||
|  |         def teardown(): | ||||||
|  |             global _local_pdb_complete, _debug_lock | ||||||
|  |             global _global_actor_in_debug, _local_task_in_debug | ||||||
|  | 
 | ||||||
|  |             _debug_lock.release() | ||||||
|  |             _global_actor_in_debug = None | ||||||
|  |             _local_task_in_debug = None | ||||||
|  |             _local_pdb_complete.set() | ||||||
|  | 
 | ||||||
|  |         _pdb_release_hook = teardown | ||||||
| 
 | 
 | ||||||
|     # block here once (at the appropriate frame *up*) where |     # block here once (at the appropriate frame *up*) where | ||||||
|     # ``breakpoint()`` was awaited and begin handling stdio |     # ``breakpoint()`` was awaited and begin handling stdio | ||||||
|     log.debug("Entering the synchronous world of pdb") |     log.debug("Entering the synchronous world of pdb") | ||||||
|     debug_func(actor) |     debug_func(actor) | ||||||
| 
 | 
 | ||||||
|     # user code **must** await this! |  | ||||||
|     return _bp() |  | ||||||
| 
 |  | ||||||
| 
 | 
 | ||||||
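For reference, the user-facing entry point to all of the above is just ``tractor.breakpoint()`` awaited from task code inside a nursery opened with ``debug_mode=True``. A minimal, hedged usage sketch (the ``run_in_actor()`` spawning call is assumed from tractor's standard nursery API and is not part of this diff):

    import trio
    import tractor


    async def bp_then_exit() -> None:
        # drops into the pdbpp console; the root actor relays its tty
        # via the stdin-hijack machinery above
        await tractor.breakpoint()


    async def main() -> None:
        async with tractor.open_nursery(
            debug_mode=True,  # enables the tty-lock machinery
        ) as an:
            await an.run_in_actor(bp_then_exit)


    if __name__ == '__main__':
        trio.run(main)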
| def _mk_pdb(): | def _mk_pdb(): | ||||||
|     # XXX: setting these flags on the pdb instance are absolutely |     # XXX: setting these flags on the pdb instance are absolutely | ||||||
|  | @ -276,7 +432,7 @@ def _set_trace(actor=None): | ||||||
|     pdb = _mk_pdb() |     pdb = _mk_pdb() | ||||||
| 
 | 
 | ||||||
|     if actor is not None: |     if actor is not None: | ||||||
|         log.runtime(f"\nAttaching pdb to actor: {actor.uid}\n") |         log.pdb(f"\nAttaching pdb to actor: {actor.uid}\n")  # type: ignore | ||||||
| 
 | 
 | ||||||
|         pdb.set_trace( |         pdb.set_trace( | ||||||
|             # start 2 levels up in user code |             # start 2 levels up in user code | ||||||
|  | @ -285,8 +441,8 @@ def _set_trace(actor=None): | ||||||
| 
 | 
 | ||||||
|     else: |     else: | ||||||
|         # we entered the global ``breakpoint()`` built-in from sync code |         # we entered the global ``breakpoint()`` built-in from sync code | ||||||
|         global _in_debug, _pdb_release_hook |         global _local_task_in_debug, _pdb_release_hook | ||||||
|         _in_debug = 'sync' |         _local_task_in_debug = 'sync' | ||||||
| 
 | 
 | ||||||
|         def nuttin(): |         def nuttin(): | ||||||
|             pass |             pass | ||||||
|  | @ -306,7 +462,7 @@ breakpoint = partial( | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def _post_mortem(actor): | def _post_mortem(actor): | ||||||
|     log.runtime(f"\nAttaching to pdb in crashed actor: {actor.uid}\n") |     log.pdb(f"\nAttaching to pdb in crashed actor: {actor.uid}\n") | ||||||
|     pdb = _mk_pdb() |     pdb = _mk_pdb() | ||||||
| 
 | 
 | ||||||
|     # custom Pdb post-mortem entry |     # custom Pdb post-mortem entry | ||||||
|  |  | ||||||
|  | @ -16,12 +16,14 @@ from ._state import current_actor, _runtime_vars | ||||||
| 
 | 
 | ||||||
| @asynccontextmanager | @asynccontextmanager | ||||||
| async def get_arbiter( | async def get_arbiter( | ||||||
|  | 
 | ||||||
|     host: str, |     host: str, | ||||||
|     port: int, |     port: int, | ||||||
|  | 
 | ||||||
| ) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]: | ) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]: | ||||||
|     """Return a portal instance connected to a local or remote |     '''Return a portal instance connected to a local or remote | ||||||
|     arbiter. |     arbiter. | ||||||
|     """ |     ''' | ||||||
|     actor = current_actor() |     actor = current_actor() | ||||||
| 
 | 
 | ||||||
|     if not actor: |     if not actor: | ||||||
|  | @ -33,7 +35,9 @@ async def get_arbiter( | ||||||
|         yield LocalPortal(actor, Channel((host, port))) |         yield LocalPortal(actor, Channel((host, port))) | ||||||
|     else: |     else: | ||||||
|         async with _connect_chan(host, port) as chan: |         async with _connect_chan(host, port) as chan: | ||||||
|  | 
 | ||||||
|             async with open_portal(chan) as arb_portal: |             async with open_portal(chan) as arb_portal: | ||||||
|  | 
 | ||||||
|                 yield arb_portal |                 yield arb_portal | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -41,8 +45,10 @@ async def get_arbiter( | ||||||
| async def get_root( | async def get_root( | ||||||
|     **kwargs, |     **kwargs, | ||||||
| ) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]: | ) -> typing.AsyncGenerator[Union[Portal, LocalPortal], None]: | ||||||
|  | 
 | ||||||
|     host, port = _runtime_vars['_root_mailbox'] |     host, port = _runtime_vars['_root_mailbox'] | ||||||
|     assert host is not None |     assert host is not None | ||||||
|  | 
 | ||||||
|     async with _connect_chan(host, port) as chan: |     async with _connect_chan(host, port) as chan: | ||||||
|         async with open_portal(chan, **kwargs) as portal: |         async with open_portal(chan, **kwargs) as portal: | ||||||
|             yield portal |             yield portal | ||||||
|  | @ -60,12 +66,16 @@ async def find_actor( | ||||||
|     """ |     """ | ||||||
|     actor = current_actor() |     actor = current_actor() | ||||||
|     async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal: |     async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal: | ||||||
|  | 
 | ||||||
|         sockaddr = await arb_portal.run_from_ns('self', 'find_actor', name=name) |         sockaddr = await arb_portal.run_from_ns('self', 'find_actor', name=name) | ||||||
|  | 
 | ||||||
|         # TODO: return portals to all available actors - for now just |         # TODO: return portals to all available actors - for now just | ||||||
|         # the last one that registered |         # the last one that registered | ||||||
|         if name == 'arbiter' and actor.is_arbiter: |         if name == 'arbiter' and actor.is_arbiter: | ||||||
|             raise RuntimeError("The current actor is the arbiter") |             raise RuntimeError("The current actor is the arbiter") | ||||||
|  | 
 | ||||||
|         elif sockaddr: |         elif sockaddr: | ||||||
|  | 
 | ||||||
|             async with _connect_chan(*sockaddr) as chan: |             async with _connect_chan(*sockaddr) as chan: | ||||||
|                 async with open_portal(chan) as portal: |                 async with open_portal(chan) as portal: | ||||||
|                     yield portal |                     yield portal | ||||||
|  | @ -83,9 +93,12 @@ async def wait_for_actor( | ||||||
|     A portal to the first registered actor is returned. |     A portal to the first registered actor is returned. | ||||||
|     """ |     """ | ||||||
|     actor = current_actor() |     actor = current_actor() | ||||||
|  | 
 | ||||||
|     async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal: |     async with get_arbiter(*arbiter_sockaddr or actor._arb_addr) as arb_portal: | ||||||
|  | 
 | ||||||
|         sockaddrs = await arb_portal.run_from_ns('self', 'wait_for_actor', name=name) |         sockaddrs = await arb_portal.run_from_ns('self', 'wait_for_actor', name=name) | ||||||
|         sockaddr = sockaddrs[-1] |         sockaddr = sockaddrs[-1] | ||||||
|  | 
 | ||||||
|         async with _connect_chan(*sockaddr) as chan: |         async with _connect_chan(*sockaddr) as chan: | ||||||
|             async with open_portal(chan) as portal: |             async with open_portal(chan) as portal: | ||||||
|                 yield portal |                 yield portal | ||||||
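These helpers back the public discovery API; a hedged usage sketch of looking up a running actor by name through the arbiter registry (the ``'service'`` actor name is illustrative, and ``tractor.find_actor`` is assumed to be re-exported at the package level):

    import trio
    import tractor


    async def main() -> None:
        async with tractor.open_nursery() as an:
            portal = await an.start_actor('service', enable_modules=[__name__])

            # queries the arbiter registry filled in by ``register_actor()``
            # and yields a connected portal (or ``None`` if not registered)
            async with tractor.find_actor('service') as maybe_portal:
                assert maybe_portal is not None

            await portal.cancel_actor()


    if __name__ == '__main__':
        trio.run(main)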
|  |  | ||||||
|  | @ -3,7 +3,6 @@ Sub-process entry points. | ||||||
| """ | """ | ||||||
| from functools import partial | from functools import partial | ||||||
| from typing import Tuple, Any | from typing import Tuple, Any | ||||||
| import signal |  | ||||||
| 
 | 
 | ||||||
| import trio  # type: ignore | import trio  # type: ignore | ||||||
| 
 | 
 | ||||||
|  | @ -15,7 +14,7 @@ log = get_logger(__name__) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def _mp_main( | def _mp_main( | ||||||
|     actor: 'Actor',  # type: ignore |     actor: 'Actor',  # type: ignore # noqa | ||||||
|     accept_addr: Tuple[str, int], |     accept_addr: Tuple[str, int], | ||||||
|     forkserver_info: Tuple[Any, Any, Any, Any, Any], |     forkserver_info: Tuple[Any, Any, Any, Any, Any], | ||||||
|     start_method: str, |     start_method: str, | ||||||
|  | @ -54,16 +53,13 @@ def _mp_main( | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def _trio_main( | def _trio_main( | ||||||
|     actor: 'Actor',  # type: ignore |     actor: 'Actor',  # type: ignore # noqa | ||||||
|     *, |     *, | ||||||
|     parent_addr: Tuple[str, int] = None, |     parent_addr: Tuple[str, int] = None, | ||||||
| ) -> None: | ) -> None: | ||||||
|     """Entry point for a `trio_run_in_process` subactor. |     """Entry point for a `trio_run_in_process` subactor. | ||||||
|     """ |  | ||||||
|     # Disable sigint handling in children; |  | ||||||
|     # we don't need it thanks to our cancellation machinery. |  | ||||||
|     signal.signal(signal.SIGINT, signal.SIG_IGN) |  | ||||||
| 
 | 
 | ||||||
|  |     """ | ||||||
|     log.info(f"Started new trio process for {actor.uid}") |     log.info(f"Started new trio process for {actor.uid}") | ||||||
| 
 | 
 | ||||||
|     if actor.loglevel is not None: |     if actor.loglevel is not None: | ||||||
|  |  | ||||||
|  | @ -1,7 +1,7 @@ | ||||||
| """ | """ | ||||||
| Our classy exception set. | Our classy exception set. | ||||||
| """ | """ | ||||||
| from typing import Dict, Any | from typing import Dict, Any, Optional | ||||||
| import importlib | import importlib | ||||||
| import builtins | import builtins | ||||||
| import traceback | import traceback | ||||||
|  | @ -15,17 +15,16 @@ _this_mod = importlib.import_module(__name__) | ||||||
| class RemoteActorError(Exception): | class RemoteActorError(Exception): | ||||||
|     # TODO: local reconstruction of remote exception deats |     # TODO: local reconstruction of remote exception deats | ||||||
|     "Remote actor exception bundled locally" |     "Remote actor exception bundled locally" | ||||||
|     def __init__(self, message, type_str, **msgdata) -> None: |     def __init__( | ||||||
|         super().__init__(message) |         self, | ||||||
|         for ns in [builtins, _this_mod, trio]: |         message: str, | ||||||
|             try: |         suberror_type: Optional[Exception] = None, | ||||||
|                 self.type = getattr(ns, type_str) |         **msgdata | ||||||
|                 break |  | ||||||
|             except AttributeError: |  | ||||||
|                 continue |  | ||||||
|         else: |  | ||||||
|             self.type = Exception |  | ||||||
| 
 | 
 | ||||||
|  |     ) -> None: | ||||||
|  |         super().__init__(message) | ||||||
|  | 
 | ||||||
|  |         self.type = suberror_type | ||||||
|         self.msgdata = msgdata |         self.msgdata = msgdata | ||||||
| 
 | 
 | ||||||
|     # TODO: a trio.MultiError.catch like context manager |     # TODO: a trio.MultiError.catch like context manager | ||||||
|  | @ -38,6 +37,10 @@ class InternalActorError(RemoteActorError): | ||||||
|     """ |     """ | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | class ContextCancelled(RemoteActorError): | ||||||
|  |     "Inter-actor task context cancelled itself on the callee side." | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
| class NoResult(RuntimeError): | class NoResult(RuntimeError): | ||||||
|     "No final result is expected for this actor" |     "No final result is expected for this actor" | ||||||
| 
 | 
 | ||||||
|  | @ -50,13 +53,22 @@ class NoRuntime(RuntimeError): | ||||||
|     "The root actor has not been initialized yet" |     "The root actor has not been initialized yet" | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def pack_error(exc: BaseException) -> Dict[str, Any]: | def pack_error( | ||||||
|  |     exc: BaseException, | ||||||
|  |     tb=None, | ||||||
|  | 
 | ||||||
|  | ) -> Dict[str, Any]: | ||||||
|     """Create an "error message" for tranmission over |     """Create an "error message" for tranmission over | ||||||
|     a channel (aka the wire). |     a channel (aka the wire). | ||||||
|     """ |     """ | ||||||
|  |     if tb: | ||||||
|  |         tb_str = ''.join(traceback.format_tb(tb)) | ||||||
|  |     else: | ||||||
|  |         tb_str = traceback.format_exc() | ||||||
|  | 
 | ||||||
|     return { |     return { | ||||||
|         'error': { |         'error': { | ||||||
|             'tb_str': traceback.format_exc(), |             'tb_str': tb_str, | ||||||
|             'type_str': type(exc).__name__, |             'type_str': type(exc).__name__, | ||||||
|         } |         } | ||||||
|     } |     } | ||||||
|  | @ -70,12 +82,35 @@ def unpack_error( | ||||||
|     """Unpack an 'error' message from the wire |     """Unpack an 'error' message from the wire | ||||||
|     into a local ``RemoteActorError``. |     into a local ``RemoteActorError``. | ||||||
|     """ |     """ | ||||||
|     tb_str = msg['error'].get('tb_str', '') |     error = msg['error'] | ||||||
|     return err_type( | 
 | ||||||
|         f"{chan.uid}\n" + tb_str, |     tb_str = error.get('tb_str', '') | ||||||
|  |     message = f"{chan.uid}\n" + tb_str | ||||||
|  |     type_name = error['type_str'] | ||||||
|  |     suberror_type = Exception | ||||||
|  | 
 | ||||||
|  |     if type_name == 'ContextCancelled': | ||||||
|  |         err_type = ContextCancelled | ||||||
|  |         suberror_type = trio.Cancelled | ||||||
|  | 
 | ||||||
|  |     else:  # try to lookup a suitable local error type | ||||||
|  |         for ns in [builtins, _this_mod, trio]: | ||||||
|  |             try: | ||||||
|  |                 suberror_type = getattr(ns, type_name) | ||||||
|  |                 break | ||||||
|  |             except AttributeError: | ||||||
|  |                 continue | ||||||
|  | 
 | ||||||
|  |     exc = err_type( | ||||||
|  |         message, | ||||||
|  |         suberror_type=suberror_type, | ||||||
|  | 
 | ||||||
|  |         # unpack other fields into error type init | ||||||
|         **msg['error'], |         **msg['error'], | ||||||
|     ) |     ) | ||||||
| 
 | 
 | ||||||
|  |     return exc | ||||||
|  | 
 | ||||||
| 
 | 
 | ||||||
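A quick sanity sketch (not part of the diff) of how the reworked helpers round-trip an exception; it assumes ``unpack_error()``'s default ``err_type`` is ``RemoteActorError`` as elsewhere in this module, and fakes the channel with a bare namespace:

    # illustrative only: round-trip a local exception through the wire format
    from types import SimpleNamespace
    from tractor._exceptions import pack_error, unpack_error, RemoteActorError

    try:
        raise ValueError('boom')
    except ValueError as err:
        wire_msg = {'cid': 'deadbeef', **pack_error(err, tb=err.__traceback__)}

    fake_chan = SimpleNamespace(uid=('some_actor', 'some_uuid'))  # stand-in Channel

    try:
        raise unpack_error(wire_msg, fake_chan)
    except RemoteActorError as rerr:
        # the matching builtin type is recovered via the namespace lookup above
        assert rerr.type is ValueError
        assert rerr.msgdata['type_str'] == 'ValueError'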
| def is_multi_cancelled(exc: BaseException) -> bool: | def is_multi_cancelled(exc: BaseException) -> bool: | ||||||
|     """Predicate to determine if a ``trio.MultiError`` contains only |     """Predicate to determine if a ``trio.MultiError`` contains only | ||||||
|  |  | ||||||
							
								
								
									
143 tractor/_ipc.py
								
							|  | @ -1,16 +1,19 @@ | ||||||
| """ | """ | ||||||
| Inter-process comms abstractions | Inter-process comms abstractions | ||||||
| """ | """ | ||||||
|  | from functools import partial | ||||||
|  | import struct | ||||||
| import typing | import typing | ||||||
| from typing import Any, Tuple, Optional | from typing import Any, Tuple, Optional | ||||||
| from functools import partial |  | ||||||
| 
 | 
 | ||||||
|  | from tricycle import BufferedReceiveStream | ||||||
| import msgpack | import msgpack | ||||||
| import trio | import trio | ||||||
| from async_generator import asynccontextmanager | from async_generator import asynccontextmanager | ||||||
| 
 | 
 | ||||||
| from .log import get_logger | from .log import get_logger | ||||||
| log = get_logger('ipc') | from ._exceptions import TransportClosed | ||||||
|  | log = get_logger(__name__) | ||||||
| 
 | 
 | ||||||
| # :eyeroll: | # :eyeroll: | ||||||
| try: | try: | ||||||
|  | @ -21,21 +24,32 @@ except ImportError: | ||||||
|     Unpacker = partial(msgpack.Unpacker, strict_map_key=False) |     Unpacker = partial(msgpack.Unpacker, strict_map_key=False) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class MsgpackStream: | class MsgpackTCPStream: | ||||||
|     """A ``trio.SocketStream`` delivering ``msgpack`` formatted data. |     '''A ``trio.SocketStream`` delivering ``msgpack`` formatted data | ||||||
|     """ |     using ``msgpack-python``. | ||||||
|     def __init__(self, stream: trio.SocketStream) -> None: | 
 | ||||||
|  |     ''' | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         stream: trio.SocketStream, | ||||||
|  | 
 | ||||||
|  |     ) -> None: | ||||||
|  | 
 | ||||||
|         self.stream = stream |         self.stream = stream | ||||||
|         assert self.stream.socket |         assert self.stream.socket | ||||||
|  | 
 | ||||||
|         # should both be IP sockets |         # should both be IP sockets | ||||||
|         lsockname = stream.socket.getsockname() |         lsockname = stream.socket.getsockname() | ||||||
|         assert isinstance(lsockname, tuple) |         assert isinstance(lsockname, tuple) | ||||||
|         self._laddr = lsockname[:2] |         self._laddr = lsockname[:2] | ||||||
|  | 
 | ||||||
|         rsockname = stream.socket.getpeername() |         rsockname = stream.socket.getpeername() | ||||||
|         assert isinstance(rsockname, tuple) |         assert isinstance(rsockname, tuple) | ||||||
|         self._raddr = rsockname[:2] |         self._raddr = rsockname[:2] | ||||||
| 
 | 
 | ||||||
|  |         # start first entry to read loop | ||||||
|         self._agen = self._iter_packets() |         self._agen = self._iter_packets() | ||||||
|  | 
 | ||||||
|         self._send_lock = trio.StrictFIFOLock() |         self._send_lock = trio.StrictFIFOLock() | ||||||
| 
 | 
 | ||||||
|     async def _iter_packets(self) -> typing.AsyncGenerator[dict, None]: |     async def _iter_packets(self) -> typing.AsyncGenerator[dict, None]: | ||||||
|  | @ -46,16 +60,13 @@ class MsgpackStream: | ||||||
|             use_list=False, |             use_list=False, | ||||||
|         ) |         ) | ||||||
|         while True: |         while True: | ||||||
|             try: |  | ||||||
|             data = await self.stream.receive_some(2**10) |             data = await self.stream.receive_some(2**10) | ||||||
|             log.trace(f"received {data}")  # type: ignore |             log.trace(f"received {data}")  # type: ignore | ||||||
|             except trio.BrokenResourceError: |  | ||||||
|                 log.warning(f"Stream connection {self.raddr} broke") |  | ||||||
|                 return |  | ||||||
| 
 | 
 | ||||||
|             if data == b'': |             if data == b'': | ||||||
|                 log.debug(f"Stream connection {self.raddr} was closed") |                 raise TransportClosed( | ||||||
|                 return |                     f'transport {self} was already closed prior to read' | ||||||
|  |                 ) | ||||||
| 
 | 
 | ||||||
|             unpacker.feed(data) |             unpacker.feed(data) | ||||||
|             for packet in unpacker: |             for packet in unpacker: | ||||||
|  | @ -73,7 +84,8 @@ class MsgpackStream: | ||||||
|     async def send(self, data: Any) -> None: |     async def send(self, data: Any) -> None: | ||||||
|         async with self._send_lock: |         async with self._send_lock: | ||||||
|             return await self.stream.send_all( |             return await self.stream.send_all( | ||||||
|                 msgpack.dumps(data, use_bin_type=True)) |                 msgpack.dumps(data, use_bin_type=True) | ||||||
|  |             ) | ||||||
| 
 | 
 | ||||||
|     async def recv(self) -> Any: |     async def recv(self) -> Any: | ||||||
|         return await self._agen.asend(None) |         return await self._agen.asend(None) | ||||||
|  | @ -85,33 +97,112 @@ class MsgpackStream: | ||||||
|         return self.stream.socket.fileno() != -1 |         return self.stream.socket.fileno() != -1 | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | class MsgspecTCPStream(MsgpackTCPStream): | ||||||
|  |     '''A ``trio.SocketStream`` delivering ``msgpack`` formatted data | ||||||
|  |     using ``msgspec``. | ||||||
|  | 
 | ||||||
|  |     ''' | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         stream: trio.SocketStream, | ||||||
|  |         prefix_size: int = 4, | ||||||
|  | 
 | ||||||
|  |     ) -> None: | ||||||
|  |         super().__init__(stream) | ||||||
|  |         self.recv_stream = BufferedReceiveStream(transport_stream=stream) | ||||||
|  |         self.prefix_size = prefix_size | ||||||
|  | 
 | ||||||
|  |         import msgspec | ||||||
|  | 
 | ||||||
|  |         # TODO: struct aware messaging coders | ||||||
|  |         self.encode = msgspec.Encoder().encode | ||||||
|  |         self.decode = msgspec.Decoder().decode  # dict[str, Any] | ||||||
|  | 
 | ||||||
|  |     async def _iter_packets(self) -> typing.AsyncGenerator[dict, None]: | ||||||
|  |         """Yield packets from the underlying stream. | ||||||
|  |         """ | ||||||
|  | 
 | ||||||
|  |         while True: | ||||||
|  |             try: | ||||||
|  |                 header = await self.recv_stream.receive_exactly(4) | ||||||
|  | 
 | ||||||
|  |             except ValueError: | ||||||
|  |                 raise TransportClosed( | ||||||
|  |                     f'transport {self} was already closed prior to read' | ||||||
|  |                 ) | ||||||
|  | 
 | ||||||
|  |             if header == b'': | ||||||
|  |                 raise TransportClosed( | ||||||
|  |                     f'transport {self} was already closed prior to read' | ||||||
|  |                 ) | ||||||
|  | 
 | ||||||
|  |             size, = struct.unpack("<I", header) | ||||||
|  | 
 | ||||||
|  |             log.trace(f'received header {size}')  # type: ignore | ||||||
|  | 
 | ||||||
|  |             msg_bytes = await self.recv_stream.receive_exactly(size) | ||||||
|  | 
 | ||||||
|  |             log.trace(f"received {msg_bytes}")  # type: ignore | ||||||
|  |             yield self.decode(msg_bytes) | ||||||
|  | 
 | ||||||
|  |     async def send(self, data: Any) -> None: | ||||||
|  |         async with self._send_lock: | ||||||
|  | 
 | ||||||
|  |             bytes_data: bytes = self.encode(data) | ||||||
|  | 
 | ||||||
|  |             # supposedly the fastest way to do this, see: | ||||||
|  |             # https://stackoverflow.com/a/54027962 | ||||||
|  |             size: bytes = struct.pack("<I", len(bytes_data)) | ||||||
|  | 
 | ||||||
|  |             return await self.stream.send_all(size + bytes_data) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
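The new ``MsgspecTCPStream`` frames every message as a 4-byte little-endian length header followed by the encoded body. A minimal, socket-free sketch of that framing (using only ``struct``, so it runs even without ``msgspec`` installed):

    # illustrative only: the same "<I"-prefixed framing used by send()/_iter_packets()
    import struct
    from typing import Tuple

    def frame(payload: bytes) -> bytes:
        # length header first, then the serialized message body
        return struct.pack("<I", len(payload)) + payload

    def unframe(buf: bytes) -> Tuple[bytes, bytes]:
        size, = struct.unpack("<I", buf[:4])
        # return (message, remaining buffered bytes) like a streaming reader would
        return buf[4:4 + size], buf[4 + size:]

    body, rest = unframe(frame(b'\x81\xa5yield\x01'))  # a tiny msgpack map {'yield': 1}
    assert body == b'\x81\xa5yield\x01' and rest == b''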
| class Channel: | class Channel: | ||||||
|     """An inter-process channel for communication between (remote) actors. |     """An inter-process channel for communication between (remote) actors. | ||||||
| 
 | 
 | ||||||
|     Currently the only supported transport is a ``trio.SocketStream``. |     Currently the only supported transport is a ``trio.SocketStream``. | ||||||
|     """ |     """ | ||||||
|     def __init__( |     def __init__( | ||||||
|  | 
 | ||||||
|         self, |         self, | ||||||
|         destaddr: Optional[Tuple[str, int]] = None, |         destaddr: Optional[Tuple[str, int]] = None, | ||||||
|         on_reconnect: typing.Callable[..., typing.Awaitable] = None, |         on_reconnect: typing.Callable[..., typing.Awaitable] = None, | ||||||
|         auto_reconnect: bool = False, |         auto_reconnect: bool = False, | ||||||
|         stream: trio.SocketStream = None,  # expected to be active |         stream: trio.SocketStream = None,  # expected to be active | ||||||
|  | 
 | ||||||
|     ) -> None: |     ) -> None: | ||||||
|  | 
 | ||||||
|         self._recon_seq = on_reconnect |         self._recon_seq = on_reconnect | ||||||
|         self._autorecon = auto_reconnect |         self._autorecon = auto_reconnect | ||||||
|         self.msgstream: Optional[MsgpackStream] = MsgpackStream( | 
 | ||||||
|             stream) if stream else None |         stream_serializer_type = MsgpackTCPStream | ||||||
|  | 
 | ||||||
|  |         try: | ||||||
|  |             # if installed load the msgspec transport since it's faster | ||||||
|  |             import msgspec  # noqa | ||||||
|  |             stream_serializer_type = MsgspecTCPStream | ||||||
|  |         except ImportError: | ||||||
|  |             pass | ||||||
|  | 
 | ||||||
|  |         self.stream_serializer_type = stream_serializer_type | ||||||
|  |         self.msgstream = stream_serializer_type(stream) if stream else None | ||||||
|  | 
 | ||||||
|         if self.msgstream and destaddr: |         if self.msgstream and destaddr: | ||||||
|             raise ValueError( |             raise ValueError( | ||||||
|                 f"A stream was provided with local addr {self.laddr}" |                 f"A stream was provided with local addr {self.laddr}" | ||||||
|             ) |             ) | ||||||
|  | 
 | ||||||
|         self._destaddr = self.msgstream.raddr if self.msgstream else destaddr |         self._destaddr = self.msgstream.raddr if self.msgstream else destaddr | ||||||
|  | 
 | ||||||
|         # set after handshake - always uid of far end |         # set after handshake - always uid of far end | ||||||
|         self.uid: Optional[Tuple[str, str]] = None |         self.uid: Optional[Tuple[str, str]] = None | ||||||
|  | 
 | ||||||
|         # set if far end actor errors internally |         # set if far end actor errors internally | ||||||
|         self._exc: Optional[Exception] = None |         self._exc: Optional[Exception] = None | ||||||
|         self._agen = self._aiter_recv() |         self._agen = self._aiter_recv() | ||||||
| 
 | 
 | ||||||
|  |         self._closed: bool = False | ||||||
|  | 
 | ||||||
|     def __repr__(self) -> str: |     def __repr__(self) -> str: | ||||||
|         if self.msgstream: |         if self.msgstream: | ||||||
|             return repr( |             return repr( | ||||||
|  | @ -128,35 +219,51 @@ class Channel: | ||||||
|         return self.msgstream.raddr if self.msgstream else None |         return self.msgstream.raddr if self.msgstream else None | ||||||
| 
 | 
 | ||||||
|     async def connect( |     async def connect( | ||||||
|         self, destaddr: Tuple[Any, ...] = None, |         self, | ||||||
|  |         destaddr: Tuple[Any, ...] = None, | ||||||
|         **kwargs |         **kwargs | ||||||
|  | 
 | ||||||
|     ) -> trio.SocketStream: |     ) -> trio.SocketStream: | ||||||
|  | 
 | ||||||
|         if self.connected(): |         if self.connected(): | ||||||
|             raise RuntimeError("channel is already connected?") |             raise RuntimeError("channel is already connected?") | ||||||
|  | 
 | ||||||
|         destaddr = destaddr or self._destaddr |         destaddr = destaddr or self._destaddr | ||||||
|         assert isinstance(destaddr, tuple) |         assert isinstance(destaddr, tuple) | ||||||
|         stream = await trio.open_tcp_stream(*destaddr, **kwargs) | 
 | ||||||
|         self.msgstream = MsgpackStream(stream) |         stream = await trio.open_tcp_stream( | ||||||
|  |             *destaddr, | ||||||
|  |             **kwargs | ||||||
|  |         ) | ||||||
|  |         self.msgstream = self.stream_serializer_type(stream) | ||||||
|         return stream |         return stream | ||||||
| 
 | 
 | ||||||
|     async def send(self, item: Any) -> None: |     async def send(self, item: Any) -> None: | ||||||
|  | 
 | ||||||
|         log.trace(f"send `{item}`")  # type: ignore |         log.trace(f"send `{item}`")  # type: ignore | ||||||
|         assert self.msgstream |         assert self.msgstream | ||||||
|  | 
 | ||||||
|         await self.msgstream.send(item) |         await self.msgstream.send(item) | ||||||
| 
 | 
 | ||||||
|     async def recv(self) -> Any: |     async def recv(self) -> Any: | ||||||
|         assert self.msgstream |         assert self.msgstream | ||||||
|  | 
 | ||||||
|         try: |         try: | ||||||
|             return await self.msgstream.recv() |             return await self.msgstream.recv() | ||||||
|  | 
 | ||||||
|         except trio.BrokenResourceError: |         except trio.BrokenResourceError: | ||||||
|             if self._autorecon: |             if self._autorecon: | ||||||
|                 await self._reconnect() |                 await self._reconnect() | ||||||
|                 return await self.recv() |                 return await self.recv() | ||||||
| 
 | 
 | ||||||
|  |             raise | ||||||
|  | 
 | ||||||
|     async def aclose(self) -> None: |     async def aclose(self) -> None: | ||||||
|         log.debug(f"Closing {self}") |         log.debug(f"Closing {self}") | ||||||
|         assert self.msgstream |         assert self.msgstream | ||||||
|         await self.msgstream.stream.aclose() |         await self.msgstream.stream.aclose() | ||||||
|  |         self._closed = True | ||||||
|  |         log.error(f'CLOSING CHAN {self}') | ||||||
| 
 | 
 | ||||||
|     async def __aenter__(self): |     async def __aenter__(self): | ||||||
|         await self.connect() |         await self.connect() | ||||||
|  |  | ||||||
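Serializer selection in ``Channel.__init__()`` is purely import-driven: ``MsgspecTCPStream`` is used when ``msgspec`` is importable, otherwise the default ``MsgpackTCPStream``. A hedged usage sketch (the address and the listening peer are hypothetical):

    # illustrative only: dial a peer actor's transport directly
    import trio
    from tractor._ipc import Channel

    async def ping_peer():
        chan = Channel(destaddr=('127.0.0.1', 1616))  # hypothetical listening actor
        await chan.connect()
        # which framing class was picked depends on whether ``msgspec`` is installed
        print(chan.stream_serializer_type.__name__)
        await chan.send({'cmd': 'ping'})
        await chan.aclose()

    # trio.run(ping_peer)  # needs a live peer on that port, so left commented out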
|  | @ -17,7 +17,12 @@ from async_generator import asynccontextmanager | ||||||
| from ._state import current_actor | from ._state import current_actor | ||||||
| from ._ipc import Channel | from ._ipc import Channel | ||||||
| from .log import get_logger | from .log import get_logger | ||||||
| from ._exceptions import unpack_error, NoResult, RemoteActorError | from ._exceptions import ( | ||||||
|  |     unpack_error, | ||||||
|  |     NoResult, | ||||||
|  |     RemoteActorError, | ||||||
|  |     ContextCancelled, | ||||||
|  | ) | ||||||
| from ._streaming import Context, ReceiveMsgStream | from ._streaming import Context, ReceiveMsgStream | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
|  | @ -84,7 +89,7 @@ class Portal: | ||||||
|         ns: str, |         ns: str, | ||||||
|         func: str, |         func: str, | ||||||
|         kwargs, |         kwargs, | ||||||
|     ) -> Tuple[str, trio.abc.ReceiveChannel, str, Dict[str, Any]]: |     ) -> Tuple[str, trio.MemoryReceiveChannel, str, Dict[str, Any]]: | ||||||
|         """Submit a function to be scheduled and run by actor, return the |         """Submit a function to be scheduled and run by actor, return the | ||||||
|         associated caller id, response queue, response type str, |         associated caller id, response queue, response type str, | ||||||
|         first message packet as a tuple. |         first message packet as a tuple. | ||||||
|  | @ -172,6 +177,7 @@ class Portal: | ||||||
|                 f"Cancelling all streams with {self.channel.uid}") |                 f"Cancelling all streams with {self.channel.uid}") | ||||||
|             for stream in self._streams.copy(): |             for stream in self._streams.copy(): | ||||||
|                 try: |                 try: | ||||||
|  |                     # with trio.CancelScope(shield=True): | ||||||
|                     await stream.aclose() |                     await stream.aclose() | ||||||
|                 except trio.ClosedResourceError: |                 except trio.ClosedResourceError: | ||||||
|                     # don't error the stream having already been closed |                     # don't error the stream having already been closed | ||||||
|  | @ -289,6 +295,7 @@ class Portal: | ||||||
|         self, |         self, | ||||||
|         async_gen_func: Callable,  # typing: ignore |         async_gen_func: Callable,  # typing: ignore | ||||||
|         **kwargs, |         **kwargs, | ||||||
|  | 
 | ||||||
|     ) -> AsyncGenerator[ReceiveMsgStream, None]: |     ) -> AsyncGenerator[ReceiveMsgStream, None]: | ||||||
| 
 | 
 | ||||||
|         if not inspect.isasyncgenfunction(async_gen_func): |         if not inspect.isasyncgenfunction(async_gen_func): | ||||||
|  | @ -312,13 +319,23 @@ class Portal: | ||||||
| 
 | 
 | ||||||
|         ctx = Context(self.channel, cid, _portal=self) |         ctx = Context(self.channel, cid, _portal=self) | ||||||
|         try: |         try: | ||||||
|             async with ReceiveMsgStream(ctx, recv_chan, self) as rchan: |             # deliver receive only stream | ||||||
|  |             async with ReceiveMsgStream(ctx, recv_chan) as rchan: | ||||||
|                 self._streams.add(rchan) |                 self._streams.add(rchan) | ||||||
|                 yield rchan |                 yield rchan | ||||||
|  | 
 | ||||||
|         finally: |         finally: | ||||||
|  | 
 | ||||||
|             # cancel the far end task on consumer close |             # cancel the far end task on consumer close | ||||||
|  |             # NOTE: this is a special case since we assume that if using | ||||||
|  |             # this ``.open_fream_from()`` api, the stream is one a one | ||||||
|  |             # time use and we couple the far end tasks's lifetime to | ||||||
|  |             # the consumer's scope; we don't ever send a `'stop'` | ||||||
|  |             # message right now since there shouldn't be a reason to | ||||||
|  |             # stop and restart the stream, right? | ||||||
|             try: |             try: | ||||||
|                 await ctx.cancel() |                 await ctx.cancel() | ||||||
|  | 
 | ||||||
|             except trio.ClosedResourceError: |             except trio.ClosedResourceError: | ||||||
|                 # if the far end terminates before we send a cancel the |                 # if the far end terminates before we send a cancel the | ||||||
|                 # underlying transport-channel may already be closed. |                 # underlying transport-channel may already be closed. | ||||||
|  | @ -326,16 +343,127 @@ class Portal: | ||||||
| 
 | 
 | ||||||
|             self._streams.remove(rchan) |             self._streams.remove(rchan) | ||||||
| 
 | 
 | ||||||
|     # @asynccontextmanager |     @asynccontextmanager | ||||||
|     # async def open_context( |     async def open_context( | ||||||
|     #     self, | 
 | ||||||
|     #     func: Callable, |         self, | ||||||
|     #     **kwargs, |         func: Callable, | ||||||
|     # ) -> Context: |         **kwargs, | ||||||
|     #     # TODO | 
 | ||||||
|     #     elif resptype == 'context':  # context manager style setup/teardown |     ) -> AsyncGenerator[Tuple[Context, Any], None]: | ||||||
|     #         # TODO likely not here though |         '''Open an inter-actor task context. | ||||||
|     #         raise NotImplementedError | 
 | ||||||
|  |         This is a synchronous API which allows for deterministic | ||||||
|  |         setup/teardown of a remote task. The yielded ``Context`` further | ||||||
|  |         allows for opening bidirectional streams, explicit cancellation | ||||||
|  |         and synchronized final result collection. See ``tractor.Context``. | ||||||
|  | 
 | ||||||
|  |         ''' | ||||||
|  | 
 | ||||||
|  |         # conduct target func method structural checks | ||||||
|  |         if not inspect.iscoroutinefunction(func) and ( | ||||||
|  |             getattr(func, '_tractor_contex_function', False) | ||||||
|  |         ): | ||||||
|  |             raise TypeError( | ||||||
|  |                 f'{func} must be an async generator function!') | ||||||
|  | 
 | ||||||
|  |         fn_mod_path, fn_name = func_deats(func) | ||||||
|  | 
 | ||||||
|  |         recv_chan: Optional[trio.MemoryReceiveChannel] = None | ||||||
|  | 
 | ||||||
|  |         cid, recv_chan, functype, first_msg = await self._submit( | ||||||
|  |             fn_mod_path, fn_name, kwargs) | ||||||
|  | 
 | ||||||
|  |         assert functype == 'context' | ||||||
|  |         msg = await recv_chan.receive() | ||||||
|  | 
 | ||||||
|  |         try: | ||||||
|  |             # the "first" value here is delivered by the callee's | ||||||
|  |             # ``Context.started()`` call. | ||||||
|  |             first = msg['started'] | ||||||
|  | 
 | ||||||
|  |         except KeyError: | ||||||
|  |             assert msg.get('cid'), ("Received internal error at context?") | ||||||
|  | 
 | ||||||
|  |             if msg.get('error'): | ||||||
|  |                 # raise the error message | ||||||
|  |                 raise unpack_error(msg, self.channel) | ||||||
|  |             else: | ||||||
|  |                 raise | ||||||
|  | 
 | ||||||
|  |         _err = None | ||||||
|  |         # deliver context instance and .started() msg value in open | ||||||
|  |         # tuple. | ||||||
|  |         try: | ||||||
|  |             async with trio.open_nursery() as scope_nursery: | ||||||
|  |                 ctx = Context( | ||||||
|  |                     self.channel, | ||||||
|  |                     cid, | ||||||
|  |                     _portal=self, | ||||||
|  |                     _recv_chan=recv_chan, | ||||||
|  |                     _scope_nursery=scope_nursery, | ||||||
|  |                 ) | ||||||
|  |                 recv_chan._ctx = ctx | ||||||
|  | 
 | ||||||
|  |                 # await trio.lowlevel.checkpoint() | ||||||
|  |                 yield ctx, first | ||||||
|  | 
 | ||||||
|  |                 # if not ctx._cancel_called: | ||||||
|  |                 #     await ctx.result() | ||||||
|  | 
 | ||||||
|  |             # await recv_chan.aclose() | ||||||
|  | 
 | ||||||
|  |         except ContextCancelled as err: | ||||||
|  |             _err = err | ||||||
|  |             if not ctx._cancel_called: | ||||||
|  |                 # context was cancelled at the far end but was | ||||||
|  |                 # not part of this end requesting that cancel | ||||||
|  |                 # so raise for the local task to respond and handle. | ||||||
|  |                 raise | ||||||
|  | 
 | ||||||
|  |             # if the context was cancelled by client code | ||||||
|  |             # then we don't need to raise since user code | ||||||
|  |             # is expecting this and the block should exit. | ||||||
|  |             else: | ||||||
|  |                 log.debug(f'Context {ctx} cancelled gracefully') | ||||||
|  | 
 | ||||||
|  |         except ( | ||||||
|  |             trio.Cancelled, | ||||||
|  |             trio.MultiError, | ||||||
|  |             Exception, | ||||||
|  |         ) as err: | ||||||
|  |             _err = err | ||||||
|  |             # the context cancels itself on any cancel | ||||||
|  |             # causing error. | ||||||
|  |             log.error(f'Context {ctx} sending cancel to far end') | ||||||
|  |             with trio.CancelScope(shield=True): | ||||||
|  |                 await ctx.cancel() | ||||||
|  |             raise | ||||||
|  | 
 | ||||||
|  |         finally: | ||||||
|  |             result = await ctx.result() | ||||||
|  | 
 | ||||||
|  |             # though it should be impossible for any tasks | ||||||
|  |             # operating *in* this scope to have survived | ||||||
|  |             # we tear down the runtime feeder chan last | ||||||
|  |             # to avoid premature stream clobbers. | ||||||
|  |             if recv_chan is not None: | ||||||
|  |                 await recv_chan.aclose() | ||||||
|  | 
 | ||||||
|  |             if _err: | ||||||
|  |                 if ctx._cancel_called: | ||||||
|  |                     log.warning( | ||||||
|  |                         f'Context {fn_name} cancelled by caller with\n{_err}' | ||||||
|  |                     ) | ||||||
|  |                 elif _err is not None: | ||||||
|  |                     log.warning( | ||||||
|  |                         f'Context {fn_name} cancelled by callee with\n{_err}' | ||||||
|  |                     ) | ||||||
|  |             else: | ||||||
|  |                 log.info( | ||||||
|  |                     f'Context {fn_name} returned ' | ||||||
|  |                     f'value from callee `{result}`' | ||||||
|  |                 ) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
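For orientation, the intended call pattern for the new ``open_context()`` API looks roughly like the sketch below (actor and function names are illustrative); the callee is a ``@tractor.context`` function whose ``Context.started()`` value is delivered as ``first`` per the ``msg['started']`` handling above:

    # illustrative only: caller/callee pair using the new context API
    import trio
    import tractor

    @tractor.context
    async def greeter(ctx: tractor.Context) -> None:
        # setup done; this value shows up as ``first`` on the caller side
        await ctx.started('ready')
        await trio.sleep(0.1)

    async def main():
        async with tractor.open_nursery() as an:
            portal = await an.start_actor('greeter', enable_modules=[__name__])
            async with portal.open_context(greeter) as (ctx, first):
                assert first == 'ready'
            await portal.cancel_actor()

    # trio.run(main)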
| @dataclass | @dataclass | ||||||
|  | @ -360,10 +488,12 @@ class LocalPortal: | ||||||
| 
 | 
 | ||||||
| @asynccontextmanager | @asynccontextmanager | ||||||
| async def open_portal( | async def open_portal( | ||||||
|  | 
 | ||||||
|     channel: Channel, |     channel: Channel, | ||||||
|     nursery: Optional[trio.Nursery] = None, |     nursery: Optional[trio.Nursery] = None, | ||||||
|     start_msg_loop: bool = True, |     start_msg_loop: bool = True, | ||||||
|     shield: bool = False, |     shield: bool = False, | ||||||
|  | 
 | ||||||
| ) -> AsyncGenerator[Portal, None]: | ) -> AsyncGenerator[Portal, None]: | ||||||
|     """Open a ``Portal`` through the provided ``channel``. |     """Open a ``Portal`` through the provided ``channel``. | ||||||
| 
 | 
 | ||||||
|  | @ -374,6 +504,7 @@ async def open_portal( | ||||||
|     was_connected = False |     was_connected = False | ||||||
| 
 | 
 | ||||||
|     async with maybe_open_nursery(nursery, shield=shield) as nursery: |     async with maybe_open_nursery(nursery, shield=shield) as nursery: | ||||||
|  | 
 | ||||||
|         if not channel.connected(): |         if not channel.connected(): | ||||||
|             await channel.connect() |             await channel.connect() | ||||||
|             was_connected = True |             was_connected = True | ||||||
|  | @ -395,12 +526,14 @@ async def open_portal( | ||||||
|         portal = Portal(channel) |         portal = Portal(channel) | ||||||
|         try: |         try: | ||||||
|             yield portal |             yield portal | ||||||
|  | 
 | ||||||
|         finally: |         finally: | ||||||
|             await portal.aclose() |             await portal.aclose() | ||||||
| 
 | 
 | ||||||
|             if was_connected: |             if was_connected: | ||||||
|                 # cancel remote channel-msg loop |                 # gracefully signal remote channel-msg loop | ||||||
|                 await channel.send(None) |                 await channel.send(None) | ||||||
|  |                 # await channel.aclose() | ||||||
| 
 | 
 | ||||||
|             # cancel background msg loop task |             # cancel background msg loop task | ||||||
|             if msg_loop_cs: |             if msg_loop_cs: | ||||||
|  |  | ||||||
|  | @ -166,15 +166,17 @@ async def open_root_actor( | ||||||
|                 yield actor |                 yield actor | ||||||
| 
 | 
 | ||||||
|             except (Exception, trio.MultiError) as err: |             except (Exception, trio.MultiError) as err: | ||||||
|                 logger.exception("Actor crashed:") |                 # with trio.CancelScope(shield=True): | ||||||
|                 await _debug._maybe_enter_pm(err) |                 entered = await _debug._maybe_enter_pm(err) | ||||||
|  | 
 | ||||||
|  |                 if not entered: | ||||||
|  |                     logger.exception("Root actor crashed:") | ||||||
| 
 | 
 | ||||||
|                 # always re-raise |                 # always re-raise | ||||||
|                 raise |                 raise | ||||||
| 
 | 
 | ||||||
|             finally: |             finally: | ||||||
|                 logger.info("Shutting down root actor") |                 logger.info("Shutting down root actor") | ||||||
|                 with trio.CancelScope(shield=True): |  | ||||||
|                 await actor.cancel() |                 await actor.cancel() | ||||||
|     finally: |     finally: | ||||||
|         _state._current_actor = None |         _state._current_actor = None | ||||||
|  |  | ||||||
|  | @ -22,7 +22,14 @@ from multiprocessing import forkserver  # type: ignore | ||||||
| from typing import Tuple | from typing import Tuple | ||||||
| 
 | 
 | ||||||
| from . import _forkserver_override | from . import _forkserver_override | ||||||
| from ._state import current_actor, is_main_process | from ._state import ( | ||||||
|  |     current_actor, | ||||||
|  |     is_main_process, | ||||||
|  |     is_root_process, | ||||||
|  |     _runtime_vars, | ||||||
|  | ) | ||||||
|  | from ._debug import _global_actor_in_debug | ||||||
|  | 
 | ||||||
| from .log import get_logger | from .log import get_logger | ||||||
| from ._portal import Portal | from ._portal import Portal | ||||||
| from ._actor import Actor, ActorFailure | from ._actor import Actor, ActorFailure | ||||||
|  | @ -141,13 +148,34 @@ async def cancel_on_completion( | ||||||
|             ) |             ) | ||||||
| 
 | 
 | ||||||
|         else: |         else: | ||||||
|             log.info( |             log.runtime( | ||||||
|                 f"Cancelling {portal.channel.uid} gracefully " |                 f"Cancelling {portal.channel.uid} gracefully " | ||||||
|                 f"after result {result}") |                 f"after result {result}") | ||||||
| 
 | 
 | ||||||
|         # cancel the process now that we have a final result |         # cancel the process now that we have a final result | ||||||
|         await portal.cancel_actor() |         await portal.cancel_actor() | ||||||
| 
 | 
 | ||||||
|  | async def do_hard_kill( | ||||||
|  |     proc: trio.Process, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|  |     # NOTE: this timeout used to do nothing since we were shielding | ||||||
|  |     # the ``.wait()`` inside ``new_proc()`` which will pretty much | ||||||
|  |     # never release until the process exits, now it acts as | ||||||
|  |     # a hard-kill time ultimatum. | ||||||
|  |     with trio.move_on_after(3) as cs: | ||||||
|  | 
 | ||||||
|  |         # NOTE: This ``__aexit__()`` shields internally. | ||||||
|  |         async with proc:  # calls ``trio.Process.aclose()`` | ||||||
|  |             log.debug(f"Terminating {proc}") | ||||||
|  | 
 | ||||||
|  |     if cs.cancelled_caught: | ||||||
|  |         # XXX: should pretty much never get here unless we have | ||||||
|  |         # to move the bits from ``proc.__aexit__()`` out and | ||||||
|  |         # into here. | ||||||
|  |         log.critical(f"HARD KILLING {proc}") | ||||||
|  |         proc.kill() | ||||||
|  | 
 | ||||||
| 
 | 
 | ||||||
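The ``do_hard_kill()`` helper factored out above is the classic "grace period, then SIGKILL" reaping pattern; a generic sketch of the same idea on a plain ``trio.Process`` (the command line is arbitrary):

    # illustrative only: graceful wait with a hard-kill ultimatum
    import trio

    async def reap(cmd=('sleep', '10'), grace: float = 3) -> int:
        proc = await trio.open_process(list(cmd))
        proc.terminate()  # polite request first
        with trio.move_on_after(grace) as cs:
            await proc.wait()
        if cs.cancelled_caught:
            # the process ignored SIGTERM for ``grace`` seconds
            proc.kill()
            await proc.wait()
        return proc.returncode

    # trio.run(reap)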
| @asynccontextmanager | @asynccontextmanager | ||||||
| async def spawn_subactor( | async def spawn_subactor( | ||||||
|  | @ -180,26 +208,48 @@ async def spawn_subactor( | ||||||
|     proc = await trio.open_process(spawn_cmd) |     proc = await trio.open_process(spawn_cmd) | ||||||
|     try: |     try: | ||||||
|         yield proc |         yield proc | ||||||
|  | 
 | ||||||
|     finally: |     finally: | ||||||
|  |         log.runtime(f"Attempting to kill {proc}") | ||||||
|  | 
 | ||||||
|         # XXX: do this **after** cancellation/teardown |         # XXX: do this **after** cancellation/teardown | ||||||
|         # to avoid killing the process too early |         # to avoid killing the process too early | ||||||
|         # since trio does this internally on ``__aexit__()`` |         # since trio does this internally on ``__aexit__()`` | ||||||
| 
 | 
 | ||||||
|         # NOTE: we always "shield" join sub procs in |         # if ( | ||||||
|         # the outer scope since no actor zombies are |         #     is_root_process() | ||||||
|         # ever allowed. This ``__aexit__()`` also shields |  | ||||||
|         # internally. |  | ||||||
|         log.debug(f"Attempting to kill {proc}") |  | ||||||
| 
 | 
 | ||||||
|         # NOTE: this timeout effectively does nothing right now since |         #     # XXX: basically the pre-closing of stdstreams in a | ||||||
|         # we are shielding the ``.wait()`` inside ``new_proc()`` which |         #     # root-processe's ``trio.Process.aclose()`` can clobber | ||||||
|         # will pretty much never release until the process exits. |         #     # any existing debugger session so we avoid | ||||||
|         with trio.move_on_after(3) as cs: |         #     and _runtime_vars['_debug_mode'] | ||||||
|             async with proc: |         #     and _global_actor_in_debug is not None | ||||||
|                 log.debug(f"Terminating {proc}") |         # ): | ||||||
|         if cs.cancelled_caught: |         #     # XXX: this is ``trio.Process.aclose()`` MINUS the | ||||||
|             log.critical(f"HARD KILLING {proc}") |         #     # std-streams pre-closing steps inside ``proc.__aexit__()`` | ||||||
|             proc.kill() |         #     # (see below) which includes a ``Process.kill()`` call | ||||||
|  | 
 | ||||||
|  |         #     log.error( | ||||||
|  |         #         "Root process tty is locked in debug mode by " | ||||||
|  |         #         f"{_global_actor_in_debug}. If the console is hanging, you " | ||||||
|  |         #         "may need to trigger a KBI to kill any " | ||||||
|  |         #         "not-fully-initialized" " subprocesses and allow errors " | ||||||
|  |         #         "from `trio` to propagate" | ||||||
|  |         #     ) | ||||||
|  |         #     try: | ||||||
|  |         #         # one more graceful wait try which can be cancelled by KBI | ||||||
|  |         #         # sent by user. | ||||||
|  |         #         await proc.wait() | ||||||
|  | 
 | ||||||
|  |         #     finally: | ||||||
|  |         #         if proc.returncode is None: | ||||||
|  |         #             # with trio.CancelScope(shield=True): | ||||||
|  |         #             #     await proc.wait() | ||||||
|  | 
 | ||||||
|  |         #             await do_hard_kill(proc) | ||||||
|  |         # else: | ||||||
|  | 
 | ||||||
|  |         await do_hard_kill(proc) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| async def new_proc( | async def new_proc( | ||||||
|  | @ -212,7 +262,6 @@ async def new_proc( | ||||||
|     parent_addr: Tuple[str, int], |     parent_addr: Tuple[str, int], | ||||||
|     _runtime_vars: Dict[str, Any],  # serialized and sent to _child |     _runtime_vars: Dict[str, Any],  # serialized and sent to _child | ||||||
|     *, |     *, | ||||||
|     use_trio_run_in_process: bool = False, |  | ||||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED |     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||||
| ) -> None: | ) -> None: | ||||||
|     """Create a new ``multiprocessing.Process`` using the |     """Create a new ``multiprocessing.Process`` using the | ||||||
|  | @ -223,13 +272,13 @@ async def new_proc( | ||||||
|     # mark the new actor with the global spawn method |     # mark the new actor with the global spawn method | ||||||
|     subactor._spawn_method = _spawn_method |     subactor._spawn_method = _spawn_method | ||||||
| 
 | 
 | ||||||
|     if use_trio_run_in_process or _spawn_method == 'trio': |     if _spawn_method == 'trio': | ||||||
|         async with trio.open_nursery() as nursery: |         async with trio.open_nursery() as nursery: | ||||||
|             async with spawn_subactor( |             async with spawn_subactor( | ||||||
|                 subactor, |                 subactor, | ||||||
|                 parent_addr, |                 parent_addr, | ||||||
|             ) as proc: |             ) as proc: | ||||||
|                 log.info(f"Started {proc}") |                 log.runtime(f"Started {proc}") | ||||||
| 
 | 
 | ||||||
|                 # wait for actor to spawn and connect back to us |                 # wait for actor to spawn and connect back to us | ||||||
|                 # channel should have handshake completed by the |                 # channel should have handshake completed by the | ||||||
|  | @ -277,9 +326,14 @@ async def new_proc( | ||||||
|                 # reaping more stringently without the shield |                 # reaping more stringently without the shield | ||||||
|                 # we used to have below... |                 # we used to have below... | ||||||
| 
 | 
 | ||||||
|                 # always "hard" join sub procs: |  | ||||||
|                 # no actor zombies allowed |  | ||||||
|                 # with trio.CancelScope(shield=True): |                 # with trio.CancelScope(shield=True): | ||||||
|  |                 # async with proc: | ||||||
|  | 
 | ||||||
|  |                 # Always "hard" join sub procs since no actor zombies | ||||||
|  |                 # are allowed! | ||||||
|  | 
 | ||||||
|  |                 # this is a "light" (cancellable) join, the hard join is | ||||||
|  |                 # in the enclosing scope (see above). | ||||||
|                 await proc.wait() |                 await proc.wait() | ||||||
| 
 | 
 | ||||||
|             log.debug(f"Joined {proc}") |             log.debug(f"Joined {proc}") | ||||||
|  | @ -320,7 +374,6 @@ async def mp_new_proc( | ||||||
|     parent_addr: Tuple[str, int], |     parent_addr: Tuple[str, int], | ||||||
|     _runtime_vars: Dict[str, Any],  # serialized and sent to _child |     _runtime_vars: Dict[str, Any],  # serialized and sent to _child | ||||||
|     *, |     *, | ||||||
|     use_trio_run_in_process: bool = False, |  | ||||||
|     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED |     task_status: TaskStatus[Portal] = trio.TASK_STATUS_IGNORED | ||||||
| 
 | 
 | ||||||
| ) -> None: | ) -> None: | ||||||
|  | @ -380,7 +433,7 @@ async def mp_new_proc( | ||||||
|         if not proc.is_alive(): |         if not proc.is_alive(): | ||||||
|             raise ActorFailure("Couldn't start sub-actor?") |             raise ActorFailure("Couldn't start sub-actor?") | ||||||
| 
 | 
 | ||||||
|         log.info(f"Started {proc}") |         log.runtime(f"Started {proc}") | ||||||
| 
 | 
 | ||||||
|         try: |         try: | ||||||
|             # wait for actor to spawn and connect back to us |             # wait for actor to spawn and connect back to us | ||||||
|  |  | ||||||
|  | @ -1,41 +1,312 @@ | ||||||
|  | """ | ||||||
|  | Message stream types and APIs. | ||||||
|  | 
 | ||||||
|  | """ | ||||||
| import inspect | import inspect | ||||||
| from contextlib import contextmanager  # , asynccontextmanager | from contextlib import contextmanager, asynccontextmanager | ||||||
| from dataclasses import dataclass | from dataclasses import dataclass | ||||||
| from typing import Any, Iterator, Optional | from typing import ( | ||||||
|  |     Any, Iterator, Optional, Callable, | ||||||
|  |     AsyncGenerator, Dict, | ||||||
|  | ) | ||||||
|  | 
 | ||||||
| import warnings | import warnings | ||||||
| 
 | 
 | ||||||
| import trio | import trio | ||||||
| 
 | 
 | ||||||
| from ._ipc import Channel | from ._ipc import Channel | ||||||
| from ._exceptions import unpack_error | from ._exceptions import unpack_error, ContextCancelled | ||||||
|  | from ._state import current_actor | ||||||
| from .log import get_logger | from .log import get_logger | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| log = get_logger(__name__) | log = get_logger(__name__) | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| @dataclass(frozen=True) | # TODO: generic typing like trio's receive channel | ||||||
| class Context: | # but with msgspec messages? | ||||||
|     """An IAC (inter-actor communication) context. | # class ReceiveChannel(AsyncResource, Generic[ReceiveType]): | ||||||
| 
 | 
 | ||||||
|     Allows maintaining task or protocol specific state between communicating |  | ||||||
|     actors. A unique context is created on the receiving end for every request |  | ||||||
|     to a remote actor. |  | ||||||
| 
 | 
 | ||||||
|     A context can be cancelled and (eventually) restarted from | class ReceiveMsgStream(trio.abc.ReceiveChannel): | ||||||
|     either side of the underlying IPC channel. |     """A wrapper around a ``trio._channel.MemoryReceiveChannel`` with | ||||||
|  |     special behaviour for signalling stream termination across an | ||||||
|  |     inter-actor ``Channel``. This is the type returned to a local task | ||||||
|  |     which invoked a remote streaming function using `Portal.run()`. | ||||||
| 
 | 
 | ||||||
|     A context can be used to open task oriented message streams. |     Termination rules: | ||||||
|  | 
 | ||||||
|  |     - if the local task signals stop iteration a cancel signal is | ||||||
|  |       relayed to the remote task indicating to stop streaming | ||||||
|  |     - if the remote task signals the end of a stream, raise | ||||||
|  |       a ``StopAsyncIteration`` to terminate the local ``async for`` | ||||||
| 
 | 
 | ||||||
|     """ |     """ | ||||||
|  |     def __init__( | ||||||
|  |         self, | ||||||
|  |         ctx: 'Context',  # typing: ignore # noqa | ||||||
|  |         rx_chan: trio.abc.ReceiveChannel, | ||||||
|  |         shield: bool = False, | ||||||
|  |     ) -> None: | ||||||
|  |         self._ctx = ctx | ||||||
|  |         self._rx_chan = rx_chan | ||||||
|  |         self._shielded = shield | ||||||
|  | 
 | ||||||
|  |         # flag to denote end of stream | ||||||
|  |         self._eoc: bool = False | ||||||
|  | 
 | ||||||
|  |     # delegate directly to underlying mem channel | ||||||
|  |     def receive_nowait(self): | ||||||
|  |         msg = self._rx_chan.receive_nowait() | ||||||
|  |         return msg['yield'] | ||||||
|  | 
 | ||||||
|  |     async def receive(self): | ||||||
|  |         # see ``.aclose()`` for notes on the old behaviour prior to | ||||||
|  |         # introducing this | ||||||
|  |         if self._eoc: | ||||||
|  |             raise trio.EndOfChannel | ||||||
|  | 
 | ||||||
|  |         try: | ||||||
|  | 
 | ||||||
|  |             msg = await self._rx_chan.receive() | ||||||
|  |             return msg['yield'] | ||||||
|  | 
 | ||||||
|  |         except KeyError: | ||||||
|  |             # internal error should never get here | ||||||
|  |             assert msg.get('cid'), ("Received internal error at portal?") | ||||||
|  | 
 | ||||||
|  |             # TODO: handle 2 cases with 3.10 match syntax | ||||||
|  |             # - 'stop' | ||||||
|  |             # - 'error' | ||||||
|  |             # possibly just handle msg['stop'] here! | ||||||
|  | 
 | ||||||
|  |             if msg.get('stop'): | ||||||
|  |                 log.debug(f"{self} was stopped at remote end") | ||||||
|  | 
 | ||||||
|  |                 # # when the send is closed we assume the stream has | ||||||
|  |                 # # terminated and signal this local iterator to stop | ||||||
|  |                 # await self.aclose() | ||||||
|  | 
 | ||||||
|  |                 # XXX: this causes ``ReceiveChannel.__anext__()`` to | ||||||
|  |                 # raise a ``StopAsyncIteration`` **and** in our catch | ||||||
|  |                 # block below it will trigger ``.aclose()``. | ||||||
|  |                 raise trio.EndOfChannel | ||||||
|  | 
 | ||||||
|  |             # TODO: test that shows stream raising an expected error!!! | ||||||
|  |             elif msg.get('error'): | ||||||
|  |                 # raise the error message | ||||||
|  |                 raise unpack_error(msg, self._ctx.chan) | ||||||
|  | 
 | ||||||
|  |             else: | ||||||
|  |                 raise | ||||||
|  | 
 | ||||||
|  |         except ( | ||||||
|  |             trio.ClosedResourceError,  # by self._rx_chan | ||||||
|  |             trio.EndOfChannel,  # by self._rx_chan or `stop` msg from far end | ||||||
|  |             trio.Cancelled,  # by local cancellation | ||||||
|  |         ): | ||||||
|  |             # XXX: we close the stream on any of these error conditions: | ||||||
|  | 
 | ||||||
|  |             # a ``ClosedResourceError`` indicates that the internal | ||||||
|  |             # feeder memory receive channel was closed likely by the | ||||||
|  |             # runtime after the associated transport-channel | ||||||
|  |             # disconnected or broke. | ||||||
|  | 
 | ||||||
|  |             # an ``EndOfChannel`` indicates either the internal recv | ||||||
|  |             # memchan exhausted **or** we raised it just above after | ||||||
|  |             # receiving a `stop` message from the far end of the stream. | ||||||
|  | 
 | ||||||
|  |             # Previously this was triggered by calling ``.aclose()`` on | ||||||
|  |             # the send side of the channel inside | ||||||
|  |             # ``Actor._push_result()`` (should still be commented code | ||||||
|  |             # there - which should eventually get removed), but now the | ||||||
|  |             # 'stop' message handling has been put just above. | ||||||
|  | 
 | ||||||
|  |             # TODO: Locally, we want to close this stream gracefully, by | ||||||
|  |             # terminating any local consumers tasks deterministically. | ||||||
|  |             # One we have broadcast support, we **don't** want to be | ||||||
|  |             # closing this stream and not flushing a final value to | ||||||
|  |             # remaining (clone) consumers who may not have been | ||||||
|  |             # scheduled to receive it yet. | ||||||
|  | 
 | ||||||
|  |             # when the send is closed we assume the stream has | ||||||
|  |             # terminated and signal this local iterator to stop | ||||||
|  |             await self.aclose() | ||||||
|  | 
 | ||||||
|  |             raise  # propagate | ||||||
|  | 
 | ||||||
|  |     @contextmanager | ||||||
|  |     def shield( | ||||||
|  |         self | ||||||
|  |     ) -> Iterator['ReceiveMsgStream']:  # noqa | ||||||
|  |         """Shield this stream's underlying channel such that a local consumer task | ||||||
|  |         can be cancelled (and possibly restarted) using ``trio.Cancelled``. | ||||||
|  | 
 | ||||||
|  |         Note that "shielding" here guards against relaying | ||||||
|  |         a ``'stop'`` message to the far end of the stream, thus keeping | ||||||
|  |         the stream machinery active and ready for further use; it does | ||||||
|  |         not have anything to do with an internal ``trio.CancelScope``. | ||||||
|  | 
 | ||||||
|  |         """ | ||||||
|  |         self._shielded = True | ||||||
|  |         yield self | ||||||
|  |         self._shielded = False | ||||||
|  | 
 | ||||||
|  |     async def aclose(self): | ||||||
|  |         """Cancel associated remote actor task and local memory channel | ||||||
|  |         on close. | ||||||
|  | 
 | ||||||
|  |         """ | ||||||
|  |         # XXX: keep proper adherance to trio's `.aclose()` semantics: | ||||||
|  |         # https://trio.readthedocs.io/en/stable/reference-io.html#trio.abc.AsyncResource.aclose | ||||||
|  |         rx_chan = self._rx_chan | ||||||
|  | 
 | ||||||
|  |         if rx_chan._closed: | ||||||
|  |             log.warning(f"{self} is already closed") | ||||||
|  | 
 | ||||||
|  |             # this stream has already been closed so silently succeed as | ||||||
|  |             # per ``trio.AsyncResource`` semantics. | ||||||
|  |             # https://trio.readthedocs.io/en/stable/reference-io.html#trio.abc.AsyncResource.aclose | ||||||
|  |             return | ||||||
|  | 
 | ||||||
|  |         # TODO: broadcasting to multiple consumers | ||||||
|  |         # stats = rx_chan.statistics() | ||||||
|  |         # if stats.open_receive_channels > 1: | ||||||
|  |         #     # if we've been cloned don't kill the stream | ||||||
|  |         #     log.debug( | ||||||
|  |         #       "there are still consumers running keeping stream alive") | ||||||
|  |         #     return | ||||||
|  | 
 | ||||||
|  |         if self._shielded: | ||||||
|  |             log.warning(f"{self} is shielded, portal channel being kept alive") | ||||||
|  |             return | ||||||
|  | 
 | ||||||
|  |         # XXX: This must be set **AFTER** the shielded test above! | ||||||
|  |         self._eoc = True | ||||||
|  | 
 | ||||||
|  |         # NOTE: this is super subtle IPC messaging stuff: | ||||||
|  |         # Relay stop iteration to far end **iff** we're | ||||||
|  |         # in bidirectional mode. If we're only streaming | ||||||
|  |         # *from* one side then that side **won't** have an | ||||||
|  |         # entry in `Actor._cids2qs` (maybe it should though?). | ||||||
|  |         # So any `yield` or `stop` msgs sent from the caller side | ||||||
|  |         # will cause key errors on the callee side since there is | ||||||
|  |         # no entry for a local feeder mem chan since the callee task | ||||||
|  |         # isn't expecting messages to be sent by the caller. | ||||||
|  |         # Thus, we must check that this context DOES NOT | ||||||
|  |         # have a portal reference to ensure this is indeed the callee | ||||||
|  |         # side and can relay a 'stop'. | ||||||
|  | 
 | ||||||
|  |         # In the bidirectional case, `Context.open_stream()` will create | ||||||
|  |         # the `Actor._cids2qs` entry from a call to | ||||||
|  |         # `Actor.get_memchans()` and will send the stop message in | ||||||
|  |         # ``__aexit__()`` on teardown so it **does not** need to be | ||||||
|  |         # called here. | ||||||
|  |         if not self._ctx._portal: | ||||||
|  |             try: | ||||||
|  |                 # only for 2 way streams can we can send | ||||||
|  |                 # stop from the caller side | ||||||
|  |                 await self._ctx.send_stop() | ||||||
|  | 
 | ||||||
|  |             except ( | ||||||
|  |                 trio.BrokenResourceError, | ||||||
|  |                 trio.ClosedResourceError | ||||||
|  |             ): | ||||||
|  |                 # the underlying channel may already have been pulled | ||||||
|  |                 # in which case our stop message is meaningless since | ||||||
|  |                 # it can't traverse the transport. | ||||||
|  |                 log.debug(f'Channel for {self} was already closed') | ||||||
|  | 
 | ||||||
|  |         # close the local mem chan ``self._rx_chan`` ??!? | ||||||
|  | 
 | ||||||
|  |         # DEFINITELY NOT if we're a bi-dir ``MsgStream``! | ||||||
|  |         # BECAUSE this same core-msg-loop mem recv-chan is used to deliver | ||||||
|  |         # the potential final result from the surrounding inter-actor | ||||||
|  |         # `Context` so we don't want to close it until that context has | ||||||
|  |         # run to completion. | ||||||
|  | 
 | ||||||
|  |         # XXX: Notes on old behaviour: | ||||||
|  |         # await rx_chan.aclose() | ||||||
|  | 
 | ||||||
|  |         # In the receive-only case, ``Portal.open_stream_from()`` used | ||||||
|  |         # to rely on this call explicitly on teardown such that a new | ||||||
|  |         # call to ``.receive()`` after ``rx_chan`` had been closed, would | ||||||
|  |         # result in us raising a ``trio.EndOfChannel`` (since we | ||||||
|  |         # remapped the ``trio.ClosedResourceError``). However, now if for some | ||||||
|  |         # reason the stream's consumer code tries to manually receive a new | ||||||
|  |         # value before ``.aclose()`` is called **but** the far end has | ||||||
|  |         # stopped `.receive()` **must** raise ``trio.EndOfChannel`` in | ||||||
|  |         # order to avoid an infinite hang on ``.__anext__()``; this is | ||||||
|  |         # why we added ``self._eoc`` to denote stream closure independent | ||||||
|  |         # of ``rx_chan``. | ||||||
|  | 
 | ||||||
|  |         # In theory we could still use this old method and close the | ||||||
|  |         # underlying msg-loop mem chan as above and then **not** check | ||||||
|  |         # for ``self._eoc`` in ``.receive()`` (if for some reason we | ||||||
|  |         # think that check is a bottle neck - not likely) **but** then | ||||||
|  |         # we would need to map the resulting | ||||||
|  |         # ``trio.ClosedResourceError`` to a ``trio.EndOfChannel`` in | ||||||
|  |         # ``.receive()`` (as it originally was before bi-dir streaming | ||||||
|  |         # support) in order to trigger stream closure. The old behaviour | ||||||
|  |         # is arguably more confusing since we lose detection of the | ||||||
|  |         # runtime's closure of ``rx_chan`` in the case where we may | ||||||
|  |         # still need to consume msgs that are "in transit" from the far | ||||||
|  |         # end (eg. for ``Context.result()``). | ||||||
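To make the ``_eoc`` reasoning above concrete, the consumer-side behaviour it guarantees looks roughly like the sketch below (``streamer`` is a hypothetical ``@tractor.context`` endpoint defined elsewhere; this illustrates the intended semantics and is not code from this branch):

import trio
import tractor


async def drain_then_check_eoc(
    portal: tractor.Portal,
    streamer,  # hypothetical @tractor.context endpoint
) -> None:
    async with portal.open_context(streamer) as (ctx, _first):
        async with ctx.open_stream() as stream:

            # normal consumption ends once the far end sends 'stop'
            async for msg in stream:
                print(msg)

            # a further *manual* receive must not hang on the still-open
            # runtime mem chan; the ``_eoc`` flag makes it raise instead.
            try:
                await stream.receive()
            except trio.EndOfChannel:
                print('stream already terminated by the far end')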
|  | 
 | ||||||
|  | 
 | ||||||
|  | class MsgStream(ReceiveMsgStream, trio.abc.Channel): | ||||||
|  |     """ | ||||||
|  |     Bidirectional message stream for use within an inter-actor | ||||||
|  |     ``Context``. | ||||||
|  | 
 | ||||||
|  |     """ | ||||||
|  |     async def send( | ||||||
|  |         self, | ||||||
|  |         data: Any | ||||||
|  |     ) -> None: | ||||||
|  |         '''Send a message over this stream to the far end. | ||||||
|  | 
 | ||||||
|  |         ''' | ||||||
|  |         await self._ctx.chan.send({'yield': data, 'cid': self._ctx.cid}) | ||||||
|  | 
 | ||||||
|  |     # TODO: but make it broadcasting to consumers | ||||||
|  |     def clone(self): | ||||||
|  |         """Clone this receive channel allowing for multi-task | ||||||
|  |         consumption from the same channel. | ||||||
|  | 
 | ||||||
|  |         """ | ||||||
|  |         return MsgStream( | ||||||
|  |             self._ctx, | ||||||
|  |             self._rx_chan.clone(), | ||||||
|  |         ) | ||||||
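For orientation, a minimal end-to-end use of the bidirectional API introduced in this file might look as follows (the actor name, module wiring and the ``echo`` endpoint are illustrative only, not part of this changeset):

import trio
import tractor


@tractor.context
async def echo(ctx: tractor.Context) -> None:
    # callee: complete the handshake, then echo every msg back.
    await ctx.started()
    async with ctx.open_stream() as stream:
        async for msg in stream:
            await stream.send(msg)


async def main() -> None:
    async with tractor.open_nursery() as an:
        portal = await an.start_actor('echoer', enable_modules=[__name__])

        async with portal.open_context(echo) as (ctx, first):
            assert first is None
            async with ctx.open_stream() as stream:
                await stream.send('hello')
                assert await stream.receive() == 'hello'

        await portal.cancel_actor()


if __name__ == '__main__':
    trio.run(main)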
|  | 
 | ||||||
|  | 
 | ||||||
|  | @dataclass | ||||||
|  | class Context: | ||||||
|  |     '''An inter-actor task communication context. | ||||||
|  | 
 | ||||||
|  |     Allows maintaining task or protocol specific state between | ||||||
|  |     2 communicating actor tasks. A unique context is created on the | ||||||
|  |     callee side/end for every request to a remote actor from a portal. | ||||||
|  | 
 | ||||||
|  |     A context can be cancelled (and possibly eventually restarted) from | ||||||
|  |     either side of the underlying IPC channel. | ||||||
|  | 
 | ||||||
|  |     A context can be used to open task oriented message streams and can | ||||||
|  |     be thought of as an IPC aware inter-actor cancel scope. | ||||||
|  | 
 | ||||||
|  |     ''' | ||||||
|     chan: Channel |     chan: Channel | ||||||
|     cid: str |     cid: str | ||||||
| 
 | 
 | ||||||
|     # only set on the caller side |     # only set on the caller side | ||||||
|     _portal: Optional['Portal'] = None    # type: ignore # noqa |     _portal: Optional['Portal'] = None    # type: ignore # noqa | ||||||
|  |     _recv_chan: Optional[trio.MemoryReceiveChannel] = None | ||||||
|  |     _result: Optional[Any] = False | ||||||
|  |     _cancel_called: bool = False | ||||||
| 
 | 
 | ||||||
|     # only set on the callee side |     # only set on the callee side | ||||||
|     _cancel_scope: Optional[trio.CancelScope] = None |     _scope_nursery: Optional[trio.Nursery] = None | ||||||
| 
 | 
 | ||||||
|     async def send_yield(self, data: Any) -> None: |     async def send_yield(self, data: Any) -> None: | ||||||
| 
 | 
 | ||||||
|  | @ -50,15 +321,40 @@ class Context: | ||||||
|     async def send_stop(self) -> None: |     async def send_stop(self) -> None: | ||||||
|         await self.chan.send({'stop': True, 'cid': self.cid}) |         await self.chan.send({'stop': True, 'cid': self.cid}) | ||||||
| 
 | 
 | ||||||
|  |     def _error_from_remote_msg( | ||||||
|  |         self, | ||||||
|  |         msg: Dict[str, Any], | ||||||
|  | 
 | ||||||
|  |     ) -> None: | ||||||
|  |         '''Unpack and raise a msg error into the local scope | ||||||
|  |         nursery for this context. | ||||||
|  | 
 | ||||||
|  |         Acts as a form of "relay" for a remote error raised | ||||||
|  |         in the corresponding remote callee task. | ||||||
|  |         ''' | ||||||
|  |         async def raiser(): | ||||||
|  |             raise unpack_error(msg, self.chan) | ||||||
|  | 
 | ||||||
|  |         self._scope_nursery.start_soon(raiser) | ||||||
|  | 
 | ||||||
|     async def cancel(self) -> None: |     async def cancel(self) -> None: | ||||||
|         """Cancel this inter-actor-task context. |         '''Cancel this inter-actor-task context. | ||||||
| 
 | 
 | ||||||
|         Request that the far side cancel its current linked context, |         Request that the far side cancel its current linked context, | ||||||
|         timeout quickly to sidestep 2-generals... |         timing out quickly in an attempt to sidestep the two-generals problem... | ||||||
| 
 | 
 | ||||||
|         """ |         ''' | ||||||
|         assert self._portal, ( |         side = 'caller' if self._portal else 'callee' | ||||||
|             "No portal found, this is likely a callee side context") | 
 | ||||||
|  |         log.warning(f'Cancelling {side} side of context to {self.chan}') | ||||||
|  | 
 | ||||||
|  |         self._cancel_called = True | ||||||
|  | 
 | ||||||
|  |         if side == 'caller': | ||||||
|  |             if not self._portal: | ||||||
|  |                 raise RuntimeError( | ||||||
|  |                     "No portal found, this is likely a callee side context" | ||||||
|  |                 ) | ||||||
| 
 | 
 | ||||||
|             cid = self.cid |             cid = self.cid | ||||||
|             with trio.move_on_after(0.5) as cs: |             with trio.move_on_after(0.5) as cs: | ||||||
|  | @ -76,27 +372,175 @@ class Context: | ||||||
|                 # XXX: there's no way to know if the remote task was indeed |                 # XXX: there's no way to know if the remote task was indeed | ||||||
|                 # cancelled in the case where the connection is broken or |                 # cancelled in the case where the connection is broken or | ||||||
|                 # some other network error occurred. |                 # some other network error occurred. | ||||||
|             if not self._portal.channel.connected(): |                 # if not self._portal.channel.connected(): | ||||||
|  |                 if not self.chan.connected(): | ||||||
|                     log.warning( |                     log.warning( | ||||||
|                         "May have failed to cancel remote task " |                         "May have failed to cancel remote task " | ||||||
|                         f"{cid} for {self._portal.channel.uid}") |                         f"{cid} for {self._portal.channel.uid}") | ||||||
|  |         else: | ||||||
|  |             # callee side remote task | ||||||
| 
 | 
 | ||||||
|  |             # TODO: should we have an explicit cancel message | ||||||
|  |             # or is relaying the local `trio.Cancelled` as an | ||||||
|  |             # {'error': trio.Cancelled, cid: "blah"} enough? | ||||||
|  |             # This probably gets into the discussion in | ||||||
|  |             # https://github.com/goodboy/tractor/issues/36 | ||||||
|  |             self._scope_nursery.cancel_scope.cancel() | ||||||
|  | 
 | ||||||
|  |         if self._recv_chan: | ||||||
|  |             await self._recv_chan.aclose() | ||||||
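The 0.5s ``move_on_after`` in the caller branch is an instance of a generic ``trio`` idiom: bound a best-effort remote request with a timeout, optionally shielding it from locally propagating cancellation. A stand-alone version of that idiom (the request coroutine is a stand-in for the actual portal call, and the shielding here is illustrative):

import trio


async def best_effort_remote_cancel(send_cancel_request) -> bool:
    '''Fire a remote-cancel request but never wait on it for more than
    500ms; return ``True`` only if the request completed in time.

    ``send_cancel_request`` is a hypothetical awaitable standing in for
    the actual IPC call.
    '''
    with trio.move_on_after(0.5) as cs:
        # shield so an already in-flight local cancellation can't
        # swallow the outbound request.
        cs.shield = True
        await send_cancel_request()

    # timed out: the far side may or may not have seen the request
    # (the two-generals caveat noted above).
    return not cs.cancelled_caught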
|  | 
 | ||||||
|  |     @asynccontextmanager | ||||||
|  |     async def open_stream( | ||||||
|  | 
 | ||||||
|  |         self, | ||||||
|  |         shield: bool = False, | ||||||
|  | 
 | ||||||
|  |     ) -> AsyncGenerator[MsgStream, None]: | ||||||
|  |         '''Open a ``MsgStream``, a bi-directional stream connected to the | ||||||
|  |         cross-actor (far end) task for this ``Context``. | ||||||
|  | 
 | ||||||
|  |         This context manager must be entered on both the caller and | ||||||
|  |         callee for the stream to logically be considered "connected". | ||||||
|  | 
 | ||||||
|  |         A ``MsgStream`` is currently "one-shot" use, meaning if you | ||||||
|  |         close it you cannot "re-open" it for streaming; instead you | ||||||
|  |         must re-establish a new surrounding ``Context`` using | ||||||
|  |         ``Portal.open_context()``.  In the future this may change but | ||||||
|  |         currently there seems to be no obvious reason to support | ||||||
|  |         "re-opening": | ||||||
|  |             - pausing a stream can be done with a message. | ||||||
|  |             - task errors will normally require a restart of the entire | ||||||
|  |               scope of the inter-actor task context due to the nature of | ||||||
|  |               ``trio``'s cancellation system. | ||||||
|  | 
 | ||||||
|  |         ''' | ||||||
|  |         actor = current_actor() | ||||||
|  | 
 | ||||||
|  |         # here we create a mem chan that corresponds to the | ||||||
|  |         # far end caller / callee. | ||||||
|  | 
 | ||||||
|  |         # NOTE: in one way streaming this only happens on the | ||||||
|  |         # caller side inside `Actor.send_cmd()` so if you try | ||||||
|  |         # to send a stop from the caller to the callee in the | ||||||
|  |         # single-direction-stream case you'll get a lookup error | ||||||
|  |         # currently. | ||||||
|  |         _, recv_chan = actor.get_memchans( | ||||||
|  |             self.chan.uid, | ||||||
|  |             self.cid | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |         # Likewise if the surrounding context has been cancelled we error here | ||||||
|  |         # since it likely means the surrounding block was exited or | ||||||
|  |         # killed | ||||||
|  | 
 | ||||||
|  |         if self._cancel_called: | ||||||
|  |             task = trio.lowlevel.current_task().name | ||||||
|  |             raise ContextCancelled( | ||||||
|  |                 f'Context around {actor.uid[0]}:{task} was already cancelled!' | ||||||
|  |             ) | ||||||
|  | 
 | ||||||
|  |         # XXX: If the underlying channel feeder receive mem chan has | ||||||
|  |         # been closed then likely client code has already exited | ||||||
|  |         # a ``.open_stream()`` block prior or there was some other | ||||||
|  |         # unanticipated error or cancellation from ``trio``. | ||||||
|  | 
 | ||||||
|  |         if recv_chan._closed: | ||||||
|  |             raise trio.ClosedResourceError( | ||||||
|  |                 'The underlying channel for this stream was already closed!?') | ||||||
|  | 
 | ||||||
|  |         async with MsgStream( | ||||||
|  |             ctx=self, | ||||||
|  |             rx_chan=recv_chan, | ||||||
|  |             shield=shield, | ||||||
|  |         ) as rchan: | ||||||
|  | 
 | ||||||
|  |             if self._portal: | ||||||
|  |                 self._portal._streams.add(rchan) | ||||||
|  | 
 | ||||||
|  |             try: | ||||||
|  |                 # ensure we aren't cancelled before delivering | ||||||
|  |                 # the stream | ||||||
|  |                 # await trio.lowlevel.checkpoint() | ||||||
|  |                 yield rchan | ||||||
|  | 
 | ||||||
|  |             except trio.EndOfChannel: | ||||||
|  |                 # likely the far end sent us a 'stop' message to | ||||||
|  |                 # terminate the stream. | ||||||
|  |                 raise | ||||||
|  | 
 | ||||||
|  |             else: | ||||||
|  |                 # XXX: Make the stream "one-shot use".  On exit, signal | ||||||
|  |                 # ``trio.EndOfChannel``/``StopAsyncIteration`` to the | ||||||
|  |                 # far end. | ||||||
|  |                 await self.send_stop() | ||||||
|  | 
 | ||||||
|  |             finally: | ||||||
|  |                 if self._portal: | ||||||
|  |                     self._portal._streams.remove(rchan) | ||||||
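Because the stream is one-shot, "streaming again" after the block above exits means opening a fresh context (and therefore a new remote task). Roughly, with ``my_ctx_fn`` as a placeholder endpoint:

import tractor


async def stream_twice(portal: tractor.Portal, my_ctx_fn) -> None:
    # first pass: exiting the block sends 'stop' to the far end.
    async with portal.open_context(my_ctx_fn) as (ctx, _):
        async with ctx.open_stream() as stream:
            async for msg in stream:
                break

    # the old ``MsgStream`` can NOT be re-opened; re-establish a new
    # surrounding context to stream again.
    async with portal.open_context(my_ctx_fn) as (ctx, _):
        async with ctx.open_stream() as stream:
            async for msg in stream:
                break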
|  | 
 | ||||||
|  |     async def result(self) -> Any: | ||||||
|  |         '''From a caller side, wait for and return the final result from | ||||||
|  |         the callee side task. | ||||||
|  | 
 | ||||||
|  |         ''' | ||||||
|  |         assert self._portal, "Context.result() can not be called from callee!" | ||||||
|  |         assert self._recv_chan | ||||||
|  | 
 | ||||||
|  |         if self._result is False: | ||||||
|  | 
 | ||||||
|  |             if not self._recv_chan._closed:  # type: ignore | ||||||
|  | 
 | ||||||
|  |                 # wait for a final context result consuming | ||||||
|  |                 # and discarding any bi dir stream msgs still | ||||||
|  |                 # in transit from the far end. | ||||||
|  |                 while True: | ||||||
|  | 
 | ||||||
|  |                     msg = await self._recv_chan.receive() | ||||||
|  |                     try: | ||||||
|  |                         self._result = msg['return'] | ||||||
|  |                         break | ||||||
|  |                     except KeyError: | ||||||
|  | 
 | ||||||
|  |                         if 'yield' in msg: | ||||||
|  |                             # far end task is still streaming to us.. | ||||||
|  |                             log.warning(f'Remote stream delivered {msg}') | ||||||
|  |                             # so discard it | ||||||
|  |                             continue | ||||||
|  | 
 | ||||||
|  |                         elif 'stop' in msg: | ||||||
|  |                             log.debug('Remote stream terminated') | ||||||
|  |                             continue | ||||||
|  | 
 | ||||||
|  |                         # internal error should never get here | ||||||
|  |                         assert msg.get('cid'), ( | ||||||
|  |                             "Received internal error at portal?") | ||||||
|  |                         raise unpack_error(msg, self._portal.channel) | ||||||
|  | 
 | ||||||
|  |         return self._result | ||||||
|  | 
 | ||||||
|  |     async def started(self, value: Optional[Any] = None) -> None: | ||||||
|  | 
 | ||||||
|  |         if self._portal: | ||||||
|  |             raise RuntimeError( | ||||||
|  |                 f"Caller side context {self} can not call started!") | ||||||
|  | 
 | ||||||
|  |         await self.chan.send({'started': value, 'cid': self.cid}) | ||||||
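Taken together, ``started()`` and ``result()`` give the two ends a first-value handshake plus a final return value. A rough sketch (the endpoint and values are made up):

import tractor


@tractor.context
async def compute(ctx: tractor.Context, x: int) -> int:
    # callee: unblock the caller's ``open_context()`` entry...
    await ctx.started('ready')
    # ...then return a final value like any other remote task.
    return x * 2


async def caller(portal: tractor.Portal) -> None:
    async with portal.open_context(compute, x=21) as (ctx, first):
        assert first == 'ready'

        # drains (and discards) any in-transit 'yield'/'stop' msgs
        # before handing back the callee's 'return' value.
        assert await ctx.result() == 42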
|  | 
 | ||||||
|  |     # TODO: do we need a restart api? | ||||||
|     # async def restart(self) -> None: |     # async def restart(self) -> None: | ||||||
|     #     # TODO |  | ||||||
|     #     pass |  | ||||||
| 
 |  | ||||||
|     # @asynccontextmanager |  | ||||||
|     # async def open_stream( |  | ||||||
|     #     self, |  | ||||||
|     # ) -> AsyncContextManager: |  | ||||||
|     #     # TODO |  | ||||||
|     #     pass |     #     pass | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def stream(func): | def stream(func: Callable) -> Callable: | ||||||
|     """Mark an async function as a streaming routine with ``@stream``. |     """Mark an async function as a streaming routine with ``@stream``. | ||||||
|  | 
 | ||||||
|     """ |     """ | ||||||
|     func._tractor_stream_function = True |     # annotate | ||||||
|  |     # TODO: apply whatever solution ``mypy`` ends up picking for this: | ||||||
|  |     # https://github.com/python/mypy/issues/2087#issuecomment-769266912 | ||||||
|  |     func._tractor_stream_function = True  # type: ignore | ||||||
|  | 
 | ||||||
|     sig = inspect.signature(func) |     sig = inspect.signature(func) | ||||||
|     params = sig.parameters |     params = sig.parameters | ||||||
|     if 'stream' not in params and 'ctx' in params: |     if 'stream' not in params and 'ctx' in params: | ||||||
|  | @ -115,146 +559,25 @@ def stream(func): | ||||||
|         raise TypeError( |         raise TypeError( | ||||||
|             "The first argument to the stream function " |             "The first argument to the stream function " | ||||||
|             f"{func.__name__} must be `ctx: tractor.Context` " |             f"{func.__name__} must be `ctx: tractor.Context` " | ||||||
|  |             "(Or ``to_trio`` if using ``asyncio`` in guest mode)." | ||||||
|         ) |         ) | ||||||
|     return func |     return func | ||||||
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| class ReceiveMsgStream(trio.abc.ReceiveChannel): | def context(func: Callable) -> Callable: | ||||||
|     """A wrapper around a ``trio._channel.MemoryReceiveChannel`` with |     """Mark an async function as an inter-actor context routine with ``@context``. | ||||||
|     special behaviour for signalling stream termination across an |  | ||||||
|     inter-actor ``Channel``. This is the type returned to a local task |  | ||||||
|     which invoked a remote streaming function using `Portal.run()`. |  | ||||||
| 
 |  | ||||||
|     Termination rules: |  | ||||||
|     - if the local task signals stop iteration a cancel signal is |  | ||||||
|       relayed to the remote task indicating to stop streaming |  | ||||||
|     - if the remote task signals the end of a stream, raise a |  | ||||||
|       ``StopAsyncIteration`` to terminate the local ``async for`` |  | ||||||
| 
 | 
 | ||||||
|     """ |     """ | ||||||
|     def __init__( |     # annotate | ||||||
|         self, |     # TODO: apply whatever solution ``mypy`` ends up picking for this: | ||||||
|         ctx: Context, |     # https://github.com/python/mypy/issues/2087#issuecomment-769266912 | ||||||
|         rx_chan: trio.abc.ReceiveChannel, |     func._tractor_context_function = True  # type: ignore | ||||||
|         portal: 'Portal',  # type: ignore # noqa |  | ||||||
|     ) -> None: |  | ||||||
|         self._ctx = ctx |  | ||||||
|         self._rx_chan = rx_chan |  | ||||||
|         self._portal = portal |  | ||||||
|         self._shielded = False |  | ||||||
| 
 | 
 | ||||||
|     # delegate directly to underlying mem channel |     sig = inspect.signature(func) | ||||||
|     def receive_nowait(self): |     params = sig.parameters | ||||||
|         return self._rx_chan.receive_nowait() |     if 'ctx' not in params: | ||||||
| 
 |         raise TypeError( | ||||||
|     async def receive(self): |             "The first argument to the context function " | ||||||
|         try: |             f"{func.__name__} must be `ctx: tractor.Context`" | ||||||
|             msg = await self._rx_chan.receive() |         ) | ||||||
|             return msg['yield'] |     return func | ||||||
| 
 |  | ||||||
|         except KeyError: |  | ||||||
|             # internal error should never get here |  | ||||||
|             assert msg.get('cid'), ("Received internal error at portal?") |  | ||||||
| 
 |  | ||||||
|             # TODO: handle 2 cases with 3.10 match syntax |  | ||||||
|             # - 'stop' |  | ||||||
|             # - 'error' |  | ||||||
|             # possibly just handle msg['stop'] here! |  | ||||||
| 
 |  | ||||||
|             # TODO: test that shows stream raising an expected error!!! |  | ||||||
|             if msg.get('error'): |  | ||||||
|                 # raise the error message |  | ||||||
|                 raise unpack_error(msg, self._portal.channel) |  | ||||||
| 
 |  | ||||||
|         except (trio.ClosedResourceError, StopAsyncIteration): |  | ||||||
|             # XXX: this indicates that a `stop` message was |  | ||||||
|             # sent by the far side of the underlying channel. |  | ||||||
|             # Currently this is triggered by calling ``.aclose()`` on |  | ||||||
|             # the send side of the channel inside |  | ||||||
|             # ``Actor._push_result()``, but maybe it should be put here? |  | ||||||
|             # to avoid exposing the internal mem chan closing mechanism? |  | ||||||
|             # in theory we could instead do some flushing of the channel |  | ||||||
|             # if needed to ensure all consumers are complete before |  | ||||||
|             # triggering closure too early? |  | ||||||
| 
 |  | ||||||
|             # Locally, we want to close this stream gracefully, by |  | ||||||
|             # terminating any local consumers tasks deterministically. |  | ||||||
|             # We **don't** want to be closing this send channel and not |  | ||||||
|             # relaying a final value to remaining consumers who may not |  | ||||||
|             # have been scheduled to receive it yet? |  | ||||||
| 
 |  | ||||||
|             # lots of testing to do here |  | ||||||
| 
 |  | ||||||
|             # when the send is closed we assume the stream has |  | ||||||
|             # terminated and signal this local iterator to stop |  | ||||||
|             await self.aclose() |  | ||||||
|             raise StopAsyncIteration |  | ||||||
| 
 |  | ||||||
|         except trio.Cancelled: |  | ||||||
|             # relay cancels to the remote task |  | ||||||
|             await self.aclose() |  | ||||||
|             raise |  | ||||||
| 
 |  | ||||||
|     @contextmanager |  | ||||||
|     def shield( |  | ||||||
|         self |  | ||||||
|     ) -> Iterator['ReceiveMsgStream']:  # noqa |  | ||||||
|         """Shield this stream's underlying channel such that a local consumer task |  | ||||||
|         can be cancelled (and possibly restarted) using ``trio.Cancelled``. |  | ||||||
| 
 |  | ||||||
|         """ |  | ||||||
|         self._shielded = True |  | ||||||
|         yield self |  | ||||||
|         self._shielded = False |  | ||||||
| 
 |  | ||||||
|     async def aclose(self): |  | ||||||
|         """Cancel associated remote actor task and local memory channel |  | ||||||
|         on close. |  | ||||||
|         """ |  | ||||||
|         rx_chan = self._rx_chan |  | ||||||
| 
 |  | ||||||
|         if rx_chan._closed: |  | ||||||
|             log.warning(f"{self} is already closed") |  | ||||||
|             return |  | ||||||
| 
 |  | ||||||
|         # stats = rx_chan.statistics() |  | ||||||
|         # if stats.open_receive_channels > 1: |  | ||||||
|         #     # if we've been cloned don't kill the stream |  | ||||||
|         #     log.debug( |  | ||||||
|         #       "there are still consumers running keeping stream alive") |  | ||||||
|         #     return |  | ||||||
| 
 |  | ||||||
|         if self._shielded: |  | ||||||
|             log.warning(f"{self} is shielded, portal channel being kept alive") |  | ||||||
|             return |  | ||||||
| 
 |  | ||||||
|         # close the local mem chan |  | ||||||
|         rx_chan.close() |  | ||||||
| 
 |  | ||||||
|         # cancel surrounding IPC context |  | ||||||
|         await self._ctx.cancel() |  | ||||||
| 
 |  | ||||||
|     # TODO: but make it broadcasting to consumers |  | ||||||
|     # def clone(self): |  | ||||||
|     #     """Clone this receive channel allowing for multi-task |  | ||||||
|     #     consumption from the same channel. |  | ||||||
| 
 |  | ||||||
|     #     """ |  | ||||||
|     #     return ReceiveStream( |  | ||||||
|     #         self._cid, |  | ||||||
|     #         self._rx_chan.clone(), |  | ||||||
|     #         self._portal, |  | ||||||
|     #     ) |  | ||||||
| 
 |  | ||||||
| 
 |  | ||||||
| # class MsgStream(ReceiveMsgStream, trio.abc.Channel): |  | ||||||
| #     """ |  | ||||||
| #     Bidirectional message stream for use within an inter-actor actor |  | ||||||
| #     ``Context```. |  | ||||||
| 
 |  | ||||||
| #     """ |  | ||||||
| #     async def send( |  | ||||||
| #         self, |  | ||||||
| #         data: Any |  | ||||||
| #     ) -> None: |  | ||||||
| #         await self._ctx.chan.send({'yield': data, 'cid': self._ctx.cid}) |  | ||||||
|  |  | ||||||
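The ``@context`` decorator added in this file validates the endpoint signature at decoration time. A quick illustration of what that check accepts and rejects (the two functions are invented for the example):

import tractor


@tractor.context
async def ok(ctx: tractor.Context, msg: str) -> str:
    # accepted: ``ctx`` is present in the signature.
    await ctx.started()
    return msg.upper()


try:
    @tractor.context
    async def missing_ctx(msg: str) -> str:  # rejected: no ``ctx`` param
        return msg
except TypeError as err:
    # "The first argument to the context function missing_ctx must be
    #  `ctx: tractor.Context`"
    print(err)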
|  | @ -11,7 +11,8 @@ import warnings | ||||||
| import trio | import trio | ||||||
| from async_generator import asynccontextmanager | from async_generator import asynccontextmanager | ||||||
| 
 | 
 | ||||||
| from ._state import current_actor, is_main_process | from . import _debug | ||||||
|  | from ._state import current_actor, is_main_process, is_root_process | ||||||
| from .log import get_logger, get_loglevel | from .log import get_logger, get_loglevel | ||||||
| from ._actor import Actor | from ._actor import Actor | ||||||
| from ._portal import Portal | from ._portal import Portal | ||||||
|  | @ -169,16 +170,25 @@ class ActorNursery: | ||||||
| 
 | 
 | ||||||
|         log.warning(f"Cancelling nursery in {self._actor.uid}") |         log.warning(f"Cancelling nursery in {self._actor.uid}") | ||||||
|         with trio.move_on_after(3) as cs: |         with trio.move_on_after(3) as cs: | ||||||
|  | 
 | ||||||
|             async with trio.open_nursery() as nursery: |             async with trio.open_nursery() as nursery: | ||||||
|  | 
 | ||||||
|                 for subactor, proc, portal in self._children.values(): |                 for subactor, proc, portal in self._children.values(): | ||||||
|  | 
 | ||||||
|  |                     # TODO: are we ever even going to use this or | ||||||
|  |                     # is the spawning backend responsible for such | ||||||
|  |                     # things? I'm thinking the latter. | ||||||
|                     if hard_kill: |                     if hard_kill: | ||||||
|                         proc.terminate() |                         proc.terminate() | ||||||
|  | 
 | ||||||
|                     else: |                     else: | ||||||
|                         if portal is None:  # actor hasn't fully spawned yet |                         if portal is None:  # actor hasn't fully spawned yet | ||||||
|                             event = self._actor._peer_connected[subactor.uid] |                             event = self._actor._peer_connected[subactor.uid] | ||||||
|                             log.warning( |                             log.warning( | ||||||
|                                 f"{subactor.uid} wasn't finished spawning?") |                                 f"{subactor.uid} wasn't finished spawning?") | ||||||
|  | 
 | ||||||
|                             await event.wait() |                             await event.wait() | ||||||
|  | 
 | ||||||
|                             # channel/portal should now be up |                             # channel/portal should now be up | ||||||
|                             _, _, portal = self._children[subactor.uid] |                             _, _, portal = self._children[subactor.uid] | ||||||
| 
 | 
 | ||||||
|  | @ -238,6 +248,7 @@ async def _open_and_supervise_one_cancels_all_nursery( | ||||||
|             # As such if the strategy propagates any error(s) upwards |             # As such if the strategy propagates any error(s) upwards | ||||||
|             # the above "daemon actor" nursery will be notified. |             # the above "daemon actor" nursery will be notified. | ||||||
|             async with trio.open_nursery() as ria_nursery: |             async with trio.open_nursery() as ria_nursery: | ||||||
|  | 
 | ||||||
|                 anursery = ActorNursery( |                 anursery = ActorNursery( | ||||||
|                     actor, |                     actor, | ||||||
|                     ria_nursery, |                     ria_nursery, | ||||||
|  | @ -248,15 +259,53 @@ async def _open_and_supervise_one_cancels_all_nursery( | ||||||
|                     # spawning of actors happens in the caller's scope |                     # spawning of actors happens in the caller's scope | ||||||
|                     # after we yield upwards |                     # after we yield upwards | ||||||
|                     yield anursery |                     yield anursery | ||||||
|                     log.debug( | 
 | ||||||
|  |                     log.runtime( | ||||||
|                         f"Waiting on subactors {anursery._children} " |                         f"Waiting on subactors {anursery._children} " | ||||||
|                         "to complete" |                         "to complete" | ||||||
|                     ) |                     ) | ||||||
|  | 
 | ||||||
|  |                     # Last bit before first nursery block ends in the case | ||||||
|  |                     # where we didn't error in the caller's scope | ||||||
|  | 
 | ||||||
|  |                     # signal all process monitor tasks to conduct | ||||||
|  |                     # hard join phase. | ||||||
|  |                     anursery._join_procs.set() | ||||||
|  | 
 | ||||||
|                 except BaseException as err: |                 except BaseException as err: | ||||||
|  | 
 | ||||||
|  |                     # If we error in the root but the debugger is | ||||||
|  |                     # engaged we don't want to prematurely kill (and | ||||||
|  |                     # thus clobber access to) the local tty since it | ||||||
|  |                     # will make the pdb repl unusable. | ||||||
|  |                     # Instead try to wait for pdb to be released before | ||||||
|  |                     # tearing down. | ||||||
|  |                     if is_root_process(): | ||||||
|  |                         log.exception(f"we're root with {err}") | ||||||
|  | 
 | ||||||
|  |                         # wait to see if a sub-actor task | ||||||
|  |                         # will be scheduled and grab the tty | ||||||
|  |                         # lock on the next tick | ||||||
|  |                         # await trio.testing.wait_all_tasks_blocked() | ||||||
|  | 
 | ||||||
|  |                         debug_complete = _debug._no_remote_has_tty | ||||||
|  |                         if ( | ||||||
|  |                             debug_complete and | ||||||
|  |                             not debug_complete.is_set() | ||||||
|  |                         ): | ||||||
|  |                             log.warning( | ||||||
|  |                                 'Root has errored but pdb is in use by ' | ||||||
|  |                                 f'child {_debug._global_actor_in_debug}\n' | ||||||
|  |                                 'Waiting on tty lock to release..') | ||||||
|  | 
 | ||||||
|  |                             with trio.CancelScope(shield=True): | ||||||
|  |                                 await debug_complete.wait() | ||||||
|  | 
 | ||||||
|                     # if the caller's scope errored then we activate our |                     # if the caller's scope errored then we activate our | ||||||
|                     # one-cancels-all supervisor strategy (don't |                     # one-cancels-all supervisor strategy (don't | ||||||
|                     # worry more are coming). |                     # worry more are coming). | ||||||
|                     anursery._join_procs.set() |                     anursery._join_procs.set() | ||||||
|  | 
 | ||||||
|                     try: |                     try: | ||||||
|                         # XXX: hypothetically an error could be |                         # XXX: hypothetically an error could be | ||||||
|                         # raised and then a cancel signal shows up |                         # raised and then a cancel signal shows up | ||||||
|  | @ -292,15 +341,18 @@ async def _open_and_supervise_one_cancels_all_nursery( | ||||||
|                     else: |                     else: | ||||||
|                         raise |                         raise | ||||||
| 
 | 
 | ||||||
|                 # Last bit before first nursery block ends in the case |  | ||||||
|                 # where we didn't error in the caller's scope |  | ||||||
|                 log.debug("Waiting on all subactors to complete") |  | ||||||
|                 anursery._join_procs.set() |  | ||||||
| 
 |  | ||||||
|                 # ria_nursery scope end |                 # ria_nursery scope end | ||||||
| 
 | 
 | ||||||
|         # XXX: do we need a `trio.Cancelled` catch here as well? |         # XXX: do we need a `trio.Cancelled` catch here as well? | ||||||
|         except (Exception, trio.MultiError, trio.Cancelled) as err: |         # this is the catch around the ``.run_in_actor()`` nursery | ||||||
|  |         except ( | ||||||
|  | 
 | ||||||
|  |             Exception, | ||||||
|  |             trio.MultiError, | ||||||
|  |             trio.Cancelled | ||||||
|  | 
 | ||||||
|  |         ) as err: | ||||||
|  | 
 | ||||||
|             # If actor-local error was raised while waiting on |             # If actor-local error was raised while waiting on | ||||||
|             # ".run_in_actor()" actors then we also want to cancel all |             # ".run_in_actor()" actors then we also want to cancel all | ||||||
|             # remaining sub-actors (due to our lone strategy: |             # remaining sub-actors (due to our lone strategy: | ||||||
|  | @ -357,7 +409,8 @@ async def open_nursery( | ||||||
|     try: |     try: | ||||||
|         if actor is None and is_main_process(): |         if actor is None and is_main_process(): | ||||||
| 
 | 
 | ||||||
|             # if we are the parent process start the actor runtime implicitly |             # if we are the parent process start the | ||||||
|  |             # actor runtime implicitly | ||||||
|             log.info("Starting actor runtime!") |             log.info("Starting actor runtime!") | ||||||
| 
 | 
 | ||||||
|             # mark us for teardown on exit |             # mark us for teardown on exit | ||||||
|  | @ -366,6 +419,7 @@ async def open_nursery( | ||||||
|             async with open_root_actor(**kwargs) as actor: |             async with open_root_actor(**kwargs) as actor: | ||||||
|                 assert actor is current_actor() |                 assert actor is current_actor() | ||||||
| 
 | 
 | ||||||
|  |                 # try: | ||||||
|                 async with _open_and_supervise_one_cancels_all_nursery( |                 async with _open_and_supervise_one_cancels_all_nursery( | ||||||
|                     actor |                     actor | ||||||
|                 ) as anursery: |                 ) as anursery: | ||||||
|  | @ -376,7 +430,6 @@ async def open_nursery( | ||||||
|             async with _open_and_supervise_one_cancels_all_nursery( |             async with _open_and_supervise_one_cancels_all_nursery( | ||||||
|                 actor |                 actor | ||||||
|             ) as anursery: |             ) as anursery: | ||||||
| 
 |  | ||||||
|                 yield anursery |                 yield anursery | ||||||
| 
 | 
 | ||||||
|     finally: |     finally: | ||||||
|  |  | ||||||
|  | @ -29,19 +29,20 @@ LOG_FORMAT = ( | ||||||
| DATE_FORMAT = '%b %d %H:%M:%S' | DATE_FORMAT = '%b %d %H:%M:%S' | ||||||
| LEVELS = { | LEVELS = { | ||||||
|     'GARBAGE': 1, |     'GARBAGE': 1, | ||||||
|     'TRACE': 5, |     'TRANSPORT': 5, | ||||||
|     'PROFILE': 15, |     'RUNTIME': 15, | ||||||
|     'RUNTIME': 500, |     'PDB': 500, | ||||||
|     'QUIET': 1000, |     'QUIET': 1000, | ||||||
| } | } | ||||||
| STD_PALETTE = { | STD_PALETTE = { | ||||||
|     'CRITICAL': 'red', |     'CRITICAL': 'red', | ||||||
|     'ERROR': 'red', |     'ERROR': 'red', | ||||||
|     'RUNTIME': 'white', |     'PDB': 'white', | ||||||
|     'WARNING': 'yellow', |     'WARNING': 'yellow', | ||||||
|     'INFO': 'green', |     'INFO': 'green', | ||||||
|  |     'RUNTIME': 'white', | ||||||
|     'DEBUG': 'white', |     'DEBUG': 'white', | ||||||
|     'TRACE': 'cyan', |     'TRANSPORT': 'cyan', | ||||||
|     'GARBAGE': 'blue', |     'GARBAGE': 'blue', | ||||||
| } | } | ||||||
| BOLD_PALETTE = { | BOLD_PALETTE = { | ||||||
|  | @ -76,7 +77,7 @@ def get_logger( | ||||||
|     # additional levels |     # additional levels | ||||||
|     for name, val in LEVELS.items(): |     for name, val in LEVELS.items(): | ||||||
|         logging.addLevelName(val, name) |         logging.addLevelName(val, name) | ||||||
|         # ex. create ``logger.trace()`` |         # ex. create ``logger.runtime()`` | ||||||
|         setattr(logger, name.lower(), partial(logger.log, val)) |         setattr(logger, name.lower(), partial(logger.log, val)) | ||||||
| 
 | 
 | ||||||
|     return logger |     return logger | ||||||
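The renamed levels are ordinary ``logging`` custom levels wired up by the loop above; the same mechanism in isolation (level numbers taken from the new ``LEVELS`` table, the intent of each inferred from its use in this branch):

import logging
from functools import partial

LEVELS = {
    'TRANSPORT': 5,   # wire-level chatter (was 'TRACE')
    'RUNTIME': 15,    # internal runtime machinery (was 'PROFILE')
    'PDB': 500,       # debugger/tty-lock messages (was 'RUNTIME')
}

log = logging.getLogger('demo')
logging.basicConfig(level=1)

for name, val in LEVELS.items():
    logging.addLevelName(val, name)
    # ex. create ``log.runtime()``, ``log.transport()``, ``log.pdb()``
    setattr(log, name.lower(), partial(log.log, val))

log.runtime('actor runtime starting')   # emitted at level 15
log.transport('sent 128 bytes')         # emitted at level 5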
|  |  | ||||||
|  | @ -1,9 +1,13 @@ | ||||||
| """ | """ | ||||||
| Messaging pattern APIs and helpers. | Messaging pattern APIs and helpers. | ||||||
|  | 
 | ||||||
|  | NOTE: this module is likely deprecated by the new bi-directional streaming | ||||||
|  | support provided by ``tractor.Context.open_stream()`` and friends. | ||||||
|  | 
 | ||||||
| """ | """ | ||||||
| import inspect | import inspect | ||||||
| import typing | import typing | ||||||
| from typing import Dict, Any, Set, Callable | from typing import Dict, Any, Set, Callable, List, Tuple | ||||||
| from functools import partial | from functools import partial | ||||||
| from async_generator import aclosing | from async_generator import aclosing | ||||||
| 
 | 
 | ||||||
|  | @ -20,7 +24,7 @@ log = get_logger('messaging') | ||||||
| 
 | 
 | ||||||
| async def fan_out_to_ctxs( | async def fan_out_to_ctxs( | ||||||
|     pub_async_gen_func: typing.Callable,  # it's an async gen ... gd mypy |     pub_async_gen_func: typing.Callable,  # it's an async gen ... gd mypy | ||||||
|     topics2ctxs: Dict[str, set], |     topics2ctxs: Dict[str, list], | ||||||
|     packetizer: typing.Callable = None, |     packetizer: typing.Callable = None, | ||||||
| ) -> None: | ) -> None: | ||||||
|     """Request and fan out quotes to each subscribed actor channel. |     """Request and fan out quotes to each subscribed actor channel. | ||||||
|  | @ -34,24 +38,27 @@ async def fan_out_to_ctxs( | ||||||
| 
 | 
 | ||||||
|         async for published in pub_gen: |         async for published in pub_gen: | ||||||
| 
 | 
 | ||||||
|             ctx_payloads: Dict[str, Any] = {} |             ctx_payloads: List[Tuple[Context, Any]] = [] | ||||||
| 
 | 
 | ||||||
|             for topic, data in published.items(): |             for topic, data in published.items(): | ||||||
|                 log.debug(f"publishing {topic, data}") |                 log.debug(f"publishing {topic, data}") | ||||||
|  | 
 | ||||||
|                 # build a new dict packet or invoke provided packetizer |                 # build a new dict packet or invoke provided packetizer | ||||||
|                 if packetizer is None: |                 if packetizer is None: | ||||||
|                     packet = {topic: data} |                     packet = {topic: data} | ||||||
|  | 
 | ||||||
|                 else: |                 else: | ||||||
|                     packet = packetizer(topic, data) |                     packet = packetizer(topic, data) | ||||||
|                 for ctx in topics2ctxs.get(topic, set()): | 
 | ||||||
|                     ctx_payloads.setdefault(ctx, {}).update(packet), |                 for ctx in topics2ctxs.get(topic, list()): | ||||||
|  |                     ctx_payloads.append((ctx, packet)) | ||||||
| 
 | 
 | ||||||
|             if not ctx_payloads: |             if not ctx_payloads: | ||||||
|                 log.debug(f"Unconsumed values:\n{published}") |                 log.debug(f"Unconsumed values:\n{published}") | ||||||
| 
 | 
 | ||||||
|             # deliver to each subscriber (fan out) |             # deliver to each subscriber (fan out) | ||||||
|             if ctx_payloads: |             if ctx_payloads: | ||||||
|                 for ctx, payload in ctx_payloads.items(): |                 for ctx, payload in ctx_payloads: | ||||||
|                     try: |                     try: | ||||||
|                         await ctx.send_yield(payload) |                         await ctx.send_yield(payload) | ||||||
|                     except ( |                     except ( | ||||||
|  | @ -60,15 +67,24 @@ async def fan_out_to_ctxs( | ||||||
|                         ConnectionRefusedError, |                         ConnectionRefusedError, | ||||||
|                     ): |                     ): | ||||||
|                         log.warning(f"{ctx.chan} went down?") |                         log.warning(f"{ctx.chan} went down?") | ||||||
|                         for ctx_set in topics2ctxs.values(): |                         for ctx_list in topics2ctxs.values(): | ||||||
|                             ctx_set.discard(ctx) |                             try: | ||||||
|  |                                 ctx_list.remove(ctx) | ||||||
|  |                             except ValueError: | ||||||
|  |                                 continue | ||||||
| 
 | 
 | ||||||
|             if not get_topics(): |             if not get_topics(): | ||||||
|                 log.warning(f"No subscribers left for {pub_gen}") |                 log.warning(f"No subscribers left for {pub_gen}") | ||||||
|                 break |                 break | ||||||
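A ``packetizer`` is just a hook for shaping each published payload before it is fanned out to subscribers; a minimal sketch of one (field names are invented):

from typing import Any, Dict


def packetizer(topic: str, data: Any) -> Dict[str, Any]:
    # wrap each published value with its topic plus a little metadata
    # before it is relayed to every subscribed context.
    return {topic: {'payload': data, 'source': 'demo-feed'}}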
| 
 | 
 | ||||||
| 
 | 
 | ||||||
| def modify_subs(topics2ctxs, topics, ctx): | def modify_subs( | ||||||
|  | 
 | ||||||
|  |     topics2ctxs: Dict[str, List[Context]], | ||||||
|  |     topics: Set[str], | ||||||
|  |     ctx: Context, | ||||||
|  | 
 | ||||||
|  | ) -> None: | ||||||
|     """Absolute symbol subscription list for each quote stream. |     """Absolute symbol subscription list for each quote stream. | ||||||
| 
 | 
 | ||||||
|     Effectively a symbol subscription api. |     Effectively a symbol subscription api. | ||||||
|  | @ -77,7 +93,7 @@ def modify_subs(topics2ctxs, topics, ctx): | ||||||
| 
 | 
 | ||||||
|     # update map from each symbol to requesting client's chan |     # update map from each symbol to requesting client's chan | ||||||
|     for topic in topics: |     for topic in topics: | ||||||
|         topics2ctxs.setdefault(topic, set()).add(ctx) |         topics2ctxs.setdefault(topic, list()).append(ctx) | ||||||
| 
 | 
 | ||||||
|     # remove any existing symbol subscriptions if symbol is not |     # remove any existing symbol subscriptions if symbol is not | ||||||
|     # found in ``symbols`` |     # found in ``symbols`` | ||||||
|  | @ -85,10 +101,14 @@ def modify_subs(topics2ctxs, topics, ctx): | ||||||
|     for topic in filter( |     for topic in filter( | ||||||
|         lambda topic: topic not in topics, topics2ctxs.copy() |         lambda topic: topic not in topics, topics2ctxs.copy() | ||||||
|     ): |     ): | ||||||
|         ctx_set = topics2ctxs.get(topic) |         ctx_list = topics2ctxs.get(topic) | ||||||
|         ctx_set.discard(ctx) |         if ctx_list: | ||||||
|  |             try: | ||||||
|  |                 ctx_list.remove(ctx) | ||||||
|  |             except ValueError: | ||||||
|  |                 pass | ||||||
| 
 | 
 | ||||||
|         if not ctx_set: |         if not ctx_list: | ||||||
|             # pop empty sets which will trigger bg quoter task termination |             # pop empty lists which will trigger bg quoter task termination | ||||||
|             topics2ctxs.pop(topic) |             topics2ctxs.pop(topic) | ||||||
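With ``list`` instead of ``set`` bookkeeping, the discard-style removal above becomes a guarded ``.remove()``. The pattern in isolation (the helper names and topic strings are made up):

from typing import Any, Dict, List

topics2ctxs: Dict[str, List[Any]] = {}


def subscribe(ctx: Any, topic: str) -> None:
    topics2ctxs.setdefault(topic, []).append(ctx)


def unsubscribe(ctx: Any, topic: str) -> None:
    ctx_list = topics2ctxs.get(topic)
    if not ctx_list:
        return
    try:
        ctx_list.remove(ctx)  # unlike ``set.discard()`` this can raise
    except ValueError:
        pass
    if not ctx_list:
        # no subscribers left: drop the topic so the bg publisher task
        # can detect it has nothing left to publish to.
        topics2ctxs.pop(topic)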
| 
 | 
 | ||||||
|  | @ -256,7 +276,7 @@ def pub( | ||||||
|                             respawn = True |                             respawn = True | ||||||
|             finally: |             finally: | ||||||
|                 # remove all subs for this context |                 # remove all subs for this context | ||||||
|                 modify_subs(topics2ctxs, (), ctx) |                 modify_subs(topics2ctxs, set(), ctx) | ||||||
| 
 | 
 | ||||||
|                 # if there are truly no more subscriptions with this broker |                 # if there are truly no more subscriptions with this broker | ||||||
|                 # drop from broker subs dict |                 # drop from broker subs dict | ||||||
|  |  | ||||||
|  | @ -78,7 +78,7 @@ def tractor_test(fn): | ||||||
| 
 | 
 | ||||||
|         else: |         else: | ||||||
|             # use implicit root actor start |             # use implicit root actor start | ||||||
|             main = partial(fn, *args, **kwargs), |             main = partial(fn, *args, **kwargs) | ||||||
| 
 | 
 | ||||||
|         return trio.run(main) |         return trio.run(main) | ||||||
|             # arbiter_addr=arb_addr, |             # arbiter_addr=arb_addr, | ||||||
|  |  | ||||||
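The final hunk is a one-character but real bug fix: the stray trailing comma made ``main`` a 1-tuple rather than a callable, which ``trio.run()`` then chokes on. A tiny reproduction:

from functools import partial


def fn() -> str:
    return 'ran'


main = partial(fn),   # note the comma: builds a ``tuple``
print(type(main))     # <class 'tuple'>

main = partial(fn)    # without it: a callable ``functools.partial``
print(main())         # 'ran'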