Use the actor's service nursery instead

In order to ensure the feed can in fact be kept open until the last
consumer task has completed, we need to maintain a lifetime which is
hierarchically greater than that of all consumer tasks.

This solution is somewhat hacky but seems to work well: we just use the
`tractor` actor's "service nursery" (the one normally used to invoke rpc
tasks) to launch the task which starts and keeps open the target cached
async context manager. To make this more "proper" we may want to offer a
"root nursery" in all piker actors, exposed through some singleton api,
or even introduce a public api for it into `tractor` directly.
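
A minimal sketch of the idea (hypothetical helper names, not the actual
piker code): the target async context manager is entered inside a task
spawned on the actor's service nursery, so it stays open regardless of
which consumer task originally requested it.

    import trio
    import tractor

    async def _keep_ctx_open(
        mngr,  # the cached async context manager to enter
        task_status=trio.TASK_STATUS_IGNORED,
    ):
        # enter the manager and hand its yielded value back to the caller
        async with mngr as value:
            task_status.started(value)
            # park here: the entered manager now lives as long as this
            # task, which the service nursery keeps alive until the
            # actor itself tears down (or the task is cancelled).
            await trio.sleep_forever()

    async def start_cached(mngr):
        # the "hacky" part: borrow tractor's per-actor service nursery
        # (normally used to invoke rpc tasks) as a scope that is
        # hierarchically greater than any consumer task.
        service_n = tractor.current_actor()._service_n
        return await service_n.start(_keep_ctx_open, mngr)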
Tyler Goodlet 2021-08-31 12:46:47 -04:00
parent 1184a4d88e
commit c3682348fe
1 changed file with 40 additions and 33 deletions


@@ -36,6 +36,7 @@ from contextlib import (
 import trio
 from trio_typing import TaskStatus
+import tractor
 from tractor._portal import maybe_open_nursery

 from .brokers import get_brokermod
@@ -180,9 +181,6 @@ async def maybe_open_ctx(
     ctx_key = id(mngr)

-    # TODO: does this need to be a tractor "root nursery"?
-    async with maybe_open_nursery(cache.nurseries.get(ctx_key)) as n:
-        cache.nurseries[ctx_key] = n
     value = None
     try:
@@ -201,7 +199,16 @@ async def maybe_open_ctx(
         # checking the cache until complete otherwise the scheduler
         # may switch and by accident we create more then one feed.
-        value = await n.start(cache.run_ctx, mngr, key)
+        # TODO: avoid pulling from ``tractor`` internals and
+        # instead offer a "root nursery" in all piker actors
+        service_n = tractor.current_actor()._service_n
+
+        # TODO: does this need to be a tractor "root nursery"?
+        ln = cache.nurseries.get(ctx_key)
+        assert not ln
+        ln = cache.nurseries[ctx_key] = service_n
+
+        value = await ln.start(cache.run_ctx, mngr, key)
         cache.users += 1
         cache.lock.release()
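
For context, a toy trio-only illustration (hypothetical names, not piker
code) of the lifetime argument above: a manager entered in a task
running on a nursery opened above the consumers survives any individual
consumer exiting, whereas one entered in a consumer's own scope would be
torn down with that consumer.

    import trio
    from contextlib import asynccontextmanager

    @asynccontextmanager
    async def fake_feed():
        print("feed opened")
        try:
            yield "quotes"
        finally:
            print("feed closed")

    async def keep_open(mngr, task_status=trio.TASK_STATUS_IGNORED):
        async with mngr as value:
            task_status.started(value)
            await trio.sleep_forever()

    async def main():
        # stands in for the actor's long-lived service nursery
        async with trio.open_nursery() as service_n:
            value = await service_n.start(keep_open, fake_feed())

            async def consumer(name, delay):
                await trio.sleep(delay)
                print(f"{name} done consuming {value}")

            async with trio.open_nursery() as consumers:
                consumers.start_soon(consumer, "consumer-1", 0.1)
                consumers.start_soon(consumer, "consumer-2", 0.2)

            # both consumers are gone here, yet "feed closed" has not
            # printed: the feed's lifetime is owned by service_n.
            service_n.cancel_scope.cancel()

    trio.run(main)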