Establish stream before `fsp_compute` so that backfill updates work again.
parent 25891c6e51
commit 08d7f925b9
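
In short: `ctx.started()` and `ctx.open_stream()` move out of `fsp_compute` and up into `cascade`, so the client stream is already open (and stays open) when a history backfill forces a resync. A minimal sketch of that new ordering, with `ctx`, `n`, `index` and `fsp_target` as in the diff below and all other scaffolding assumed:

import tractor
import trio

async def cascade_sketch(
    ctx: tractor.Context,
    n: trio.Nursery,
    index: int,
    fsp_target,  # a partial() over fsp_compute, as in the diff
) -> None:
    # 1) ack the client with the initial shm index
    await ctx.started(index)

    # 2) open the rt msg stream BEFORE spawning the compute task,
    # and keep it open for the task's whole lifetime
    async with ctx.open_stream() as client_stream:

        # 3) spawn the compute task; a later backfill-driven resync
        # can still reach the client over the already-open stream
        tracker, index = await n.start(fsp_target)
        await client_stream.send('update')
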
@@ -76,7 +76,6 @@ async def filter_quotes_by_sym(
 
 async def fsp_compute(
 
-    ctx: tractor.Context,
     symbol: Symbol,
     feed: Feed,
     quote_stream: trio.abc.ReceiveChannel,
@@ -86,7 +85,7 @@ async def fsp_compute(
 
     func: Callable,
 
-    attach_stream: bool = False,
+    # attach_stream: bool = False,
     task_status: TaskStatus[None] = trio.TASK_STATUS_IGNORED,
 
 ) -> None:
@@ -193,27 +192,22 @@ async def fsp_compute(
     profiler(f'{func_name} pushed history')
     profiler.finish()
 
-    # TODO: UGH, what is the right way to do something like this?
-    if not ctx._started_called:
-        await ctx.started(index)
-
     # setup a respawn handle
     with trio.CancelScope() as cs:
 
         # TODO: might be better to just make a "restart" method where
         # the target task is spawned implicitly and then the event is
         # set via some higher level api? At that poing we might as well
         # be writing a one-cancels-one nursery though right?
         tracker = TaskTracker(trio.Event(), cs)
         task_status.started((tracker, index))
 
         profiler(f'{func_name} yield last index')
 
         # import time
         # last = time.time()
 
         try:
-            # rt stream
-            async with ctx.open_stream() as stream:
-
-                # always trigger UI refresh after history update,
-                # see ``piker.ui._fsp.FspAdmin.open_chain()`` and
-                # ``piker.ui._display.trigger_update()``.
-                await stream.send('update')
-
             async for processed in out_stream:
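
The `TaskTracker` + `trio.CancelScope` pairing above is a small "respawn handle" idiom: the spawned task hands back a cancel scope plus a completion event via `task_status.started()`, and a supervisor can cancel it, await teardown, then respawn. A self-contained sketch of just that idiom (this `TaskTracker` definition is an assumed stand-in, not piker's):

from dataclasses import dataclass

import trio

@dataclass
class TaskTracker:
    # assumed equivalent of piker's tracker type
    complete: trio.Event
    cs: trio.CancelScope

async def compute(
    task_status=trio.TASK_STATUS_IGNORED,
) -> None:
    # each (re)spawn gets a fresh cancel scope + completion event
    with trio.CancelScope() as cs:
        tracker = TaskTracker(trio.Event(), cs)
        task_status.started(tracker)
        try:
            await trio.sleep_forever()  # stand-in for the fsp loop
        finally:
            # signal full teardown to any canceller
            tracker.complete.set()

async def main() -> None:
    async with trio.open_nursery() as n:
        tracker = await n.start(compute)

        # "resync": cancel the old task, wait for its teardown,
        # then respawn and swap in the new handle
        tracker.cs.cancel()
        await tracker.complete.wait()
        tracker = await n.start(compute)

        n.cancel_scope.cancel()  # end the demo

trio.run(main)
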
@@ -225,8 +219,14 @@ async def fsp_compute(
                 # NOTE: for now we aren't streaming this to the consumer
                 # stream latest array index entry which basically just acts
                 # as trigger msg to tell the consumer to read from shm
-                if attach_stream:
-                    await stream.send(index)
+                # TODO: further this should likely be implemented much
+                # like our `Feed` api where there is one background
+                # "service" task which computes output and then sends to
+                # N-consumers who subscribe for the real-time output,
+                # which we'll likely want to implement using local-mem
+                # chans for the fan out?
+                # if attach_stream:
+                #     await client_stream.send(index)
 
                 # period = time.time() - last
                 # hz = 1/period if period else float('nan')
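
The fan-out TODO above (one background "service" task pushing to N subscribers over local-mem chans) can be prototyped directly with trio memory channels; a hedged sketch of that pattern, not piker's `Feed` API:

import trio

async def service(sends: list[trio.MemorySendChannel]) -> None:
    # the single background "service" task: compute an output,
    # then fan it out to every subscriber over local-mem chans
    for index in range(3):
        for tx in sends:
            await tx.send(index)
        await trio.sleep(0.1)
    for tx in sends:
        await tx.aclose()

async def consumer(name: str, rx: trio.MemoryReceiveChannel) -> None:
    # each consumer treats the msg as a trigger to read from shm
    async for index in rx:
        print(f'{name}: read shm up to index {index}')

async def main() -> None:
    sends, recvs = [], []
    for _ in range(2):  # N subscribers
        tx, rx = trio.open_memory_channel(1)
        sends.append(tx)
        recvs.append(rx)

    async with trio.open_nursery() as n:
        n.start_soon(service, sends)
        for i, rx in enumerate(recvs):
            n.start_soon(consumer, f'sub{i}', rx)

trio.run(main)
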
@@ -323,7 +323,6 @@ async def cascade(
     fsp_target = partial(
 
         fsp_compute,
-        ctx=ctx,
         symbol=symbol,
         feed=feed,
         quote_stream=quote_stream,
@@ -332,7 +331,7 @@ async def cascade(
         src=src,
         dst=dst,
 
-        # func_name=func_name,
+        # target
         func=func
     )
 
@@ -344,13 +343,34 @@ async def cascade(
 
     profiler(f'{func_name}: fsp up')
 
-    async def resync(tracker: TaskTracker) -> tuple[TaskTracker, int]:
+    # sync client
+    await ctx.started(index)
+
+    # XXX: rt stream with client which we MUST
+    # open here (and keep it open) in order to make
+    # incremental "updates" as history prepends take
+    # place.
+    async with ctx.open_stream() as client_stream:
+
+        # TODO: these likely should all become
+        # methods of this ``TaskLifetime`` or wtv
+        # abstraction..
+        async def resync(
+            tracker: TaskTracker,
+
+        ) -> tuple[TaskTracker, int]:
             # TODO: adopt an incremental update engine/approach
             # where possible here eventually!
             log.warning(f're-syncing fsp {func_name} to source')
             tracker.cs.cancel()
             await tracker.complete.wait()
-            return await n.start(fsp_target)
+            tracker, index = await n.start(fsp_target)
+
+            # always trigger UI refresh after history update,
+            # see ``piker.ui._fsp.FspAdmin.open_chain()`` and
+            # ``piker.ui._display.trigger_update()``.
+            await client_stream.send('update')
+            return tracker, index
 
         def is_synced(
             src: ShmArray,
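
For reference, the consumer side named in the comments (`piker.ui._fsp.FspAdmin.open_chain()`) presumably attaches via tractor's context API in roughly this shape; the helper below is hypothetical and only the `ctx.started()`/`open_stream()` handshake mirrors the diff:

import tractor

async def open_chain_sketch(
    portal: tractor.Portal,
    cascade,  # the fsp engine endpoint from this module
    **kwargs,
) -> None:
    # open the inter-actor context; `first` is whatever `cascade`
    # passed to `ctx.started()`, i.e. the initial shm index
    async with portal.open_context(cascade, **kwargs) as (ctx, first):

        # the counterpart of the `client_stream` kept open above
        async with ctx.open_stream() as stream:
            async for msg in stream:
                if msg == 'update':
                    # history was backfilled: re-read shm and
                    # trigger a UI refresh
                    ...
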
@@ -397,7 +417,9 @@ async def cascade(
 
         # Increment the underlying shared memory buffer on every
         # "increment" msg received from the underlying data feed.
-        async with feed.index_stream(int(delay_s)) as istream:
+        async with feed.index_stream(
+            int(delay_s)
+        ) as istream:
 
             profiler(f'{func_name}: sample stream up')
             profiler.finish()
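
Downstream of this hunk the sampler loop presumably drives the `is_synced()`/`resync()` pair on each index increment; a sketch of that consumption pattern using only names visible in the diff, with the loop body assumed:

        async with feed.index_stream(
            int(delay_s)
        ) as istream:
            async for _ in istream:
                # a history prepend shifts the source buffer; when
                # detected, respawn the compute task, which (via the
                # open client stream) re-triggers a UI 'update'
                if not is_synced(src, dst):
                    tracker, index = await resync(tracker)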