Fix the drunk fix

This should finally make the fsp src-to-dst array syncing correct.
There are a few edge cases, but mostly we need to make sure we both
sync back-filled history diffs and avoid current-step lags/leads. Use
a polling routine plus the more stringent task re-spawn system to get
this right (a simplified sketch of the pattern follows the commit
metadata below).
fsp_drunken_alignment
Tyler Goodlet 2021-10-04 16:34:54 -04:00
parent 086aaf1d16
commit 3dd82c8d31
1 changed file with 62 additions and 32 deletions
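
A minimal, self-contained sketch of the poll-and-re-spawn pattern this
commit adds to cascade(): when the destination (fsp output) index drifts
more than one step from the source feed index, cancel the running compute
task, wait for it to wind down, re-spawn it, and poll until the two
indices are back within a single step. This is not the piker code itself;
Array, TaskTracker, compute_task, resync and poll_and_sync_to_step are
illustrative stand-ins (only the trio calls are real library API), and
the real code re-spawns the fsp task via n.start(fsp_target) against
shared-memory arrays.

# illustrative sketch only -- all names here are stand-ins, not piker APIs
from __future__ import annotations

from dataclasses import dataclass

import trio


@dataclass
class Array:
    # stand-in for a shared-memory array; only the write index matters here
    index: int = 0


@dataclass
class TaskTracker:
    # completion event plus the cancel scope of the running compute task
    complete: trio.Event
    cs: trio.CancelScope | None = None


async def compute_task(
    src: Array,
    dst: Array,
    tracker: TaskTracker,
    *,
    task_status=trio.TASK_STATUS_IGNORED,
) -> None:
    # fake "(re)compute history": align dst to src, then idle like the
    # real-time loop would, always signalling completion on teardown
    with trio.CancelScope() as cs:
        tracker.cs = cs
        dst.index = src.index
        task_status.started(dst.index)
        try:
            await trio.sleep_forever()
        finally:
            tracker.complete.set()


async def resync(
    nursery: trio.Nursery,
    src: Array,
    dst: Array,
    tracker: TaskTracker,
) -> tuple[TaskTracker, int]:
    # cancel the running compute task, wait for it to wind down, then
    # re-spawn it so history is recomputed from the current source
    tracker.cs.cancel()
    await tracker.complete.wait()
    new_tracker = TaskTracker(complete=trio.Event())
    index = await nursery.start(compute_task, src, dst, new_tracker)
    return new_tracker, index


async def poll_and_sync_to_step(
    nursery: trio.Nursery,
    src: Array,
    dst: Array,
    tracker: TaskTracker,
) -> tuple[TaskTracker, int]:
    # keep re-spawning until dst is within one step of src
    diff = src.index - dst.index
    while diff not in (0, 1):
        tracker, _ = await resync(nursery, src, dst, tracker)
        diff = src.index - dst.index
    return tracker, diff


async def main() -> None:
    src, dst = Array(index=100), Array(index=100)
    async with trio.open_nursery() as nursery:
        tracker = TaskTracker(complete=trio.Event())
        await nursery.start(compute_task, src, dst, tracker)

        # simulate a back-fill: the source jumps ahead of the destination
        src.index += 50
        tracker, diff = await poll_and_sync_to_step(nursery, src, dst, tracker)
        assert diff in (0, 1)

        nursery.cancel_scope.cancel()


if __name__ == '__main__':
    trio.run(main)

The trade-off, per the TODO left in the diff, is that a full task
re-spawn is relatively expensive but simple; an incremental update
engine could eventually replace it.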


@@ -148,6 +148,7 @@ async def fsp_compute(
    # import time
    # last = time.time()
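    # guarantee the tracker's completion event gets set on teardown
    # (see the matching finally: added in the next hunk)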
    try:
        # rt stream
        async for processed in out_stream:
@@ -166,6 +167,8 @@ async def fsp_compute(
            # if hz > 60:
            # log.info(f'FSP quote too fast: {hz}')
            # last = time.time()

    finally:
        tracker.complete.set()


@tractor.context
@@ -217,7 +220,7 @@ async def cascade(
    profiler(f'{func_name}: feed up')

    assert src.token == feed.shm.token
    last_len = new_len = len(src.array)
    # last_len = new_len = len(src.array)

    async with (
        ctx.open_stream() as stream,
@@ -249,9 +252,16 @@ async def cascade(
        await ctx.started(index)
        profiler(f'{func_name}: fsp up')
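        # helper that tears down the current fsp task and re-spawns it
        # so that output history is recomputed from the (possibly
        # back-filled) source buffer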
        async def resync(tracker: TaskTracker) -> tuple[TaskTracker, int]:
            # TODO: adopt an incremental update engine/approach
            # where possible here eventually!
            log.warning(f're-syncing fsp {func_name} to source')
            tracker.cs.cancel()
            await tracker.complete.wait()
            return await n.start(fsp_target)

        # Increment the underlying shared memory buffer on every
        # "increment" msg received from the underlying data feed.
        async with feed.index_stream() as stream:
            profiler(f'{func_name}: sample stream up')
@@ -263,22 +273,43 @@ async def cascade(
                # array has been updated such that we compute
                # new history from the (prepended) source.
                diff = src.index - dst.index
                new_len = len(src.array)

                # XXX: ok no idea why this works but "drunk fix"
                # says it don't matter.
                # new_len = len(src.array)
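                # repeatedly re-spawn the fsp task until the dst array
                # index is back within one step of the src index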
                async def poll_and_sync_to_step(tracker):
                    diff = src.index - dst.index
                    while True:
                        if diff in (0, 1):
                            break

                        tracker, index = await resync(tracker)
                        diff = src.index - dst.index
                        # log.info(
                        #     '\n'.join((
                        #         f'history index after sync: {index}',
                        #         f'diff after sync: {diff}',
                        #     ))
                        # )

                    return tracker, diff

                # log.debug(f'diff {diff}')

                if (
                    new_len > last_len + 1 or
                    abs(diff) > 1
                ):
                    # TODO: adopt an incremental update engine/approach
                    # where possible here eventually!
                    log.warning(f're-syncing fsp {func_name} to source')
                    tracker.cs.cancel()
                    await tracker.complete.wait()
                    tracker, index = await n.start(fsp_target)

                    # the source is likely backfilling and we must
                    # sync history calculations
                    abs(len(src.array) - len(dst.array)) > 0 or

                    # skip adding a new bar since we should be fully aligned.
                    # we aren't step synced to the source and may be
                    # leading/lagging by a step
                    diff > 1 or
                    diff < 0
                ):
                    tracker, diff = await poll_and_sync_to_step(tracker)

                    # skip adding a last bar since we should be
                    # source aligned
                    if diff == 0:
                        continue

                # read out last shm row, copy and write new row
@@ -292,4 +323,3 @@ async def cascade(
                last = array[-1:].copy()
                dst.push(last)
                last_len = new_len