Merge pull request #252 from pikers/fspd_cluster

Fspd cluster
py3.10_support
goodboy 2022-01-25 07:46:50 -05:00 committed by GitHub
commit 11544dc64f
7 changed files with 286 additions and 251 deletions


@@ -18,30 +18,18 @@
 Cacheing apis and toolz.

 """
+# further examples of interest:
+# https://gist.github.com/njsmith/cf6fc0a97f53865f2c671659c88c1798#file-cache-py-L8
 from collections import OrderedDict
-from typing import (
-    Any,
-    Hashable,
-    Optional,
-    TypeVar,
-    AsyncContextManager,
-)
 from contextlib import (
     asynccontextmanager,
 )

-import trio
-from trio_typing import TaskStatus
-import tractor
+from tractor.trionics import maybe_open_context

 from .brokers import get_brokermod
 from .log import get_logger

-
-T = TypeVar('T')

 log = get_logger(__name__)
@@ -74,124 +62,18 @@ def async_lifo_cache(maxsize=128):
     return decorator

-_cache: dict[str, 'Client'] = {}  # noqa
-
-
-class cache:
-    '''Globally (processs wide) cached, task access to a
-    kept-alive-while-in-use async resource.
-    '''
-    lock = trio.Lock()
-    users: int = 0
-    values: dict[Any, Any] = {}
-    resources: dict[
-        int,
-        Optional[tuple[trio.Nursery, trio.Event]]
-    ] = {}
-    no_more_users: Optional[trio.Event] = None
-
-    @classmethod
-    async def run_ctx(
-        cls,
-        mng,
-        key,
-        task_status: TaskStatus[T] = trio.TASK_STATUS_IGNORED,
-    ) -> None:
-        async with mng as value:
-            _, no_more_users = cls.resources[id(mng)]
-            cls.values[key] = value
-            task_status.started(value)
-            try:
-                await no_more_users.wait()
-            finally:
-                value = cls.values.pop(key)
-                # discard nursery ref so it won't be re-used (an error)
-                cls.resources.pop(id(mng))
-
-
-@asynccontextmanager
-async def maybe_open_ctx(
-    key: Hashable,
-    mngr: AsyncContextManager[T],
-) -> (bool, T):
-    '''Maybe open a context manager if there is not already a cached
-    version for the provided ``key``. Return the cached instance on
-    a cache hit.
-    '''
-    await cache.lock.acquire()
-    ctx_key = id(mngr)
-    value = None
-    try:
-        # lock feed acquisition around task racing / ``trio``'s
-        # scheduler protocol
-        value = cache.values[key]
-        log.info(f'Reusing cached resource for {key}')
-        cache.users += 1
-        cache.lock.release()
-        yield True, value
-
-    except KeyError:
-        log.info(f'Allocating new resource for {key}')
-        # **critical section** that should prevent other tasks from
-        # checking the cache until complete otherwise the scheduler
-        # may switch and by accident we create more then one feed.
-
-        # TODO: avoid pulling from ``tractor`` internals and
-        # instead offer a "root nursery" in piker actors?
-        service_n = tractor.current_actor()._service_n
-
-        # TODO: does this need to be a tractor "root nursery"?
-        ln = cache.resources.get(ctx_key)
-        assert not ln
-        ln, _ = cache.resources[ctx_key] = (service_n, trio.Event())
-
-        value = await ln.start(cache.run_ctx, mngr, key)
-        cache.users += 1
-        cache.lock.release()
-        yield False, value
-
-    finally:
-        cache.users -= 1
-        if cache.lock.locked():
-            cache.lock.release()
-        if value is not None:
-            # if no more consumers, teardown the client
-            if cache.users <= 0:
-                log.warning(f'De-allocating resource for {key}')
-                # terminate mngr nursery
-                entry = cache.resources.get(ctx_key)
-                if entry:
-                    _, no_more_users = entry
-                    no_more_users.set()

 @asynccontextmanager
 async def open_cached_client(
     brokername: str,
 ) -> 'Client':  # noqa
-    '''Get a cached broker client from the current actor's local vars.
+    '''
+    Get a cached broker client from the current actor's local vars.

     If one has not been setup do it and cache it.
     '''
     brokermod = get_brokermod(brokername)
-    async with maybe_open_ctx(
-        key=brokername,
-        mngr=brokermod.get_client(),
+    async with maybe_open_context(
+        acm_func=brokermod.get_client,
     ) as (cache_hit, client):
         yield client
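For context on the replacement api: ``maybe_open_context`` hands back a ``(cache_hit, value)`` pair where the first caller actually enters the wrapped acm and every later concurrent caller re-uses the same live value. A rough, single-process sketch of that contract (an illustration only, not tractor's implementation, which parks the entered acm in a background service-nursery task):

```python
# single-process sketch of the ``maybe_open_context`` contract;
# NOT tractor's implementation, just the caching semantics
from contextlib import asynccontextmanager, AsyncExitStack
from typing import Any, Callable

import trio

_cache: dict[Any, list] = {}  # key -> [exit_stack, value, user_count]
_lock = trio.Lock()


@asynccontextmanager
async def maybe_open_context_sketch(
    acm_func: Callable,
    kwargs: dict | None = None,
    key: Any = None,
):
    kwargs = kwargs or {}
    # default the key to the acm func + its kwargs, mirroring the
    # kwargs-based keying used at the call sites in this PR
    ckey = key if key is not None else (acm_func, tuple(sorted(kwargs.items())))

    async with _lock:
        entry = _cache.get(ckey)
        if entry is None:
            # first user: actually enter the wrapped acm
            stack = AsyncExitStack()
            value = await stack.enter_async_context(acm_func(**kwargs))
            entry = _cache[ckey] = [stack, value, 0]
            cache_hit = False
        else:
            cache_hit = True
        entry[2] += 1

    try:
        yield cache_hit, entry[1]
    finally:
        async with _lock:
            entry[2] -= 1
            if entry[2] == 0:
                # last user tears down the real resource
                del _cache[ckey]
                await entry[0].aclose()


@asynccontextmanager
async def open_client():
    print('allocating client')
    yield 'client'
    print('deallocating client')


async def task(n: int):
    async with maybe_open_context_sketch(open_client) as (hit, client):
        print(f'task {n} cache hit: {hit}')
        await trio.sleep(0.1)


async def main():
    async with trio.open_nursery() as nursery:
        for i in range(3):
            nursery.start_soon(task, i)

trio.run(main)
```

The real version also keys the cache on the acm function plus its ``kwargs``, which is why the call sites below pass ``kwargs=`` dicts (and ``partial`` for positional bits) instead of pre-bound arguments.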


@@ -272,9 +272,8 @@ class ShmArray:
             return end

         except ValueError as err:
-            # shoudl raise if diff detected
+            # should raise if diff detected
             self.diff_err_fields(data)
             raise err

     def diff_err_fields(


@@ -27,7 +27,6 @@ from types import ModuleType
 from typing import (
     Any, Sequence,
     AsyncIterator, Optional,
-    Awaitable, Callable,
 )

 import trio

@@ -37,7 +36,7 @@ import tractor
 from pydantic import BaseModel

 from ..brokers import get_brokermod
-from .._cacheables import maybe_open_ctx
+from .._cacheables import maybe_open_context
 from ..log import get_logger, get_console_log
 from .._daemon import (
     maybe_spawn_brokerd,
@@ -356,7 +355,10 @@ async def open_feed_bus(
                 f'Stopping {symbol}.{brokername} feed for {ctx.chan.uid}')
             if tick_throttle:
                 n.cancel_scope.cancel()
-            bus._subscribers[symbol].remove(sub)
+            try:
+                bus._subscribers[symbol].remove(sub)
+            except ValueError:
+                log.warning(f'{sub} for {symbol} was already removed?')
@asynccontextmanager @asynccontextmanager
@ -368,12 +370,13 @@ async def open_sample_step_stream(
# XXX: this should be singleton on a host, # XXX: this should be singleton on a host,
# a lone broker-daemon per provider should be # a lone broker-daemon per provider should be
# created for all practical purposes # created for all practical purposes
async with maybe_open_ctx( async with maybe_open_context(
key=delay_s, acm_func=partial(
mngr=portal.open_stream_from( portal.open_stream_from,
iter_ohlc_periods, iter_ohlc_periods,
delay_s=delay_s, # must be kwarg
), ),
kwargs={'delay_s': delay_s},
) as (cache_hit, istream): ) as (cache_hit, istream):
if cache_hit: if cache_hit:
# add a new broadcast subscription for the quote stream # add a new broadcast subscription for the quote stream
@@ -520,7 +523,12 @@ async def open_feed(
         ) as (ctx, (init_msg, first_quotes)),

-        ctx.open_stream() as stream,
+        ctx.open_stream(
+            # XXX: be explicit about stream backpressure since we should
+            # **never** overrun on feeds being too fast, which will
+            # pretty much always happen with HFT XD
+            backpressure=True
+        ) as stream,
     ):
         # we can only read from shm
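Why flip ``backpressure=True``: with backpressure the sender blocks when the receiver lags instead of raising an overrun, which is the right failure mode for quote feeds that will always outrun the UI. The trade-off in miniature, using a plain ``trio`` bounded memory channel (a stand-in, not tractor's IPC stream):

```python
# toy backpressure demo: the producer blocks on a full buffer rather
# than dropping or erroring when the consumer is slow
import trio


async def producer(send: trio.MemorySendChannel):
    async with send:
        for i in range(5):
            await send.send(i)  # blocks while the buffer is full
            print(f'sent {i}')


async def consumer(recv: trio.MemoryReceiveChannel):
    async with recv:
        async for msg in recv:
            await trio.sleep(0.2)  # deliberately slow consumer
            print(f'got {msg}')


async def main():
    send, recv = trio.open_memory_channel(1)
    async with trio.open_nursery() as n:
        n.start_soon(producer, send)
        n.start_soon(consumer, recv)

trio.run(main)
```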
@@ -566,6 +574,7 @@ async def open_feed(
     feed._max_sample_rate = max(ohlc_sample_rates)

+    # yield feed
     try:
         yield feed
     finally:
@@ -590,17 +599,19 @@ async def maybe_open_feed(
     '''
     sym = symbols[0].lower()

-    async with maybe_open_ctx(
-        key=(brokername, sym),
-        mngr=open_feed(
-            brokername,
-            [sym],
-            loglevel=loglevel,
-            **kwargs,
-        ),
+    async with maybe_open_context(
+        acm_func=open_feed,
+        kwargs={
+            'brokername': brokername,
+            'symbols': [sym],
+            'loglevel': loglevel,
+            'tick_throttle': kwargs.get('tick_throttle'),
+        },
+        key=sym,
     ) as (cache_hit, feed):
         if cache_hit:
+            print('USING CACHED FEED')
             # add a new broadcast subscription for the quote stream
             # if this feed is likely already in use
             async with feed.stream.subscribe() as bstream:
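The ``cache_hit`` branch exists because a cached ``Feed`` already has one consumer attached: an additional consumer must tap the existing quote stream via ``feed.stream.subscribe()`` rather than opening a second feed. A minimal fan-out sketch in plain ``trio`` (a stand-in for tractor's broadcast receiver, illustration only):

```python
# minimal broadcast fan-out: each subscriber gets its own receive
# channel fed from a single upstream publisher
import trio


class Broadcast:
    def __init__(self):
        self._subs: list[trio.MemorySendChannel] = []

    def subscribe(self) -> trio.MemoryReceiveChannel:
        send, recv = trio.open_memory_channel(8)
        self._subs.append(send)
        return recv

    async def publish(self, msg):
        for send in self._subs:
            await send.send(msg)


async def main():
    bcast = Broadcast()
    sub1, sub2 = bcast.subscribe(), bcast.subscribe()

    async def consumer(name: str, recv):
        async with recv:
            async for msg in recv:
                print(name, msg)

    async with trio.open_nursery() as n:
        n.start_soon(consumer, 'a', sub1)
        n.start_soon(consumer, 'b', sub2)
        for i in range(3):
            await bcast.publish(i)
        for send in bcast._subs:
            await send.aclose()

trio.run(main)
```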


@@ -144,10 +144,13 @@ async def fsp_compute(
     profiler(f'{func_name} pushed history')
     profiler.finish()

+    # TODO: UGH, what is the right way to do something like this?
+    if not ctx._started_called:
+        await ctx.started(index)
+
     # setup a respawn handle
     with trio.CancelScope() as cs:
         tracker = TaskTracker(trio.Event(), cs)
-        await ctx.started(index)
         task_status.started((tracker, index))
         profiler(f'{func_name} yield last index')
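``Context.started()`` may only be called once per context, so hoisting it behind the ``_started_called`` check makes ``fsp_compute`` safe to re-enter from respawn paths. The guard pattern in isolation (``_started_called`` is a private tractor flag, mimicked here with a dummy class):

```python
# stand-in sketch of the "only call started() once" guard; the Context
# class below is a dummy, not tractor's
import trio


class Context:
    def __init__(self) -> None:
        self._started_called = False

    async def started(self, value) -> None:
        if self._started_called:
            raise RuntimeError('started() may only be called once!')
        self._started_called = True
        print(f'sent first value: {value}')


async def fsp_compute(ctx: Context, index: int) -> None:
    # safe even if this function is re-entered by a respawn handle
    if not ctx._started_called:
        await ctx.started(index)


async def main():
    ctx = Context()
    await fsp_compute(ctx, 0)
    await fsp_compute(ctx, 1)  # second call is a no-op, not an error

trio.run(main)
```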

piker/trionics.py (new file, +80 lines, mode 100644)

@@ -0,0 +1,80 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship of piker0)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+'''
+sugarz for trio/tractor conc peeps.
+
+'''
+from typing import AsyncContextManager
+from typing import TypeVar
+from contextlib import asynccontextmanager as acm
+
+import trio
+
+
+# A regular invariant generic type
+T = TypeVar("T")
+
+
+async def _enter_and_sleep(
+    mngr: AsyncContextManager[T],
+    to_yield: dict[int, T],
+    all_entered: trio.Event,
+    # task_status: TaskStatus[T] = trio.TASK_STATUS_IGNORED,
+) -> T:
+    '''Open the async context manager, deliver its value
+    to this task's spawner and sleep until cancelled.
+
+    '''
+    async with mngr as value:
+        to_yield[id(mngr)] = value
+        if all(to_yield.values()):
+            all_entered.set()
+
+        # sleep until cancelled
+        await trio.sleep_forever()
+
+
+@acm
+async def async_enter_all(
+    *mngrs: AsyncContextManager[T],
+) -> tuple[T, ...]:
+    to_yield = {}.fromkeys(id(mngr) for mngr in mngrs)
+    all_entered = trio.Event()
+
+    async with trio.open_nursery() as n:
+        for mngr in mngrs:
+            n.start_soon(
+                _enter_and_sleep,
+                mngr,
+                to_yield,
+                all_entered,
+            )
+
+        # deliver control once all managers have started up
+        await all_entered.wait()
+        yield tuple(to_yield.values())
+
+        # tear down all sleeper tasks thus triggering individual
+        # mngr ``__aexit__()``s.
+        n.cancel_scope.cancel()
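A usage sketch for the new helper (names and delays made up): all managers are entered concurrently so startup costs overlap, i.e. total entry time is roughly the slowest manager rather than the sum. Note the ``all(to_yield.values())`` gate implicitly assumes every manager yields a truthy value:

```python
# usage sketch for ``async_enter_all`` (defined above); resource names
# and delays are hypothetical
from contextlib import asynccontextmanager as acm
import trio


@acm
async def open_resource(name: str, delay: float):
    await trio.sleep(delay)  # simulate slow startup
    print(f'{name} entered')
    yield name               # must be truthy, see the ``all()`` gate
    print(f'{name} exited')


async def main():
    async with async_enter_all(
        open_resource('feed', 0.2),
        open_resource('fsp_cluster', 0.1),
    ) as (feed, cluster):
        # values are delivered together after ~0.2s, not ~0.3s, and in
        # the same order as the passed-in managers
        print(feed, cluster)

trio.run(main)
```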


@@ -170,10 +170,11 @@ def _main(
     piker_loglevel: str,
     tractor_kwargs,
 ) -> None:
-    """Sync entry point to start a chart app.
-    """
-    # ``tractor`` + Qt runtime entry point
+    '''
+    Sync entry point to start a chart: a ``tractor`` + Qt runtime
+    entry point
+    '''
     run_qtractor(
         func=_async_main,
         args=(sym, brokernames, piker_loglevel),


@@ -21,22 +21,23 @@ this module ties together quote and computational (fsp) streams with
 graphics update methods via our custom ``pyqtgraph`` charting api.

 '''
-from contextlib import asynccontextmanager
+from contextlib import asynccontextmanager as acm
 from functools import partial
+from itertools import cycle
 import time
 from types import ModuleType
-from typing import Optional
+from typing import Optional, AsyncGenerator

 import numpy as np
 from pydantic import create_model
+import pyqtgraph as pg
 import tractor
 import trio

 from .. import brokers
-from ..data.feed import (
-    open_feed,
-    # Feed,
-)
+from .._cacheables import maybe_open_context
+from ..trionics import async_enter_all
+from ..data.feed import open_feed, Feed
 from ._chart import (
     ChartPlotWidget,
     LinkedSplits,
@@ -60,27 +61,19 @@ log = get_logger(__name__)
 _quote_throttle_rate: int = 58  # Hz

-def update_fsp_chart(
-    chart: ChartPlotWidget,
-    shm: ShmArray,
-    graphics_name: str,
-    array_key: Optional[str],
-) -> None:
-
-    array = shm.array
-
-    # update graphics
-    # NOTE: this does a length check internally which allows it
-    # staying above the last row check below..
-    chart.update_curve_from_array(
-        graphics_name,
-        array,
-        array_key=array_key or graphics_name,
-    )
+def try_read(array: np.ndarray) -> Optional[np.ndarray]:
+    '''
+    Try to read the last row from a shared mem array or ``None``
+    if the array read returns a zero-length array result.
+
+    Can be used to check for backfilling race conditions where an array
+    is currently being (re-)written by a writer actor but the reader is
+    unaware and reads during the window where the first and last indexes
+    are being updated.
+
+    '''
     try:
-        last_row = array[-1]
+        return array[-1]
     except IndexError:
         # XXX: race condition with backfilling shm.
         #
@@ -93,24 +86,34 @@ def update_fsp_chart(
         # signal that a prepend is taking place and this consumer can
         # respond (eg. redrawing graphics) accordingly.
         log.warning(f'Read-race on shm array: {graphics_name}@{shm.token}')
+        # the array read was emtpy
+        return None
+
+
+def update_fsp_chart(
+    chart: ChartPlotWidget,
+    shm: ShmArray,
+    graphics_name: str,
+    array_key: Optional[str],
+) -> None:
+
+    array = shm.array
+    last_row = try_read(array)
+    # guard against unreadable case
+    if not last_row:
         return

-    # TODO: provide a read sync mechanism to avoid this polling. the
-    # underlying issue is that a backfill (aka prepend) and subsequent
-    # shm array first/last index update could result in an empty array
-    # read here since the stream is never torn down on the re-compute
-    # steps.
-    # read_tries = 2
-    # while read_tries > 0:
-    #     try:
-    #         # read last
-    #         array = shm.array
-    #         value = array[-1][array_key]
-    #         break
-    #     except IndexError:
-    #         read_tries -= 1
-    #         continue
+    # update graphics
+    # NOTE: this does a length check internally which allows it
+    # staying above the last row check below..
+    chart.update_curve_from_array(
+        graphics_name,
+        array,
+        array_key=array_key or graphics_name,
+    )
+    chart._set_yrange()

     # XXX: re: ``array_key``: fsp func names must be unique meaning we
     # can't have duplicates of the underlying data even if multiple
@@ -126,24 +129,12 @@ def update_fsp_chart(
     last_val_sticky.update_from_data(-1, last)

-# _clses = {
-#     'clears': {'trade', 'utrade', 'last'},
-#     'last': {'last'},
-#     'bids': {'bid', 'bsize'},
-#     'asks': {'ask', 'asize'},
-# }
+# a working tick-type-classes template
+_tick_groups = {
+    'clears': {'trade', 'utrade', 'last'},
+    'bids': {'bid', 'bsize'},
+    'asks': {'ask', 'asize'},
+}

-# XXX: idea for frame type data structure we could use on the
-# wire instead of doing it here?
-# frames = {
-#     'index': ['type_a', 'type_c', 'type_n', 'type_n', 'type_c'],
-#     'type_a': [tick0, tick1, tick2, .., tickn],
-#     'type_b': [tick0, tick1, tick2, .., tickn],
-#     'type_c': [tick0, tick1, tick2, .., tickn],
-#     ...
-#     'type_n': [tick0, tick1, tick2, .., tickn],
-# }

 def chart_maxmin(
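``_tick_groups`` centralizes the tick-type sets that were previously inlined at each use site (see the ``clear_types`` and L1-label hunks below), so filtering a quote's ticks reduces to a set-membership test. With some hypothetical tick data:

```python
# hypothetical ticks, shaped like the quote payloads used below
_tick_groups = {
    'clears': {'trade', 'utrade', 'last'},
    'bids': {'bid', 'bsize'},
    'asks': {'ask', 'asize'},
}

ticks = [
    {'type': 'trade', 'price': 101.2, 'size': 3},
    {'type': 'bid', 'price': 101.1, 'size': 10},
    {'type': 'ask', 'price': 101.3, 'size': 7},
]

# group membership replaces hard-coded tuples at each branch
clears = [t for t in ticks if t['type'] in _tick_groups['clears']]
asks = [t for t in ticks if t['type'] in _tick_groups['asks']]
print(clears)  # [{'type': 'trade', 'price': 101.2, 'size': 3}]
print(asks)    # [{'type': 'ask', 'price': 101.3, 'size': 7}]
```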
@@ -263,8 +254,12 @@ async def update_chart_from_quotes(
         now = time.time()
         quote_period = now - last_quote
-        if quote_period <= 1/_quote_throttle_rate:
-            log.warning(f'TOO FAST: {1/quote_period}')
+        quote_rate = round(1/quote_period, 1) if quote_period else float('inf')
+        if (
+            quote_period <= 1/_quote_throttle_rate
+            and quote_rate > _quote_throttle_rate + 2
+        ):
+            log.warning(f'High quote rate {symbol.key}: {quote_rate}')
         last_quote = now

         # chart isn't active/shown so skip render cycle and pause feed(s)
@@ -291,15 +286,7 @@ async def update_chart_from_quotes(
         array = ohlcv.array

         if vlm_chart:
-            # print(f"volume: {end['volume']}")
             vlm_chart.update_curve_from_array('volume', array)
-
-            # built-in tina $vlm FSP using chl3 typical price for ohlc step
-            # last = array[-1]
-            # chl3 = (last['close'] + last['high'] + last['low']) / 3
-            # v = last['volume']
-            # dv = last['volume'] * chl3
             vlm_sticky.update_from_data(*array[-1][['index', 'volume']])

         if (
@@ -346,7 +333,7 @@ async def update_chart_from_quotes(
             # TODO: eventually we want to separate out the utrade (aka
             # dark vlm prices) here and show them as an additional
             # graphic.
-            clear_types = {'trade', 'utrade', 'last'}
+            clear_types = _tick_groups['clears']

             # XXX: if we wanted to iterate in "latest" (i.e. most
             # current) tick first order as an optimization where we only
@@ -415,11 +402,11 @@ async def update_chart_from_quotes(
                 # label.size -= size

             # elif ticktype in ('ask', 'asize'):
-            elif typ in ('ask', 'asize'):
+            elif typ in _tick_groups['asks']:
                 l1.ask_label.update_fields({'level': price, 'size': size})

             # elif ticktype in ('bid', 'bsize'):
-            elif typ in ('bid', 'bsize'):
+            elif typ in _tick_groups['bids']:
                 l1.bid_label.update_fields({'level': price, 'size': size})

             # check for y-range re-size
@@ -492,7 +479,7 @@ def maybe_mk_fsp_shm(
     return shm, opened

-@asynccontextmanager
+@acm
 async def open_fsp_sidepane(
     linked: LinkedSplits,
@@ -558,8 +545,52 @@ async def open_fsp_sidepane(
     yield sidepane

-async def open_fspd_cluster(
+@acm
+async def open_fsp_cluster(
+    workers: int = 2
+) -> AsyncGenerator[int, dict[str, tractor.Portal]]:
+
+    from tractor._clustering import open_actor_cluster
+
+    profiler = pg.debug.Profiler(
+        delayed=False,
+        disabled=False
+    )
+    async with open_actor_cluster(
+        count=2,
+        names=['fsp_0', 'fsp_1'],
+        modules=['piker.fsp._engine'],
+    ) as cluster_map:
+        profiler('started fsp cluster')
+        yield cluster_map
+
+
+@acm
+async def maybe_open_fsp_cluster(
+    workers: int = 2,
+    **kwargs,
+) -> AsyncGenerator[int, dict[str, tractor.Portal]]:
+
+    kwargs.update(
+        {'workers': workers}
+    )
+    async with maybe_open_context(
+        # for now make a cluster per client?
+        acm_func=open_fsp_cluster,
+        kwargs=kwargs,
+    ) as (cache_hit, cluster_map):
+        if cache_hit:
+            log.info('re-using existing fsp cluster')
+            yield cluster_map
+        else:
+            yield cluster_map
+
+
+async def start_fsp_displays(
+    cluster_map: dict[str, tractor.Portal],
     linkedsplits: LinkedSplits,
     fsps: dict[str, str],
     sym: str,
@@ -580,19 +611,22 @@ async def start_fsp_displays(
     '''
     linkedsplits.focus()

-    # spawns sub-processes which execute cpu bound fsp work
-    # which is streamed back to this parent.
-    async with (
-        tractor.open_nursery() as n,
-        trio.open_nursery() as ln,
-    ):
+    profiler = pg.debug.Profiler(
+        delayed=False,
+        disabled=False
+    )
+    async with trio.open_nursery() as n:
         # Currently we spawn an actor per fsp chain but
         # likely we'll want to pool them eventually to
         # scale horizonatlly once cores are used up.
-        for display_name, conf in fsps.items():
+        for (display_name, conf), (name, portal) in zip(
+            fsps.items(),
+            # rr to cluster for now..
+            cycle(cluster_map.items()),
+        ):
             func_name = conf['func_name']
             shm, opened = maybe_mk_fsp_shm(
                 sym,
                 field_name=func_name,
@@ -600,18 +634,17 @@ async def start_fsp_displays(
                 readonly=True,
             )
+            profiler(f'created shm for fsp actor: {display_name}')

             # XXX: fsp may have been opened by a duplicate chart.
             # Error for now until we figure out how to wrap fsps as
             # "feeds".  assert opened, f"A chart for {key} likely
             # already exists?"

-            portal = await n.start_actor(
-                enable_modules=['piker.fsp._engine'],
-                name='fsp.' + display_name,
-            )
+            profiler(f'attached to fsp portal: {display_name}')

             # init async
-            ln.start_soon(
+            n.start_soon(
                 partial(
                     update_chart_from_fsp,

@@ -627,6 +660,7 @@
                     is_overlay=conf.get('overlay', False),
                     group_status_key=group_status_key,
                     loglevel=loglevel,
+                    profiler=profiler,
                 )
             )
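The ``zip(..., cycle(...))`` pairing above is the round-robin scheduling: the finite fsp config mapping is zipped against an endlessly cycled portal map, so every fsp chain lands on some worker even when there are more fsps than cluster actors. In isolation (stand-in names and portals, for illustration):

```python
from itertools import cycle

# hypothetical fsp configs and a 2-worker cluster map
fsps = {'rsi': {}, 'vwap': {}, 'dolla_vlm': {}}
cluster_map = {'fsp_0': '<portal0>', 'fsp_1': '<portal1>'}

for (display_name, conf), (actor_name, portal) in zip(
    fsps.items(),
    cycle(cluster_map.items()),  # rr to cluster
):
    print(f'{display_name} -> {actor_name}')
# rsi -> fsp_0
# vwap -> fsp_1
# dolla_vlm -> fsp_0
```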
@@ -650,6 +684,7 @@ async def update_chart_from_fsp(
     group_status_key: str,
     loglevel: str,
+    profiler: pg.debug.Profiler,

 ) -> None:
     '''FSP stream chart update loop.

@@ -658,6 +693,9 @@ async def update_chart_from_fsp(
     config map.

     '''
+    profiler(f'started chart task for fsp: {func_name}')
+
     done = linkedsplits.window().status_bar.open_status(
         f'loading fsp, {display_name}..',
         group_key=group_status_key,
@@ -676,12 +714,15 @@ async def update_chart_from_fsp(
             symbol=sym,
             func_name=func_name,
             loglevel=loglevel,
+            zero_on_step=conf.get('zero_on_step', False),

         ) as (ctx, last_index),
         ctx.open_stream() as stream,
         open_fsp_sidepane(linkedsplits, {display_name: conf},) as sidepane,
     ):
+        profiler(f'fsp:{func_name} attached to fsp ctx-stream')
+
         if is_overlay:
             chart = linkedsplits.chart
             chart.draw_curve(
@@ -719,6 +760,8 @@ async def update_chart_from_fsp(
             array_key = func_name

+        profiler(f'fsp:{func_name} chart created')
+
         # first UI update, usually from shm pushed history
         update_fsp_chart(
             chart,
@@ -753,15 +796,15 @@ async def update_chart_from_fsp(
         done()
         chart.linked.resize_sidepanes()

+        profiler(f'fsp:{func_name} starting update loop')
+        profiler.finish()
+
         # update chart graphics
-        i = 0
         last = time.time()
         async for value in stream:

             # chart isn't actively shown so just skip render cycle
             if chart.linked.isHidden():
-                print(f'{i} unseen fsp cyclce')
-                i += 1
                 continue

             else:
@@ -785,11 +828,17 @@ async def update_chart_from_fsp(
             last = time.time()

-async def check_for_new_bars(feed, ohlcv, linkedsplits):
-    """Task which updates from new bars in the shared ohlcv buffer every
+async def check_for_new_bars(
+    feed: Feed,
+    ohlcv: np.ndarray,
+    linkedsplits: LinkedSplits,
+) -> None:
+    '''
+    Task which updates from new bars in the shared ohlcv buffer every
     ``delay_s`` seconds.
-    """
+
+    '''
     # TODO: right now we'll spin printing bars if the last time
     # stamp is before a large period of no market activity.
     # Likely the best way to solve this is to make this task

@@ -816,12 +865,17 @@ async def check_for_new_bars(
             # current bar) and then either write the current bar manually
             # or place a cursor for visual cue of the current time step.

+            array = ohlcv.array
+            # avoid unreadable race case on backfills
+            while not try_read(array):
+                await trio.sleep(0.01)
+
             # XXX: this puts a flat bar on the current time step
             # TODO: if we eventually have an x-axis time-step "cursor"
             # we can get rid of this since it is extra overhead.
             price_chart.update_ohlc_from_array(
                 price_chart.name,
-                ohlcv.array,
+                array,
                 just_history=False,
             )
@@ -835,6 +889,9 @@ async def check_for_new_bars(
             # each subplot
             for name, chart in linkedsplits.subplots.items():

+                # TODO: do we need the same unreadable guard as for the
+                # price chart (above) here?
                 chart.update_curve_from_array(
                     chart.name,
                     chart._shm.array,
@@ -853,7 +910,7 @@ def has_vlm(ohlcv: ShmArray) -> bool:
     return not bool(np.all(np.isin(volm, -1)) or np.all(np.isnan(volm)))

-@asynccontextmanager
+@acm
 async def maybe_open_vlm_display(
     linked: LinkedSplits,
@@ -875,12 +932,12 @@ async def maybe_open_vlm_display(
     async with open_fsp_sidepane(
         linked, {
-            'volume': {
+            '$_vlm': {
                 'params': {
                     'price_func': {
-                        'default_value': 'ohl3',
+                        'default_value': 'chl3',
                         # tell target ``Edit`` widget to not allow
                         # edits for now.
                         'widget_kwargs': {'readonly': True},
@@ -957,7 +1014,8 @@ async def display_symbol_data(
     order_mode_started: trio.Event,

 ) -> None:
-    '''Spawn a real-time updated chart for ``symbol``.
+    '''
+    Spawn a real-time updated chart for ``symbol``.

     Spawned ``LinkedSplits`` chart widgets can remain up but hidden so
     that multiple symbols can be viewed and switched between extremely
@@ -979,17 +1037,19 @@ async def display_symbol_data(
     #     group_key=loading_sym_key,
     # )

-    async with(
+    async with async_enter_all(
         open_feed(
             provider,
             [sym],
             loglevel=loglevel,

-            # 60 FPS to limit context switches
+            # limit to at least display's FPS
+            # avoiding needless Qt-in-guest-mode context switches
             tick_throttle=_quote_throttle_rate,
+        ),
+        maybe_open_fsp_cluster(),

-    ) as feed,
-    ):
+    ) as (feed, cluster_map):

         ohlcv: ShmArray = feed.shm
         bars = ohlcv.array
@@ -1043,9 +1103,7 @@ async def display_symbol_data(
         fsp_conf = {
             'rsi': {
-
-                # literal python func ref lookup name
-                'func_name': 'rsi',
+                'func_name': 'rsi',  # literal python func ref lookup name

                 # map of parameters to place on the fsp sidepane widget
                 # which should map to dynamic inputs available to the

@@ -1062,7 +1120,6 @@ async def display_symbol_data(
                 'static_yrange': (0, 100),
             },
         },
-
     }

     if has_vlm(ohlcv):  # and provider != 'binance':
@@ -1086,13 +1143,15 @@ async def display_symbol_data(
         linkedsplits.focus()
         await trio.sleep(0)

+        vlm_chart = None
         async with (
             trio.open_nursery() as ln,
             maybe_open_vlm_display(linkedsplits, ohlcv) as vlm_chart,
         ):
             # load initial fsp chain (otherwise known as "indicators")
             ln.start_soon(
-                open_fspd_cluster,
+                start_fsp_displays,
+                cluster_map,
                 linkedsplits,
                 fsp_conf,
                 sym,