Compare commits

..

2 Commits

12 changed files with 267 additions and 304 deletions

View File

@@ -35,7 +35,7 @@ log = get_logger(__name__)
 _root_dname = 'pikerd'

-_registry_addr = ('127.0.0.1', 6116)
+_registry_addr = ('127.0.0.1', 1616)
 _tractor_kwargs: dict[str, Any] = {
     # use a different registry addr then tractor's default
     'arbiter_addr': _registry_addr
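
Note: the hunk above only moves the default registry port (6116 -> 1616).
A minimal sketch of how such a kwargs dict is typically consumed, assuming
``tractor.open_root_actor()`` accepts the ``arbiter_addr`` keyword named in
the dict (that call signature is an assumption, not shown in this diff):

    # sketch: point the root actor at piker's non-default registry socket
    import tractor
    import trio

    _registry_addr = ('127.0.0.1', 1616)

    async def main() -> None:
        async with tractor.open_root_actor(
            arbiter_addr=_registry_addr,
        ):
            ...  # spawn daemons which register at this address

    trio.run(main)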

View File

@@ -640,7 +640,6 @@ class Client:
         ready = ticker.updateEvent

         # ensure a last price gets filled in before we deliver quote
-        warnset: bool = False
         for _ in range(100):
             if isnan(ticker.last):
@@ -651,21 +650,17 @@ class Client:
                 if ready in done:
                     break
                 else:
-                    if not warnset:
-                        log.warning(
-                            f'Quote for {symbol} timed out: market is closed?'
-                        )
-                        warnset = True
+                    log.warning(
+                        f'Quote for {symbol} timed out: market is closed?'
+                    )

             else:
                 log.info(f'Got first quote for {symbol}')
                 break
         else:
-            if not warnset:
-                log.warning(
-                    f'Symbol {symbol} is not returning a quote '
-                    'it may be outside trading hours?')
-                warnset = True
+            log.warning(
+                f'Symbol {symbol} is not returning a quote '
+                'it may be outside trading hours?')

         return contract, ticker, details
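
Note: the removed ``warnset`` flag implemented a warn-once guard so the
poll loop logs a single warning instead of one per iteration. A standalone
sketch of that pattern (all names here are illustrative, not from piker):

    import logging
    import random

    log = logging.getLogger(__name__)

    def wait_for_quote(tries: int = 100) -> bool:
        warned = False
        for _ in range(tries):
            if random.random() < 0.1:  # stand-in for "first quote arrived"
                return True
            if not warned:
                # only emitted on the first failed iteration
                log.warning('quote timed out: market is closed?')
                warned = True
        return False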

View File

@@ -555,36 +555,56 @@ def load_flex_trades(
     report = flexreport.FlexReport(path=path)
     trade_entries = report.extract('Trade')

-    trades = {
-        # XXX: LOL apparently ``toml`` has a bug
-        # where a section key error will show up in the write
-        # if you leave this as an ``int``?
-        str(t.__dict__['tradeID']): t.__dict__
-        for t in trade_entries
-    }
-
-    ln = len(trades)
-    log.info(f'Loaded {ln} trades from flex query')
-
-    trades_by_account = {}
-    for tid, trade in trades.items():
-        trades_by_account.setdefault(
-            # oddly for some so-called "BookTrade" entries
-            # this field seems to be blank, no cuckin clue.
-            # trade['ibExecID']
-            str(trade['accountId']), {}
-        )[tid] = trade
-
-    section = {'ib': trades_by_account}
-    pprint(section)
+    # get reverse map to user account names
+    accounts = conf['accounts'].inverse
+    trades_by_account = {}
+
+    for t in trade_entries:
+        # XXX: LOL apparently ``toml`` has a bug
+        # where a section key error will show up in the write
+        # if you leave this as an ``int``?
+        trade = t.__dict__
+
+        # oddly for some so-called "BookTrade" entries
+        # this field seems to be blank, no cuckin clue.
+        # trade['ibExecID']
+        tid = str(trade['tradeID'])
+        date = str(trade['tradeDate'])
+        acctid = accounts[str(trade['accountId'])]
+        trades_by_account.setdefault(
+            acctid, {}
+        ).setdefault(date, {})[tid] = trade
+
+    ln = len(trades_by_account.values())
+    log.info(f'Loaded {ln} trades from flex query')
+
+    # section = {'ib': trades_by_account}
+    for acctid, trades_by_id in trades_by_account.items():
+        with config.open_trade_ledger('ib', acctid) as ledger:
+            ledger.update({'ib': trades_by_id})
+
+    # pprint(section)

     # TODO: load the config first and append in
     # the new trades loaded here..
-    try:
-        config.write(section, 'trades')
-    except KeyError:
-        import pdbpp; pdbpp.set_trace()  # noqa
+    # try:
+    #     config.write(section, 'trades')
+    # except KeyError:
+    #     import pdbpp; pdbpp.set_trace()  # noqa


 if __name__ == '__main__':
-    load_flex_trades()
+    import sys
+    import os
+
+    args = sys.argv
+    if len(args) > 1:
+        args = args[1:]
+        for arg in args:
+            path = os.path.abspath(arg)
+            load_flex_trades(path=path)
+    else:
+        # expect brokers.toml to have an entry and
+        # pull from the web service.
+        load_flex_trades()
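
Note: the rewritten loader nests entries as account -> trade date -> trade
id. A sketch of the resulting in-memory shape (all values invented for
illustration; ``accounts`` is assumed to be a ``bidict``-like reverse map
of the ``brokers.toml`` account table, given the ``.inverse`` attribute):

    trades_by_account = {
        'margin': {                # user account name from brokers.toml
            '20220520': {          # flex report tradeDate
                '12345678': {      # tradeID (stringified for toml)
                    'tradeID': '12345678',
                    'tradeDate': '20220520',
                    'accountId': 'DU1234567',
                    # ...remaining flex report fields
                },
            },
        },
    }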

View File

@@ -18,9 +18,11 @@
 Broker configuration mgmt.
 """
+from contextlib import contextmanager as cm
 import platform
 import sys
 import os
+from os import path
 from os.path import dirname
 import shutil
 from typing import Optional
@@ -111,6 +113,7 @@ if _parent_user:
 _conf_names: set[str] = {
     'brokers',
+    'pp',
     'trades',
     'watchlists',
 }
@@ -147,19 +150,21 @@ def get_conf_path(
     conf_name: str = 'brokers',

 ) -> str:
-    """Return the default config path normally under
-    ``~/.config/piker`` on linux.
+    '''
+    Return the top-level default config path normally under
+    ``~/.config/piker`` on linux for a given ``conf_name``, the config
+    name.

     Contains files such as:
     - brokers.toml
+    - pp.toml
     - watchlists.toml
-    - trades.toml

     # maybe coming soon ;)
     - signals.toml
     - strats.toml

-    """
+    '''
     assert conf_name in _conf_names
     fn = _conf_fn_w_ext(conf_name)
     return os.path.join(
@@ -168,12 +173,57 @@ def get_conf_path(
     )


+@cm
+def open_trade_ledger(
+    broker: str,
+    account: str,
+
+) -> str:
+    '''
+    Indempotently create and read in a trade log file from the
+    ``<configuration_dir>/ledgers/`` directory.
+
+    Files are named per broker account of the form
+    ``<brokername>_<accountname>.toml``. The ``accountname`` here is the
+    name as defined in the user's ``brokers.toml`` config.
+
+    '''
+    ldir = path.join(_config_dir, 'ledgers')
+    if not path.isdir(ldir):
+        os.makedirs(ldir)
+
+    fname = f'trades_{broker}_{account}.toml'
+    tradesfile = path.join(ldir, fname)
+
+    if not path.isfile(tradesfile):
+        log.info(
+            f'Creating new local trades ledger: {tradesfile}'
+        )
+        with open(tradesfile, 'w') as cf:
+            pass  # touch
+    try:
+        with open(tradesfile, 'r') as cf:
+            ledger = toml.load(tradesfile)
+            cpy = ledger.copy()
+            yield cpy
+    finally:
+        if cpy != ledger:
+            # TODO: show diff output?
+            # https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
+            print(f'Updating ledger for {tradesfile}:\n')
+            ledger.update(cpy)
+
+            # we write on close the mutated ledger data
+            with open(tradesfile, 'w') as cf:
+                return toml.dump(ledger, cf)
+
+
 def repodir():
     '''
     Return the abspath to the repo directory.

     '''
-    dirpath = os.path.abspath(
+    dirpath = path.abspath(
         # we're 3 levels down in **this** module file
         dirname(dirname(os.path.realpath(__file__)))
     )
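
Note: a minimal usage sketch for the new ``open_trade_ledger()`` context
manager (the 'margin' account name and entry values are invented): mutate
the yielded copy and, only if it differs from what was loaded, the file
is re-written with ``toml.dump()`` on exit:

    from piker import config

    with config.open_trade_ledger('ib', 'margin') as ledger:
        ledger.setdefault('ib', {})['20220520'] = {
            '12345678': {'price': 4.2, 'size': 100},  # invented entry
        }
    # on exit this writes ~/.config/piker/ledgers/trades_ib_margin.toml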

View File

@@ -19,7 +19,6 @@ Supervisor for docker with included specific-image service helpers.
 '''
 import os
-import time
 from typing import (
     Optional,
     Callable,
@@ -187,65 +186,45 @@ class Container:
     async def cancel(
         self,
-        stop_msg: str,
     ) -> None:

         cid = self.cntr.id
-
-        # first try a graceful cancel
-        log.cancel(
-            f'SIGINT cancelling container: {cid}\n'
-            f'waiting on stop msg: "{stop_msg}"'
-        )
         self.try_signal('SIGINT')

-        start = time.time()
-        for _ in range(30):
-
-            with trio.move_on_after(0.5) as cs:
-                cs.shield = True
-                await self.process_logs_until(stop_msg)
-
-            # if we aren't cancelled on above checkpoint then we
-            # assume we read the expected stop msg and terminated.
-            break
-
-            try:
-                log.info(f'Polling for container shutdown:\n{cid}')
-
-                if self.cntr.status not in {'exited', 'not-running'}:
-                    self.cntr.wait(
-                        timeout=0.1,
-                        condition='not-running',
-                    )
-                break
-
-            except (
-                ReadTimeout,
-            ):
-                log.info(f'Still waiting on container:\n{cid}')
-                continue
-
-            except (
-                docker.errors.APIError,
-                ConnectionError,
-            ):
-                log.exception('Docker connection failure')
-                break
-        else:
-            delay = time.time() - start
-            log.error(
-                f'Failed to kill container {cid} after {delay}s\n'
-                'sending SIGKILL..'
-            )
-            # get out the big guns, bc apparently marketstore
-            # doesn't actually know how to terminate gracefully
-            # :eyeroll:...
-            self.try_signal('SIGKILL')
-
-        try:
-            log.info('Waiting on container shutdown: {cid}')
-            self.cntr.wait(
-                timeout=3,
-                condition='not-running',
-            )
-        except (
-            ReadTimeout,
-            ConnectionError,
-        ):
-            log.error(f'failed to wait on container {cid}')
-            raise
+        with trio.move_on_after(0.5) as cs:
+            cs.shield = True
+            await self.process_logs_until('initiating graceful shutdown')
+            await self.process_logs_until('exiting...',)
+
+        for _ in range(10):
+            with trio.move_on_after(0.5) as cs:
+                cs.shield = True
+                await self.process_logs_until('exiting...',)
+                break
+
+            if cs.cancelled_caught:
+                # get out the big guns, bc apparently marketstore
+                # doesn't actually know how to terminate gracefully
+                # :eyeroll:...
+                self.try_signal('SIGKILL')
+
+                try:
+                    log.info('Waiting on container shutdown: {cid}')
+                    self.cntr.wait(
+                        timeout=0.1,
+                        condition='not-running',
+                    )
+                    break
+
+                except (
+                    ReadTimeout,
+                    ConnectionError,
+                ):
+                    log.error(f'failed to wait on container {cid}')
+                    raise
+        else:
+            raise RuntimeError('Failed to cancel container {cid}')

         log.cancel(f'Container stopped: {cid}')
@@ -266,16 +245,13 @@ async def open_ahabd(
             # params, etc. passing to ``Containter.run()``?
             # call into endpoint for container config/init
             ep_func = NamespacePath(endpoint).load_ref()
-            (
-                dcntr,
-                cntr_config,
-                start_msg,
-                stop_msg,
-            ) = ep_func(client)
+            dcntr, cntr_config = ep_func(client)
             cntr = Container(dcntr)

             with trio.move_on_after(1):
-                found = await cntr.process_logs_until(start_msg)
+                found = await cntr.process_logs_until(
+                    "launching tcp listener for all services...",
+                )

             if not found and cntr not in client.containers.list():
                 raise RuntimeError(
@@ -295,9 +271,16 @@ async def open_ahabd(
             # callers to have root perms?
             await trio.sleep_forever()

-        finally:
+        except (
+            BaseException,
+            # trio.Cancelled,
+            # KeyboardInterrupt,
+        ):
             with trio.CancelScope(shield=True):
-                await cntr.cancel(stop_msg)
+                await cntr.cancel()
+            raise


 async def start_ahab(
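
Note: the retained cancel flow above is "poll with a timeout, escalate on
expiry". A distilled, self-contained sketch of that trio pattern with the
docker calls stubbed out (names invented; the stand-in sleep always
exceeds the window so this sketch exercises the escalation path):

    import trio

    async def stop_or_escalate() -> None:
        for _ in range(10):
            with trio.move_on_after(0.5) as cs:
                # shield so a cancelled parent can't interrupt teardown
                cs.shield = True
                await trio.sleep(1)  # stand-in for process_logs_until()
                break

            if cs.cancelled_caught:
                # graceful window expired -> SIGKILL in the real code
                print('escalating..')
        else:
            raise RuntimeError('Failed to cancel container')

    trio.run(stop_or_escalate)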

View File

@@ -127,15 +127,10 @@ def start_marketstore(
     import os
     import docker
     from .. import config

     get_console_log('info', name=__name__)

-    mktsdir = os.path.join(config._config_dir, 'marketstore')
-
-    # create when dne
-    if not os.path.isdir(mktsdir):
-        os.mkdir(mktsdir)
-
-    yml_file = os.path.join(mktsdir, 'mkts.yml')
+    yml_file = os.path.join(config._config_dir, 'mkts.yml')
     if not os.path.isfile(yml_file):
         log.warning(
             f'No `marketstore` config exists?: {yml_file}\n'
@@ -148,14 +143,14 @@ def start_marketstore(
     # create a mount from user's local piker config dir into container
     config_dir_mnt = docker.types.Mount(
         target='/etc',
-        source=mktsdir,
+        source=config._config_dir,
         type='bind',
     )

     # create a user config subdir where the marketstore
     # backing filesystem database can be persisted.
     persistent_data_dir = os.path.join(
-        mktsdir, 'data',
+        config._config_dir, 'data',
     )
     if not os.path.isdir(persistent_data_dir):
         os.mkdir(persistent_data_dir)
@@ -185,14 +180,7 @@ def start_marketstore(
         init=True,
         # remove=True,
     )
-    return (
-        dcntr,
-        _config,
-
-        # expected startup and stop msgs
-        "launching tcp listener for all services...",
-        "exiting...",
-    )
+    return dcntr, _config


 _tick_tbk_ids: tuple[str, str] = ('1Sec', 'TICK')
@@ -395,12 +383,7 @@ class Storage:
     ]:
         first_tsdb_dt, last_tsdb_dt = None, None
-        tsdb_arrays = await self.read_ohlcv(
-            fqsn,
-            # on first load we don't need to pull the max
-            # history per request size worth.
-            limit=3000,
-        )
+        tsdb_arrays = await self.read_ohlcv(fqsn)
         log.info(f'Loaded tsdb history {tsdb_arrays}')

         if tsdb_arrays:
@@ -418,7 +401,6 @@ class Storage:
         fqsn: str,
         timeframe: Optional[Union[int, str]] = None,
         end: Optional[int] = None,
-        limit: int = int(800e3),

     ) -> tuple[
         MarketstoreClient,
@@ -441,7 +423,7 @@ class Storage:
             # TODO: figure the max limit here given the
             # ``purepc`` msg size limit of purerpc: 33554432
-            limit=limit,
+            limit=int(800e3),
         )

         if timeframe is None:
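
Note: dropping the ``limit`` parameter hardcodes ``int(800e3)`` at the
call site. On the TODO about the purerpc message cap, a back-of-envelope
check (the ~64 byte record size is an assumption, not from the source):

    MSG_CAP = 33554432        # purerpc msg size limit cited above
    REC_SZ = 8 * 8            # assume 8 x 8-byte fields per OHLCV row
    print(MSG_CAP // REC_SZ)  # 524288 rows, i.e. less than int(800e3)

which suggests the hardcoded limit could overrun a single response frame
under that record-size assumption.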

View File

@@ -361,7 +361,7 @@ async def cascade(
     ) -> tuple[TaskTracker, int]:
         # TODO: adopt an incremental update engine/approach
         # where possible here eventually!
-        log.debug(f're-syncing fsp {func_name} to source')
+        log.warning(f're-syncing fsp {func_name} to source')
         tracker.cs.cancel()
         await tracker.complete.wait()
         tracker, index = await n.start(fsp_target)

View File

@@ -379,7 +379,7 @@ class Curve(pg.GraphicsObject):
     ) -> None:

         # default line draw last call
-        # with self.reset_cache():
+        with self.reset_cache():
             x = render_data['index']
             y = render_data[array_key]

View File

@@ -426,6 +426,71 @@ def graphics_update_cycle(
         profiler('view incremented')

+    if vlm_chart:
+        # always update y-label
+        ds.vlm_sticky.update_from_data(
+            *array[-1][['index', 'volume']]
+        )
+
+        if (
+            (
+                do_rt_update
+                or do_append
+                and liv
+            )
+            or trigger_all
+        ):
+            # TODO: make it so this doesn't have to be called
+            # once the $vlm is up?
+            vlm_chart.update_graphics_from_flow(
+                'volume',
+                # UGGGh, see ``maxmin()`` impl in `._fsp` for
+                # the overlayed plotitems... we need a better
+                # bay to invoke a maxmin per overlay..
+                render=False,
+                # XXX: ^^^^ THIS IS SUPER IMPORTANT! ^^^^
+                # without this, since we disable the
+                # 'volume' (units) chart after the $vlm starts
+                # up we need to be sure to enable this
+                # auto-ranging otherwise there will be no handler
+                # connected to update accompanying overlay
+                # graphics..
+            )
+            profiler('`vlm_chart.update_graphics_from_flow()`')
+
+            if (
+                mx_vlm_in_view != vars['last_mx_vlm']
+            ):
+                yrange = (0, mx_vlm_in_view * 1.375)
+                vlm_chart.view._set_yrange(
+                    yrange=yrange,
+                )
+                profiler('`vlm_chart.view._set_yrange()`')
+                # print(f'mx vlm: {last_mx_vlm} -> {mx_vlm_in_view}')
+                vars['last_mx_vlm'] = mx_vlm_in_view
+
+        for curve_name, flow in vlm_chart._flows.items():
+
+            if not flow.render:
+                continue
+
+            update_fsp_chart(
+                vlm_chart,
+                flow,
+                curve_name,
+                array_key=curve_name,
+                # do_append=uppx < update_uppx,
+                do_append=do_append,
+            )
+            # is this even doing anything?
+            # (pretty sure it's the real-time
+            # resizing from last quote?)
+            fvb = flow.plot.vb
+            fvb._set_yrange(
+                # autoscale_linked_plots=False,
+                name=curve_name,
+            )
+
     ticks_frame = quote.get('ticks', ())

     frames_by_type: dict[str, dict] = {}
@@ -475,16 +540,15 @@
         or do_append
         or trigger_all
     ):
-        # TODO: we should always update the "last" datum
-        # since the current range should at least be updated
-        # to it's max/min on the last pixel.
         chart.update_graphics_from_flow(
             chart.name,
             # do_append=uppx < update_uppx,
             do_append=do_append,
         )
+    # NOTE: we always update the "last" datum
+    # since the current range should at least be updated
+    # to it's max/min on the last pixel.

     # iterate in FIFO order per tick-frame
     for typ, tick in lasts.items():
@@ -589,106 +653,11 @@
             vars['last_mx'], vars['last_mn'] = mx, mn

     # run synchronous update on all linked flows
-    # TODO: should the "main" (aka source) flow be special?
     for curve_name, flow in chart._flows.items():
-        # update any overlayed fsp flows
-        if curve_name != chart.data_key:
-            update_fsp_chart(
-                chart,
-                flow,
-                curve_name,
-                array_key=curve_name,
-            )
-
-        # even if we're downsampled bigly
-        # draw the last datum in the final
-        # px column to give the user the mx/mn
-        # range of that set.
         if (
-            not do_append
-            # and not do_rt_update
+            not (do_rt_update or do_append)
             and liv
-        ):
-            flow.draw_last(
-                array_key=curve_name,
-                only_last_uppx=True,
-            )
-
-    # volume chart logic..
-    # TODO: can we unify this with the above loop?
-    if vlm_chart:
-        # always update y-label
-        ds.vlm_sticky.update_from_data(
-            *array[-1][['index', 'volume']]
-        )
-
-        if (
-            (
-                do_rt_update
-                or do_append
-                and liv
-            )
-            or trigger_all
-        ):
-            # TODO: make it so this doesn't have to be called
-            # once the $vlm is up?
-            vlm_chart.update_graphics_from_flow(
-                'volume',
-                # UGGGh, see ``maxmin()`` impl in `._fsp` for
-                # the overlayed plotitems... we need a better
-                # bay to invoke a maxmin per overlay..
-                render=False,
-                # XXX: ^^^^ THIS IS SUPER IMPORTANT! ^^^^
-                # without this, since we disable the
-                # 'volume' (units) chart after the $vlm starts
-                # up we need to be sure to enable this
-                # auto-ranging otherwise there will be no handler
-                # connected to update accompanying overlay
-                # graphics..
-            )
-            profiler('`vlm_chart.update_graphics_from_flow()`')
-
-            if (
-                mx_vlm_in_view != vars['last_mx_vlm']
-            ):
-                yrange = (0, mx_vlm_in_view * 1.375)
-                vlm_chart.view._set_yrange(
-                    yrange=yrange,
-                )
-                profiler('`vlm_chart.view._set_yrange()`')
-                # print(f'mx vlm: {last_mx_vlm} -> {mx_vlm_in_view}')
-                vars['last_mx_vlm'] = mx_vlm_in_view
-
-        for curve_name, flow in vlm_chart._flows.items():
-            if (
-                curve_name != 'volume' and
-                flow.render and (
-                    liv and
-                    do_rt_update or do_append
-                )
-            ):
-                update_fsp_chart(
-                    vlm_chart,
-                    flow,
-                    curve_name,
-                    array_key=curve_name,
-                    # do_append=uppx < update_uppx,
-                    do_append=do_append,
-                )
-                # is this even doing anything?
-                # (pretty sure it's the real-time
-                # resizing from last quote?)
-                fvb = flow.plot.vb
-                fvb._set_yrange(
-                    name=curve_name,
-                )
-
-            elif (
-                curve_name != 'volume'
-                and not do_append
-                and liv
-                and uppx >= 1
+
             # even if we're downsampled bigly
             # draw the last datum in the final
             # px column to give the user the mx/mn
             # range of that set.
@@ -696,9 +665,19 @@
         ):
             # always update the last datum-element
             # graphic for all flows
+            # print(f'drawing last {flow.name}')
             flow.draw_last(array_key=curve_name)

+        # TODO: should the "main" (aka source) flow be special?
+        if curve_name == chart.data_key:
+            continue
+
+        update_fsp_chart(
+            chart,
+            flow,
+            curve_name,
+            array_key=curve_name,
+        )
+

 async def display_symbol_data(
     godwidget: GodWidget,
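
Note: the ``do_rt_update or do_append and liv`` guard (used in both the
relocated vlm block and the flows loop) relies on Python binding ``and``
tighter than ``or``. A quick self-check of that parse:

    for do_rt_update in (False, True):
        for do_append in (False, True):
            for liv in (False, True):
                assert (
                    (do_rt_update or do_append and liv)
                    == (do_rt_update or (do_append and liv))
                )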

View File

@@ -175,7 +175,6 @@ def render_baritems(
             name=f'{flow.name}_ds_ohlc',
             color=bars._color,
         )
-        flow.ds_graphics = curve
         curve.hide()
         self.plot.addItem(curve)
@@ -193,20 +192,18 @@
     uppx = curve.x_uppx()
     in_line = should_line = curve.isVisible()
     if (
-        in_line
+        should_line
         and uppx < x_gt
     ):
         # print('FLIPPING TO BARS')
         should_line = False
-        flow._in_ds = False

     elif (
-        not in_line
+        not should_line
         and uppx >= x_gt
     ):
         # print('FLIPPING TO LINE')
         should_line = True
-        flow._in_ds = True

     profiler(f'ds logic complete line={should_line}')
@@ -336,13 +333,7 @@ class Flow(msgspec.Struct):  # , frozen=True):
     '''
     name: str
     plot: pg.PlotItem
-    graphics: Union[Curve, BarItems]
-
-    # in some cases a flow may want to change its
-    # graphical "type" or, "form" when downsampling,
-    # normally this is just a plain line.
-    ds_graphics: Optional[Curve] = None
-
+    graphics: Curve
     _shm: ShmArray

     is_ohlc: bool = False
@@ -549,7 +540,6 @@ class Flow(msgspec.Struct):  # , frozen=True):
         should_redraw: bool = False

         rkwargs = {}
-        should_line = False
         if isinstance(graphics, BarItems):
             # XXX: special case where we change out graphics
             # to a line after a certain uppx threshold.
@@ -566,8 +556,8 @@ class Flow(msgspec.Struct):  # , frozen=True):
                 profiler,
                 **kwargs,
             )
+            # bars = True
             should_redraw = changed_to_line or not should_line
-            self._in_ds = should_line

         else:
             r = self._src_r
@@ -671,17 +661,6 @@ class Flow(msgspec.Struct):  # , frozen=True):
             # assign output paths to graphicis obj
             graphics.path = r.path
             graphics.fast_path = r.fast_path
-
-            # XXX: we don't need this right?
-            # graphics.draw_last_datum(
-            #     path,
-            #     src_array,
-            #     data,
-            #     reset,
-            #     array_key,
-            # )
-            # graphics.update()
-            # profiler('.update()')

         else:
             # assign output paths to graphicis obj
             graphics.path = r.path
@@ -694,15 +673,16 @@ class Flow(msgspec.Struct):  # , frozen=True):
                 reset,
                 array_key,
             )
-            graphics.update()
-            profiler('.update()')
+
+        # TODO: is this ever better?
+        # graphics.prepareGeometryChange()
+        # profiler('.prepareGeometryChange()')

         # TODO: does this actuallly help us in any way (prolly should
         # look at the source / ask ogi). I think it avoid artifacts on
         # wheel-scroll downsampling curve updates?
-        # TODO: is this ever better?
-        # graphics.prepareGeometryChange()
-        # profiler('.prepareGeometryChange()')
+        graphics.update()
+        profiler('.update()')

         # track downsampled state
         self._in_ds = r._in_ds
@@ -712,7 +692,6 @@ class Flow(msgspec.Struct):  # , frozen=True):
     def draw_last(
         self,
         array_key: Optional[str] = None,
-        only_last_uppx: bool = False,

     ) -> None:
@@ -732,41 +711,19 @@ class Flow(msgspec.Struct):  # , frozen=True):
             array_key,
         )

-        # the renderer is downsampling we choose
-        # to always try and updadte a single (interpolating)
-        # line segment that spans and tries to display
-        # the las uppx's worth of datums.
-        # we only care about the last pixel's
-        # worth of data since that's all the screen
-        # can represent on the last column where
-        # the most recent datum is being drawn.
-        if self._in_ds or only_last_uppx:
-            dsg = self.ds_graphics or self.graphics
-
-            # XXX: pretty sure we don't need this?
-            # if isinstance(g, Curve):
-            #     with dsg.reset_cache():
+        if self._in_ds:
+            # we only care about the last pixel's
+            # worth of data since that's all the screen
+            # can represent on the last column where
+            # the most recent datum is being drawn.
             uppx = self._last_uppx
             y = y[-uppx:]
             ymn, ymx = y.min(), y.max()
             # print(f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}')
-            try:
-                iuppx = x[-uppx]
-            except IndexError:
-                # we're less then an x-px wide so just grab the start
-                # datum index.
-                iuppx = x[0]
-
-            dsg._last_line = QLineF(
-                iuppx, ymn,
+            g._last_line = QLineF(
+                x[-2], ymn,
                 x[-1], ymx,
             )
-            # print(f'updating DS curve {self.name}')
-            dsg.update()

-        else:
-            # print(f'updating NOT DS curve {self.name}')
-            g.update()
+        g.update()


 def by_index_and_key(
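
Note: the surviving ``draw_last()`` branch draws one interpolating segment
spanning the last x-pixel's worth of y-data when downsampled. A sketch of
that computation with numpy arrays standing in for the shm-backed ones:

    import numpy as np
    from PyQt5.QtCore import QLineF

    x = np.arange(100, dtype=float)
    y = np.random.randn(100).cumsum()

    uppx = 10                  # datums per x-pixel at current zoom
    ys = y[-uppx:]
    last_line = QLineF(
        x[-2], ys.min(),       # left edge per the retained code
        x[-1], ys.max(),       # right edge at the most recent datum
    )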

View File

@@ -440,7 +440,7 @@ class FspAdmin:
                     # if the chart isn't hidden try to update
                     # the data on screen.
                     if not self.linked.isHidden():
-                        log.debug(f'Re-syncing graphics for fsp: {ns_path}')
+                        log.info(f'Re-syncing graphics for fsp: {ns_path}')
                         self.linked.graphics_cycle(
                             trigger_all=True,
                             prepend_update_index=info['first'],

View File

@@ -57,7 +57,6 @@ setup(
         # from github currently (see requirements.txt)
         # 'trimeter',  # not released yet..
         # 'tractor',
-        # asyncvnc,

         # brokers
         'asks==2.4.8',
@@ -72,34 +71,32 @@ setup(
         # UI
         'PyQt5',
-        # 'pyqtgraph', from our fork see reqs.txt
-        'qdarkstyle >= 3.0.2',  # themeing
-        'fuzzywuzzy[speedup]',  # fuzzy search
+        'pyqtgraph',
+        'qdarkstyle >= 3.0.2',
+        # fuzzy search
+        'fuzzywuzzy[speedup]',

         # tsdbs
-        # anyio-marketstore  # from gh see reqs.txt
+        'pymarketstore',
     ],
     extras_require={
+
+        # serialization
         'tsdb': [
             'docker',
         ],
     },
     tests_require=['pytest'],
-    python_requires=">=3.10",
-    keywords=[
-        "async",
-        "trading",
-        "finance",
-        "quant",
-        "charting",
-    ],
+    python_requires=">=3.9",  # literally for ``datetime.datetime.fromisoformat``...
+    keywords=["async", "trading", "finance", "quant", "charting"],
     classifiers=[
         'Development Status :: 3 - Alpha',
         'License :: OSI Approved :: ',
         'Operating System :: POSIX :: Linux',
         "Programming Language :: Python :: Implementation :: CPython",
         "Programming Language :: Python :: 3 :: Only",
+        "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
         'Intended Audience :: Financial and Insurance Industry',
         'Intended Audience :: Science/Research',