Merge pull request #453 from pikers/overlays_interaction_latency_tuning

Overlays interaction latency tuning
backend_spec
goodboy 2023-02-14 13:48:12 -05:00 committed by GitHub
commit 139b8ba0f4
19 changed files with 1084 additions and 901 deletions

View File

@ -257,7 +257,7 @@ async def open_piker_runtime(
# and spawn the service tree distributed per that.
start_method: str = 'trio',
tractor_kwargs: dict = {},
**tractor_kwargs,
) -> tuple[
tractor.Actor,

View File

@ -152,9 +152,14 @@ class Profiler(object):
# don't do anything
return cls._disabledProfiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._msgs = []
# create an actual profiling object
if cls._depth < 1:
cls._msgs = []
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
@ -174,8 +179,12 @@ class Profiler(object):
self._markCount += 1
newTime = perf_counter()
tot_ms = (newTime - self._firstTime) * 1000
ms = (newTime - self._lastTime) * 1000
self._newMsg(" %s: %0.4f ms", msg, ms)
self._newMsg(
f" {msg}: {ms:0.4f}, tot:{tot_ms:0.4f}"
)
self._lastTime = newTime
def mark(self, msg=None):
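As a rough illustration of the nesting change above, here is a minimal sketch (a hypothetical MiniProfiler, not the real `Profiler`) of how a class-level depth counter lets nested timing scopes share one message buffer, with only the outermost scope resetting and flushing it:

from time import perf_counter

class MiniProfiler:
    # class-level state shared across nested instances (sketch only)
    _depth: int = 0
    _msgs: list[str] = []

    def __init__(self, name: str) -> None:
        cls = type(self)
        cls._depth += 1
        if cls._depth == 1:
            # only the outermost scope resets the shared buffer
            cls._msgs = []
        self._name = name
        self._last = perf_counter()

    def mark(self, msg: str) -> None:
        now = perf_counter()
        ms = (now - self._last) * 1000
        type(self)._msgs.append(f'  {self._name}: {msg}: {ms:0.4f} ms')
        self._last = now

    def finish(self) -> None:
        cls = type(self)
        cls._depth -= 1
        if cls._depth == 0:
            # only the outermost scope prints the combined report
            print('\n'.join(cls._msgs))

outer = MiniProfiler('outer')
inner = MiniProfiler('inner')   # nested: shares the same message list
inner.mark('sub-step')
inner.finish()
outer.mark('full cycle')
outer.finish()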

View File

@ -55,6 +55,10 @@ class IncrementalFormatter(msgspec.Struct):
shm: ShmArray
viz: Viz
# the value to be multiplied by any index into the x/y_1d arrays
# given the input index is based on the original source data array.
flat_index_ratio: float = 1
@property
def index_field(self) -> 'str':
'''
@ -92,8 +96,8 @@ class IncrementalFormatter(msgspec.Struct):
xy_nd_stop: int | None = None
# TODO: eventually incrementally update 1d-pre-graphics path data?
# x_1d: Optional[np.ndarray] = None
# y_1d: Optional[np.ndarray] = None
x_1d: np.ndarray | None = None
y_1d: np.ndarray | None = None
# incremental view-change state(s) tracking
_last_vr: tuple[float, float] | None = None
@ -107,32 +111,6 @@ class IncrementalFormatter(msgspec.Struct):
'''
return self.viz.index_step()
def __repr__(self) -> str:
msg = (
f'{type(self)}: ->\n\n'
f'fqsn={self.viz.name}\n'
f'shm_name={self.shm.token["shm_name"]}\n\n'
f'last_vr={self._last_vr}\n'
f'last_ivdr={self._last_ivdr}\n\n'
f'xy_slice={self.xy_slice}\n'
# f'xy_nd_stop={self.xy_nd_stop}\n\n'
)
x_nd_len = 0
y_nd_len = 0
if self.x_nd is not None:
x_nd_len = len(self.x_nd)
y_nd_len = len(self.y_nd)
msg += (
f'x_nd_len={x_nd_len}\n'
f'y_nd_len={y_nd_len}\n'
)
return msg
def diff(
self,
new_read: tuple[np.ndarray],
@ -180,8 +158,6 @@ class IncrementalFormatter(msgspec.Struct):
# set us in a zero-to-append state
nd_stop = self.xy_nd_stop = src_stop
align_index = array[self.index_field]
# compute the length diffs between the first/last index entry in
# the input data and the last indexes we have on record from the
# last time we updated the curve index.
@ -334,6 +310,9 @@ class IncrementalFormatter(msgspec.Struct):
array = in_view
profiler(f'{self.viz.name} view range slice {view_range}')
# TODO: we need to check if the last-datum-in-view is true and
# if so only slice to the 2nd last datum.
# hist = array[:slice_to_head]
# XXX: WOA WTF TRACTOR DEBUGGING BUGGG
@ -353,6 +332,11 @@ class IncrementalFormatter(msgspec.Struct):
array_key,
view_range,
)
# cache/save last 1d outputs for use by other
# readers (eg. `Viz.draw_last_datum()` in the
# only-draw-last-uppx case).
self.x_1d = x_1d
self.y_1d = y_1d
# app_tres = None
# if append_len:
@ -376,11 +360,6 @@ class IncrementalFormatter(msgspec.Struct):
# update the last "in view data range"
if len(x_1d):
self._last_ivdr = x_1d[0], x_1d[-1]
if (
self.index_field == 'time'
and (x_1d[-1] == 0.5).any()
):
breakpoint()
profiler('.format_to_1d()')
@ -503,14 +482,22 @@ class IncrementalFormatter(msgspec.Struct):
# NOTE: we don't include the very last datum which is filled in
# normally by another graphics object.
x_1d = array[self.index_field][:-1]
if (
self.index_field == 'time'
and x_1d.any()
and (x_1d[-1] == 0.5).any()
):
breakpoint()
y_1d = array[array_key][:-1]
# name = self.viz.name
# if 'trade_rate' == name:
# s = 4
# x_nd = list(self.x_nd[self.xy_slice][-s:-1])
# y_nd = list(self.y_nd[self.xy_slice][-s:-1])
# print(
# f'{name}:\n'
# f'XY data:\n'
# f'x: {x_nd}\n'
# f'y: {y_nd}\n\n'
# f'x_1d: {list(x_1d[-s:])}\n'
# f'y_1d: {list(y_1d[-s:])}\n\n'
# )
return (
x_1d,
y_1d,
@ -532,6 +519,7 @@ class OHLCBarsFmtr(IncrementalFormatter):
fields: list[str] = field(
default_factory=lambda: ['open', 'high', 'low', 'close']
)
flat_index_ratio: float = 4
def allocate_xy_nd(
self,
@ -627,7 +615,7 @@ class OHLCBarsFmtr(IncrementalFormatter):
'''
x, y, c = path_arrays_from_ohlc(
array,
array[:-1],
start,
bar_w=self.index_step_size,
bar_gap=w * self.index_step_size,
@ -826,13 +814,6 @@ class StepCurveFmtr(IncrementalFormatter):
x_1d = x_step_iv.reshape(x_step_iv.size)
y_1d = y_step_iv.reshape(y_step_iv.size)
if (
self.index_field == 'time'
and x_1d.any()
and (x_1d == 0.5).any()
):
breakpoint()
# debugging
# if y_1d.any():
# s = 6
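The x_1d/y_1d caching added in this file lets later readers (eg. a draw-last-datum fast path) reuse the most recently formatted 1d arrays without reformatting. A rough, hypothetical sketch of that pattern (not the real `IncrementalFormatter`):

import numpy as np

class MiniFormatter:
    def __init__(self) -> None:
        # cache of the last 1d outputs for use by other readers
        self.x_1d: np.ndarray | None = None
        self.y_1d: np.ndarray | None = None

    def format_to_1d(
        self,
        xs: np.ndarray,
        ys: np.ndarray,
    ) -> tuple[np.ndarray, np.ndarray]:
        x_1d = xs.astype(float)
        y_1d = ys.astype(float)
        # save outputs so consumers can read them back cheaply
        self.x_1d, self.y_1d = x_1d, y_1d
        return x_1d, y_1d

fmtr = MiniFormatter()
fmtr.format_to_1d(np.arange(8), np.random.rand(8))
# a later consumer (eg. a last-datum drawer) reads fmtr.x_1d / fmtr.y_1d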

View File

@ -91,6 +91,14 @@ def ds_m4(
x_end = x[-1] # x end value/highest in domain
xrange = (x_end - x_start)
if xrange < 0:
log.error(f'-VE M4 X-RANGE: {x_start} -> {x_end}')
# XXX: broken x-range calc-case, likely the x-end points
# are wrong and have some default value set (such as
# x_end -> <some epoch float> while x_start -> 0.5).
# breakpoint()
return None
# XXX: always round up on the input pixels
# lnx = len(x)
# uppx *= max(4 / (1 + math.log(uppx, 2)), 1)

View File

@ -17,6 +17,11 @@
Super fast ``QPainterPath`` generation related operator routines.
"""
from math import (
ceil,
floor,
)
import numpy as np
from numpy.lib import recfunctions as rfn
from numba import (
@ -58,20 +63,27 @@ def xy_downsample(
# downsample whenever more than 1 pixel per datum can be shown.
# always refresh data bounds until we get diffing
# working properly, see above..
bins, x, y, ymn, ymx = ds_m4(
m4_out = ds_m4(
x,
y,
uppx,
)
# flatten output to 1d arrays suitable for path-graphics generation.
x = np.broadcast_to(x[:, None], y.shape)
x = (x + np.array(
[-x_spacer, 0, 0, x_spacer]
)).flatten()
y = y.flatten()
if m4_out is not None:
bins, x, y, ymn, ymx = m4_out
# flatten output to 1d arrays suitable for path-graphics generation.
x = np.broadcast_to(x[:, None], y.shape)
x = (x + np.array(
[-x_spacer, 0, 0, x_spacer]
)).flatten()
y = y.flatten()
return x, y, ymn, ymx
return x, y, ymn, ymx
# XXX: we accept a None output for the case where the input range
# to ``ds_m4()`` is bad (-ve) and we want to catch and debug
# that (seemingly super rare) circumstance..
return None
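A self-contained sketch of the `None`-propagation pattern above: the downsampler may now refuse a corrupted (negative) x-range, so callers only unpack and flatten its output when one is returned. Helper names here are hypothetical, not the real `ds_m4`:

import numpy as np

def maybe_downsample(
    x: np.ndarray,
    y: np.ndarray,
) -> tuple[np.ndarray, np.ndarray, float, float] | None:
    # refuse a corrupted (negative) x-range, mirroring the guard
    # added to the M4 routine above.
    if x[-1] - x[0] < 0:
        return None

    # toy stand-in for M4 binning: 4 samples per output column
    bins = len(x) // 4
    if bins == 0:
        return None
    y2d = y[:bins * 4].reshape(bins, 4)
    x2d = np.broadcast_to(x[:bins * 4:4, None], y2d.shape)
    return (
        x2d.flatten(),
        y2d.flatten(),
        float(y.min()),
        float(y.max()),
    )

out = maybe_downsample(np.arange(64.), np.random.rand(64))
if out is not None:
    x_1d, y_1d, ymn, ymx = out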
@njit(
@ -285,10 +297,7 @@ def slice_from_time(
stop_t: float,
step: int | None = None,
) -> tuple[
slice,
slice,
]:
) -> slice:
'''
Calculate array indices mapped from a time range and return them in
a slice.
@ -308,22 +317,32 @@ def slice_from_time(
)
times = arr['time']
t_first = round(times[0])
t_first = floor(times[0])
t_last = ceil(times[-1])
# the greatest index we can return which slices to the
# end of the input array.
read_i_max = arr.shape[0]
# TODO: require this is always passed in?
if step is None:
step = round(times[-1] - times[-2])
step = round(t_last - times[-2])
if step == 0:
# XXX: HOW TF is this happening?
step = 1
# compute (presumed) uniform-time-step index offsets
i_start_t = round(start_t)
read_i_start = round(((i_start_t - t_first) // step)) - 1
i_start_t = floor(start_t)
read_i_start = floor(((i_start_t - t_first) // step)) - 1
i_stop_t = round(stop_t)
read_i_stop = round((i_stop_t - t_first) // step) + 1
i_stop_t = ceil(stop_t)
# XXX: edge case -> always set stop index to last in array whenever
# the input stop time is detected to be greater than the equiv time
# stamp at that last entry.
if i_stop_t >= t_last:
read_i_stop = read_i_max
else:
read_i_stop = ceil((i_stop_t - t_first) // step) + 1
# always clip outputs to array support
# for read start:
@ -367,7 +386,7 @@ def slice_from_time(
# up_to_arith_start = index[:read_i_start]
if (
new_read_i_start < read_i_start
new_read_i_start <= read_i_start
):
# t_diff = t_iv_start - start_t
# print(
@ -391,14 +410,15 @@ def slice_from_time(
# )
new_read_i_stop = np.searchsorted(
times[read_i_start:],
# times,
i_stop_t,
side='left',
)
if (
new_read_i_stop < read_i_stop
new_read_i_stop <= read_i_stop
):
read_i_stop = read_i_start + new_read_i_stop
read_i_stop = read_i_start + new_read_i_stop + 1
# sanity checks for range size
# samples = (i_stop_t - i_start_t) // step
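A condensed sketch of the index arithmetic being adjusted above, assuming a uniform time step (names hypothetical): the start bound is floored, the stop bound is ceiled, and a stop time at or past the last timestamp clamps the slice to the end of the array:

from math import ceil, floor
import numpy as np

def time_range_to_slice(
    times: np.ndarray,
    start_t: float,
    stop_t: float,
    step: int,
) -> slice:
    t_first = floor(times[0])
    t_last = ceil(times[-1])
    read_i_max = times.shape[0]

    read_i_start = max(
        floor((floor(start_t) - t_first) // step) - 1,
        0,
    )

    i_stop_t = ceil(stop_t)
    if i_stop_t >= t_last:
        # stop time is past the last sample: slice to array end
        read_i_stop = read_i_max
    else:
        read_i_stop = ceil((i_stop_t - t_first) // step) + 1

    return slice(read_i_start, min(read_i_stop, read_i_max))

times = np.arange(1_000_000, 1_000_060, dtype=float)  # 60 x 1s samples
assert time_range_to_slice(times, times[10], times[-1] + 5, step=1).stop == 60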

View File

@ -207,7 +207,7 @@ def get_feed_bus(
) -> _FeedsBus:
'''
Retreive broker-daemon-local data feeds bus from process global
Retrieve broker-daemon-local data feeds bus from process global
scope. Serialize task access to lock.
'''
@ -250,6 +250,7 @@ async def start_backfill(
shm: ShmArray,
timeframe: float,
sampler_stream: tractor.MsgStream,
feed_is_live: trio.Event,
last_tsdb_dt: Optional[datetime] = None,
storage: Optional[Storage] = None,
@ -281,7 +282,14 @@ async def start_backfill(
- pendulum.from_timestamp(times[-2])
).seconds
if step_size_s == 60:
# if the market is open (aka we have a live feed) but the
# history sample step index seems off we report the surrounding
# data and drop into a bp. this case shouldn't really ever
# happen if we're doing history retrieval correctly.
if (
step_size_s == 60
and feed_is_live.is_set()
):
inow = round(time.time())
diff = inow - times[-1]
if abs(diff) > 60:
@ -499,6 +507,7 @@ async def basic_backfill(
bfqsn: str,
shms: dict[int, ShmArray],
sampler_stream: tractor.MsgStream,
feed_is_live: trio.Event,
) -> None:
@ -518,6 +527,7 @@ async def basic_backfill(
shm,
timeframe,
sampler_stream,
feed_is_live,
)
)
except DataUnavailable:
@ -534,6 +544,7 @@ async def tsdb_backfill(
bfqsn: str,
shms: dict[int, ShmArray],
sampler_stream: tractor.MsgStream,
feed_is_live: trio.Event,
task_status: TaskStatus[
tuple[ShmArray, ShmArray]
@ -568,6 +579,8 @@ async def tsdb_backfill(
shm,
timeframe,
sampler_stream,
feed_is_live,
last_tsdb_dt=last_tsdb_dt,
tsdb_is_up=True,
storage=storage,
@ -870,6 +883,7 @@ async def manage_history(
60: hist_shm,
},
sample_stream,
feed_is_live,
)
# yield back after client connect with filled shm
@ -904,6 +918,7 @@ async def manage_history(
60: hist_shm,
},
sample_stream,
feed_is_live,
)
task_status.started((
hist_zero_index,
@ -1065,7 +1080,10 @@ async def allocate_persistent_feed(
# seed the buffer with a history datum - this is most handy
# for many backends which don't sample @ 1s OHLC but do have
# slower data such as 1m OHLC.
if not len(rt_shm.array):
if (
not len(rt_shm.array)
and hist_shm.array.size
):
rt_shm.push(hist_shm.array[-3:-1])
ohlckeys = ['open', 'high', 'low', 'close']
rt_shm.array[ohlckeys][-2:] = hist_shm.array['close'][-1]
@ -1076,6 +1094,9 @@ async def allocate_persistent_feed(
rt_shm.array['time'][0] = ts
rt_shm.array['time'][1] = ts + 1
elif hist_shm.array.size == 0:
await tractor.breakpoint()
# wait the spawning parent task to register its subscriber
# send-stream entry before we start the sample loop.
await sub_registered.wait()
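For context on the new `feed_is_live` event threaded through the backfill tasks in this file, a minimal hypothetical sketch of gating a sample-step sanity check on whether the live feed has started (not the real `start_backfill` signature):

import time
import trio

async def check_history_freshness(
    times: list[float],
    feed_is_live: trio.Event,
) -> None:
    step_size_s = round(times[-1] - times[-2])

    # only complain about a stale-looking last sample when the
    # market is presumably open, ie. the live feed event fired.
    if (
        step_size_s == 60
        and feed_is_live.is_set()
    ):
        diff = round(time.time()) - times[-1]
        if abs(diff) > 60:
            print(f'history may be stale by ~{diff}s')

async def main() -> None:
    feed_is_live = trio.Event()
    feed_is_live.set()  # pretend the live feed just came up
    await check_history_freshness([0.0, 60.0, 120.0], feed_is_live)

trio.run(main)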

View File

@ -22,17 +22,11 @@ real-time data processing data-structures.
"""
from __future__ import annotations
from contextlib import asynccontextmanager as acm
from functools import partial
from typing import (
AsyncIterator,
TYPE_CHECKING,
)
import tractor
from tractor.trionics import (
maybe_open_context,
)
import pendulum
import numpy as np
@ -45,9 +39,6 @@ from ._sharedmem import (
ShmArray,
_Token,
)
from ._sampling import (
open_sample_stream,
)
# from .._profile import (
# Profiler,
# pg_profile_enabled,
@ -151,26 +142,6 @@ class Flume(Struct):
async def receive(self) -> dict:
return await self.stream.receive()
@acm
async def index_stream(
self,
delay_s: float = 1,
) -> AsyncIterator[int]:
if not self.feed:
raise RuntimeError('This flume is not part of any ``Feed``?')
# TODO: maybe a public (property) API for this in ``tractor``?
portal = self.stream._ctx._portal
assert portal
# XXX: this should be singleton on a host,
# a lone broker-daemon per provider should be
# created for all practical purposes
async with open_sample_stream(float(delay_s)) as stream:
yield stream
def get_ds_info(
self,
) -> tuple[float, float, float]:

View File

@ -54,7 +54,7 @@ def open_trade_ledger(
broker: str,
account: str,
) -> str:
) -> dict:
'''
Idempotently create and read in a trade log file from the
``<configuration_dir>/ledgers/`` directory.

View File

@ -50,7 +50,6 @@ from ._cursor import (
ContentsLabel,
)
from ..data._sharedmem import ShmArray
from ._l1 import L1Labels
from ._ohlc import BarItems
from ._curve import (
Curve,
@ -70,12 +69,10 @@ from ..data._source import Symbol
from ..log import get_logger
from ._interaction import ChartView
from ._forms import FieldsForm
from .._profile import pg_profile_enabled, ms_slower_then
from ._overlay import PlotItemOverlay
from ._dataviz import Viz
from ._search import SearchWidget
from . import _pg_overrides as pgo
from .._profile import Profiler
if TYPE_CHECKING:
from ._display import DisplayState
@ -127,7 +124,10 @@ class GodWidget(QWidget):
# self.init_strategy_ui()
# self.vbox.addLayout(self.hbox)
self._chart_cache: dict[str, LinkedSplits] = {}
self._chart_cache: dict[
str,
tuple[LinkedSplits, LinkedSplits],
] = {}
self.hist_linked: Optional[LinkedSplits] = None
self.rt_linked: Optional[LinkedSplits] = None
@ -147,23 +147,6 @@ class GodWidget(QWidget):
def linkedsplits(self) -> LinkedSplits:
return self.rt_linked
# def init_timeframes_ui(self):
# self.tf_layout = QHBoxLayout()
# self.tf_layout.setSpacing(0)
# self.tf_layout.setContentsMargins(0, 12, 0, 0)
# time_frames = ('1M', '5M', '15M', '30M', '1H', '1D', '1W', 'MN')
# btn_prefix = 'TF'
# for tf in time_frames:
# btn_name = ''.join([btn_prefix, tf])
# btn = QtWidgets.QPushButton(tf)
# # TODO:
# btn.setEnabled(False)
# setattr(self, btn_name, btn)
# self.tf_layout.addWidget(btn)
# self.toolbar_layout.addLayout(self.tf_layout)
# XXX: strat loader/saver that we don't need yet.
# def init_strategy_ui(self):
# self.strategy_box = StrategyBoxWidget(self)
@ -545,6 +528,8 @@ class LinkedSplits(QWidget):
style: str = 'ohlc_bar',
**add_plot_kwargs,
) -> ChartPlotWidget:
'''
Start up and show main (price) chart and all linked subcharts.
@ -569,6 +554,7 @@ class LinkedSplits(QWidget):
style=style,
_is_main=True,
sidepane=sidepane,
**add_plot_kwargs,
)
# add crosshair graphic
self.chart.addItem(self.cursor)
@ -593,6 +579,7 @@ class LinkedSplits(QWidget):
_is_main: bool = False,
sidepane: Optional[QWidget] = None,
draw_kwargs: dict = {},
**cpw_kwargs,
@ -650,7 +637,8 @@ class LinkedSplits(QWidget):
cpw.hideAxis('bottom')
if (
_xaxis_at == 'bottom' and (
_xaxis_at == 'bottom'
and (
self.xaxis_chart
or (
not self.subplots
@ -658,6 +646,8 @@ class LinkedSplits(QWidget):
)
)
):
# hide the previous x-axis chart's bottom axis since we're
# presumably being appended to the bottom subplot.
if self.xaxis_chart:
self.xaxis_chart.hideAxis('bottom')
@ -702,7 +692,12 @@ class LinkedSplits(QWidget):
# link chart x-axis to main chart
# this is 1/2 of where the `Link` in ``LinkedSplit``
# comes from ;)
cpw.setXLink(self.chart)
cpw.cv.setXLink(self.chart)
# NOTE: above is the same as the following,
# link this subchart's axes to the main top level chart.
# if self.chart:
# cpw.cv.linkView(0, self.chart.cv)
add_label = False
anchor_at = ('top', 'left')
@ -710,12 +705,12 @@ class LinkedSplits(QWidget):
# draw curve graphics
if style == 'ohlc_bar':
# graphics, data_key = cpw.draw_ohlc(
viz = cpw.draw_ohlc(
name,
shm,
flume=flume,
array_key=array_key
array_key=array_key,
**draw_kwargs,
)
self.cursor.contents_labels.add_label(
cpw,
@ -733,6 +728,7 @@ class LinkedSplits(QWidget):
flume,
array_key=array_key,
color='default_light',
**draw_kwargs,
)
elif style == 'step':
@ -746,11 +742,21 @@ class LinkedSplits(QWidget):
step_mode=True,
color='davies',
fill_color='davies',
**draw_kwargs,
)
else:
raise ValueError(f"Chart style {style} is currently unsupported")
# NOTE: back-link the new sub-chart to trigger y-autoranging in
# the (ohlc parent) main chart for this linked set.
if self.chart:
main_viz = self.chart.get_viz(self.chart.name)
self.chart.view.enable_auto_yrange(
src_vb=cpw.view,
viz=main_viz,
)
graphics = viz.graphics
data_key = viz.name
@ -814,7 +820,9 @@ class LinkedSplits(QWidget):
# write our own wrapper around `PlotItem`..
class ChartPlotWidget(pg.PlotWidget):
'''
``GraphicsView`` subtype containing a single ``PlotItem``.
``GraphicsView`` subtype containing a ``.plotItem: PlotItem`` as well
as a ``.pi_overlay: PlotItemOverlay`` which helps manage and compose
multiple overlaid view boxes for flow graphics.
- The added methods allow for plotting OHLC sequences from
``np.ndarray``s with appropriate field names.
@ -871,17 +879,17 @@ class ChartPlotWidget(pg.PlotWidget):
self.sidepane: Optional[FieldsForm] = None
# source of our custom interactions
self.cv = cv = self.mk_vb(name)
self.cv = self.mk_vb(name)
pi = pgo.PlotItem(
viewBox=cv,
viewBox=self.cv,
name=name,
**kwargs,
)
pi.chart_widget = self
super().__init__(
background=hcolor(view_color),
viewBox=cv,
viewBox=self.cv,
# parent=None,
# plotItem=None,
# antialias=True,
@ -892,7 +900,9 @@ class ChartPlotWidget(pg.PlotWidget):
# give viewbox as reference to chart
# allowing for kb controls and interactions on **this** widget
# (see our custom view mode in `._interactions.py`)
cv.chart = self
self.cv.chart = self
self.pi_overlay: PlotItemOverlay = PlotItemOverlay(self.plotItem)
# ensure internal pi matches
assert self.cv is self.plotItem.vb
@ -921,8 +931,6 @@ class ChartPlotWidget(pg.PlotWidget):
# show background grid
self.showGrid(x=False, y=True, alpha=0.3)
self.pi_overlay: PlotItemOverlay = PlotItemOverlay(self.plotItem)
# idempotent startup flag for auto-yrange subsys
# to detect the "first time" y-domain graphics begin
# to be shown in the (main) graphics view.
@ -1111,14 +1119,6 @@ class ChartPlotWidget(pg.PlotWidget):
link_axes=(0,),
)
# connect auto-yrange callbacks *from* this new
# view **to** this parent and likewise *from* the
# main/parent chart back *to* the created overlay.
cv.enable_auto_yrange(src_vb=self.view)
# makes it so that interaction on the new overlay will reflect
# back on the main chart (which overlay was added to).
self.view.enable_auto_yrange(src_vb=cv)
# add axis title
# TODO: do we want this API to still work?
# raxis = pi.getAxis('right')
@ -1158,8 +1158,6 @@ class ChartPlotWidget(pg.PlotWidget):
if is_ohlc:
graphics = BarItems(
linked=self.linked,
plotitem=pi,
color=color,
name=name,
**graphics_kwargs,
@ -1189,6 +1187,16 @@ class ChartPlotWidget(pg.PlotWidget):
# register curve graphics with this viz
graphics=graphics,
)
# connect auto-yrange callbacks *from* this new
# view **to** this parent and likewise *from* the
# main/parent chart back *to* the created overlay.
pi.vb.enable_auto_yrange(
src_vb=self.view,
viz=viz,
)
pi.viz = viz
assert isinstance(viz.shm, ShmArray)
# TODO: this probably needs its own method?
@ -1316,13 +1324,6 @@ class ChartPlotWidget(pg.PlotWidget):
If ``bars_range`` is provided use that range.
'''
profiler = Profiler(
msg=f'`{str(self)}.maxmin(name={name})`: `{self.name}`',
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
delayed=True,
)
# TODO: here we should instead look up the ``Viz.shm.array``
# and read directly from shm to avoid copying to memory first
# and then reading it again here.
@ -1330,36 +1331,21 @@ class ChartPlotWidget(pg.PlotWidget):
viz = self._vizs.get(viz_key)
if viz is None:
log.error(f"viz {viz_key} doesn't exist in chart {self.name} !?")
key = res = 0, 0
return 0, 0
res = viz.maxmin()
if (
res is None
):
mxmn = 0, 0
if not self._on_screen:
self.default_view(do_ds=False)
self._on_screen = True
else:
(
l,
_,
lbar,
rbar,
_,
r,
) = bars_range or viz.datums_range()
x_range, read_slc, mxmn = res
profiler(f'{self.name} got bars range')
key = lbar, rbar
res = viz.maxmin(*key)
if (
res is None
):
log.warning(
f"{viz_key} no mxmn for bars_range => {key} !?"
)
res = 0, 0
if not self._on_screen:
self.default_view(do_ds=False)
self._on_screen = True
profiler(f'yrange mxmn: {key} -> {res}')
# print(f'{viz_key} yrange mxmn: {key} -> {res}')
return res
return mxmn
def get_viz(
self,

View File

@ -198,12 +198,11 @@ class ContentsLabel(pg.LabelItem):
self,
name: str,
index: int,
ix: int,
array: np.ndarray,
) -> None:
# this being "html" is the dumbest shit :eyeroll:
first = array[0]['index']
self.setText(
"<b>i</b>:{index}<br/>"
@ -216,7 +215,7 @@ class ContentsLabel(pg.LabelItem):
"<b>C</b>:{}<br/>"
"<b>V</b>:{}<br/>"
"<b>wap</b>:{}".format(
*array[index - first][
*array[ix][
[
'time',
'open',
@ -228,7 +227,7 @@ class ContentsLabel(pg.LabelItem):
]
],
name=name,
index=index,
index=ix,
)
)
@ -236,15 +235,12 @@ class ContentsLabel(pg.LabelItem):
self,
name: str,
index: int,
ix: int,
array: np.ndarray,
) -> None:
first = array[0]['index']
if index < array[-1]['index'] and index > first:
data = array[index - first][name]
self.setText(f"{name}: {data:.2f}")
data = array[ix][name]
self.setText(f"{name}: {data:.2f}")
class ContentsLabels:
@ -269,17 +265,20 @@ class ContentsLabels:
def update_labels(
self,
index: int,
x_in: int,
) -> None:
for chart, name, label, update in self._labels:
viz = chart.get_viz(name)
array = viz.shm.array
index = array[viz.index_field]
start = index[0]
stop = index[-1]
if not (
index >= 0
and index < array[-1]['index']
x_in >= start
and x_in <= stop
):
# out of range
print('WTF out of range?')
@ -288,7 +287,10 @@ class ContentsLabels:
# call provided update func with data point
try:
label.show()
update(index, array)
ix = np.searchsorted(index, x_in)
if ix > len(array):
breakpoint()
update(ix, array)
except IndexError:
log.exception(f"Failed to update label: {name}")
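The label-update change above swaps absolute-index arithmetic for a `searchsorted` lookup against whatever the viz's index column is (integer index or epoch time). A small sketch of that mapping, with a bounds check standing in for the out-of-range early return (hypothetical helper):

import numpy as np

def row_for_x(
    index: np.ndarray,  # monotonically increasing index column
    x_in: float,
) -> int | None:
    # out of range -> caller skips the label update
    if not (index[0] <= x_in <= index[-1]):
        return None
    # nearest insertion point into the sorted index column
    return int(np.searchsorted(index, x_in))

times = np.array([10., 20., 30., 40.])
assert row_for_x(times, 25.) == 2
assert row_for_x(times, 99.) is None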

View File

@ -60,11 +60,89 @@ class FlowGraphic(pg.GraphicsObject):
'''
# sub-type customization methods
declare_paintables: Optional[Callable] = None
sub_paint: Optional[Callable] = None
declare_paintables: Callable | None = None
sub_paint: Callable | None = None
# TODO: can we remove this?
# sub_br: Optional[Callable] = None
# XXX-NOTE-XXX: graphics caching B)
# see explanation for different caching modes:
# https://stackoverflow.com/a/39410081
cache_mode: int = QGraphicsItem.DeviceCoordinateCache
# XXX: WARNING item caching seems to only be useful
# if we don't re-generate the entire QPainterPath every time
# don't ever use this - it's a colossal nightmare of artefacts
# and is disastrous for performance.
# QGraphicsItem.ItemCoordinateCache
# TODO: still questions todo with coord-cacheing that we should
# probably talk to a core dev about:
# - if this makes transform interactions slower (such as zooming)
# and if so maybe if/when we implement a "history" mode for the
# view we disable this in that mode?
def __init__(
self,
*args,
name: str | None = None,
# line styling
color: str = 'bracket',
last_step_color: str | None = None,
fill_color: Optional[str] = None,
style: str = 'solid',
**kwargs
) -> None:
self._name = name
# primary graphics item used for history
self.path: QPainterPath = QPainterPath()
# additional path that can be optionally used for appends which
# tries to avoid triggering an update/redraw of the presumably
# larger historical ``.path`` above. the flag to enable
# this behaviour is found in `Renderer.render()`.
self.fast_path: QPainterPath | None = None
# TODO: evaluate the path capacity stuff and see
# if it really makes much diff pre-allocating it.
# self._last_cap: int = 0
# cap = path.capacity()
# if cap != self._last_cap:
# print(f'NEW CAPACITY: {self._last_cap} -> {cap}')
# self._last_cap = cap
# all history of curve is drawn in single px thickness
self._color: str = color
pen = pg.mkPen(hcolor(color), width=1)
pen.setStyle(_line_styles[style])
if 'dash' in style:
pen.setDashPattern([8, 3])
self._pen = pen
self._brush = pg.functions.mkBrush(
hcolor(fill_color or color)
)
# last segment is drawn in 2px thickness for emphasis
if last_step_color:
self.last_step_pen = pg.mkPen(
hcolor(last_step_color),
width=2,
)
else:
self.last_step_pen = pg.mkPen(
self._pen,
width=2,
)
self._last_line: QLineF = QLineF()
super().__init__(*args, **kwargs)
# apply cache mode
self.setCacheMode(self.cache_mode)
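The `cache_mode` class attribute hoisted into `FlowGraphic` above is a per-subclass policy applied once at construction; the `FlattenedOHLC` hunk further below overrides it to `NoCache`. A Qt-free sketch of that pattern (hypothetical classes and string modes in place of the real `QGraphicsItem` flags):

class FlowGraphicSketch:
    # class-level default caching policy, applied at construction
    cache_mode: str = 'device-coordinate-cache'

    def __init__(self) -> None:
        self.set_cache_mode(self.cache_mode)

    def set_cache_mode(self, mode: str) -> None:
        print(f'{type(self).__name__}: caching set to {mode!r}')

class FlattenedOHLCSketch(FlowGraphicSketch):
    # panning artifacts were seen with coord caching on this
    # graphic, so its subclass simply opts out.
    cache_mode: str = 'no-cache'

FlowGraphicSketch()      # -> device-coordinate-cache
FlattenedOHLCSketch()    # -> no-cache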
def x_uppx(self) -> int:
@ -112,81 +190,32 @@ class Curve(FlowGraphic):
updates don't trigger a full path redraw.
'''
# TODO: can we remove this?
# sub_br: Optional[Callable] = None
def __init__(
self,
*args,
step_mode: bool = False,
color: str = 'default_lightest',
fill_color: Optional[str] = None,
style: str = 'solid',
name: Optional[str] = None,
# color: str = 'default_lightest',
# fill_color: Optional[str] = None,
# style: str = 'solid',
**kwargs
) -> None:
self._name = name
# brutaaalll, see comments within..
self.yData = None
self.xData = None
# self._last_cap: int = 0
self.path: Optional[QPainterPath] = None
# additional path that can be optionally used for appends which
# tries to avoid triggering an update/redraw of the presumably
# larger historical ``.path`` above. the flag to enable
# this behaviour is found in `Renderer.render()`.
self.fast_path: QPainterPath | None = None
# TODO: we can probably just dispense with the parent since
# we're basically only using the pen setting now...
super().__init__(*args, **kwargs)
# all history of curve is drawn in single px thickness
pen = pg.mkPen(hcolor(color))
pen.setStyle(_line_styles[style])
if 'dash' in style:
pen.setDashPattern([8, 3])
self._pen = pen
# last segment is drawn in 2px thickness for emphasis
# self.last_step_pen = pg.mkPen(hcolor(color), width=2)
self.last_step_pen = pg.mkPen(pen, width=2)
self._last_line: QLineF = QLineF()
# flat-top style histogram-like discrete curve
# self._step_mode: bool = step_mode
# self._fill = True
self._brush = pg.functions.mkBrush(hcolor(fill_color or color))
# NOTE: this setting seems to mostly prevent redraws on mouse
# interaction which is a huge boon for avg interaction latency.
# TODO: one question still remaining is if this makes trasform
# interactions slower (such as zooming) and if so maybe if/when
# we implement a "history" mode for the view we disable this in
# that mode?
# don't enable caching by default for the case where the
# only thing drawn is the "last" line segment which can
# have a weird artifact where it won't be fully drawn to its
# endpoint (something we saw on trade rate curves)
self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
# XXX-NOTE-XXX: graphics caching.
# see explanation for different caching modes:
# https://stackoverflow.com/a/39410081 seems to only be useful
# if we don't re-generate the entire QPainterPath every time
# don't ever use this - it's a colossal nightmare of artefacts
# and is disastrous for performance.
# self.setCacheMode(QtWidgets.QGraphicsItem.ItemCoordinateCache)
# allow sub-type customization
declare = self.declare_paintables
@ -317,14 +346,10 @@ class Curve(FlowGraphic):
p.setPen(self.last_step_pen)
p.drawLine(self._last_line)
profiler('.drawLine()')
p.setPen(self._pen)
profiler('last datum `.drawLine()`')
p.setPen(self._pen)
path = self.path
# cap = path.capacity()
# if cap != self._last_cap:
# print(f'NEW CAPACITY: {self._last_cap} -> {cap}')
# self._last_cap = cap
if path:
p.drawPath(path)
@ -369,7 +394,7 @@ class Curve(FlowGraphic):
# from last datum to current such that
# the end of line touches the "beginning"
# of the current datum step span.
x_2last , y[-2],
x_2last, y[-2],
x_last, y[-1],
)
@ -382,6 +407,9 @@ class Curve(FlowGraphic):
# (via it's max / min) even when highly zoomed out.
class FlattenedOHLC(Curve):
# avoids strange dragging/smearing artifacts when panning..
cache_mode: int = QGraphicsItem.NoCache
def draw_last_datum(
self,
path: QPainterPath,

View File

@ -19,6 +19,10 @@ Data vizualization APIs
'''
from __future__ import annotations
from math import (
ceil,
floor,
)
from typing import (
Optional,
Literal,
@ -56,6 +60,7 @@ from ..log import get_logger
from .._profile import (
Profiler,
pg_profile_enabled,
ms_slower_then,
)
@ -127,9 +132,9 @@ def render_baritems(
# baseline "line" downsampled OHLC curve that should
# kick on only when we reach a certain uppx threshold.
self._render_table = (ds_curve_r, curve)
self._alt_r = (ds_curve_r, curve)
ds_r, curve = self._render_table
ds_r, curve = self._alt_r
# print(
# f'r: {r.fmtr.xy_slice}\n'
@ -265,14 +270,17 @@ class Viz(msgspec.Struct): # , frozen=True):
_index_step: float | None = None
# map from uppx -> (downsampled data, incremental graphics)
_src_r: Optional[Renderer] = None
_render_table: dict[
Optional[int],
tuple[Renderer, pg.GraphicsItem],
] = (None, None)
_src_r: Renderer | None = None
_alt_r: tuple[
Renderer,
pg.GraphicsItem
] | None = None
# cache of y-range values per x-range input.
_mxmns: dict[tuple[int, int], tuple[float, float]] = {}
_mxmns: dict[
tuple[int, int],
tuple[float, float],
] = {}
@property
def shm(self) -> ShmArray:
@ -320,59 +328,97 @@ class Viz(msgspec.Struct): # , frozen=True):
def maxmin(
self,
lbar: int,
rbar: int,
x_range: slice | tuple[int, int] | None = None,
i_read_range: tuple[int, int] | None = None,
use_caching: bool = True,
) -> Optional[tuple[float, float]]:
) -> tuple[float, float] | None:
'''
Compute the cached max and min y-range values for a given
x-range determined by ``lbar`` and ``rbar`` or ``None``
if no range can be determined (yet).
'''
# TODO: hash the slice instead maybe?
# https://stackoverflow.com/a/29980872
rkey = (round(lbar), round(rbar))
do_print: bool = False
if use_caching:
cached_result = self._mxmns.get(rkey)
if cached_result:
if do_print:
print(
f'{self.name} CACHED maxmin\n'
f'{rkey} -> {cached_result}'
)
return cached_result
name = self.name
profiler = Profiler(
msg=f'`Viz[{name}].maxmin()`',
disabled=not pg_profile_enabled(),
ms_threshold=4,
delayed=True,
)
shm = self.shm
if shm is None:
return None
do_print: bool = False
arr = shm.array
# get relative slice indexes into array
if self.index_field == 'time':
read_slc = slice_from_time(
arr,
start_t=lbar,
stop_t=rbar,
step=self.index_step(),
)
if i_read_range is not None:
read_slc = slice(*i_read_range)
index = arr[read_slc][self.index_field]
if not index.size:
return None
ixrng = (index[0], index[-1])
else:
ifirst = arr[0]['index']
read_slc = slice(
lbar - ifirst,
(rbar - ifirst) + 1
)
if x_range is None:
(
l,
_,
lbar,
rbar,
_,
r,
) = self.datums_range()
profiler(f'{self.name} got bars range')
x_range = lbar, rbar
# TODO: hash the slice instead maybe?
# https://stackoverflow.com/a/29980872
lbar, rbar = ixrng = round(x_range[0]), round(x_range[1])
if use_caching:
cached_result = self._mxmns.get(ixrng)
if cached_result:
if do_print:
print(
f'{self.name} CACHED maxmin\n'
f'{ixrng} -> {cached_result}'
)
read_slc, mxmn = cached_result
return (
ixrng,
read_slc,
mxmn,
)
if i_read_range is None:
# get relative slice indexes into array
if self.index_field == 'time':
read_slc = slice_from_time(
arr,
start_t=lbar,
stop_t=rbar,
step=self.index_step(),
)
else:
ifirst = arr[0]['index']
read_slc = slice(
lbar - ifirst,
(rbar - ifirst) + 1
)
slice_view = arr[read_slc]
if not slice_view.size:
log.warning(f'{self.name} no maxmin in view?')
log.warning(
f'{self.name} no maxmin in view?\n'
f"{name} no mxmn for bars_range => {ixrng} !?"
)
return None
elif self.yrange:
@ -380,9 +426,8 @@ class Viz(msgspec.Struct): # , frozen=True):
if do_print:
print(
f'{self.name} M4 maxmin:\n'
f'{rkey} -> {mxmn}'
f'{ixrng} -> {mxmn}'
)
else:
if self.is_ohlc:
ylow = np.min(slice_view['low'])
@ -400,7 +445,7 @@ class Viz(msgspec.Struct): # , frozen=True):
s = 3
print(
f'{self.name} MANUAL ohlc={self.is_ohlc} maxmin:\n'
f'{rkey} -> {mxmn}\n'
f'{ixrng} -> {mxmn}\n'
f'read_slc: {read_slc}\n'
# f'abs_slc: {slice_view["index"]}\n'
f'first {s}:\n{slice_view[:s]}\n'
@ -409,9 +454,13 @@ class Viz(msgspec.Struct): # , frozen=True):
# cache result for input range
assert mxmn
self._mxmns[rkey] = mxmn
return mxmn
self._mxmns[ixrng] = (read_slc, mxmn)
profiler(f'yrange mxmn cacheing: {x_range} -> {mxmn}')
return (
ixrng,
read_slc,
mxmn,
)
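The reworked `Viz.maxmin()` above now caches both the y-range and the read slice, keyed by the rounded x-range. A toy, hypothetical version of that caching scheme (plain ints for the index domain, no shm or profiling):

import numpy as np

class MiniViz:
    def __init__(self, ys: np.ndarray) -> None:
        self._ys = ys
        # cache: rounded x-range -> (read slice, (ymn, ymx))
        self._mxmns: dict[
            tuple[int, int],
            tuple[slice, tuple[float, float]],
        ] = {}

    def maxmin(
        self,
        x_range: tuple[float, float],
    ) -> tuple[tuple[int, int], slice, tuple[float, float]] | None:
        lbar, rbar = ixrng = round(x_range[0]), round(x_range[1])

        cached = self._mxmns.get(ixrng)
        if cached:
            read_slc, mxmn = cached
            return ixrng, read_slc, mxmn

        read_slc = slice(max(lbar, 0), min(rbar + 1, len(self._ys)))
        view = self._ys[read_slc]
        if not view.size:
            return None

        mxmn = float(view.min()), float(view.max())
        self._mxmns[ixrng] = (read_slc, mxmn)
        return ixrng, read_slc, mxmn

viz = MiniViz(np.random.rand(100))
ixrng, read_slc, (ymn, ymx) = viz.maxmin((10.2, 50.7))
assert viz.maxmin((10.2, 50.7))[1] == read_slc  # second call hits the cache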
def view_range(self) -> tuple[int, int]:
'''
@ -456,13 +505,13 @@ class Viz(msgspec.Struct): # , frozen=True):
array = self.shm.array
index = array[index_field]
first = round(index[0])
last = round(index[-1])
first = floor(index[0])
last = ceil(index[-1])
# first and last datums in view determined by
# l / r view range.
leftmost = round(l)
rightmost = round(r)
leftmost = floor(l)
rightmost = ceil(r)
# invalid view state
if (
@ -609,7 +658,11 @@ class Viz(msgspec.Struct): # , frozen=True):
**kwargs,
) -> pg.GraphicsObject:
) -> tuple[
bool,
tuple[int, int],
pg.GraphicsObject,
]:
'''
Read latest datums from shm and (incrementally) render to
graphics.
@ -618,13 +671,17 @@ class Viz(msgspec.Struct): # , frozen=True):
profiler = Profiler(
msg=f'Viz.update_graphics() for {self.name}',
disabled=not pg_profile_enabled(),
ms_threshold=4,
# ms_threshold=ms_slower_then,
ms_threshold=ms_slower_then,
# ms_threshold=4,
)
# shm read and slice to view
read = (
xfirst, xlast, src_array,
ivl, ivr, in_view,
xfirst,
xlast,
src_array,
ivl,
ivr,
in_view,
) = self.read(profiler=profiler)
profiler('read src shm data')
@ -635,8 +692,12 @@ class Viz(msgspec.Struct): # , frozen=True):
not in_view.size
or not render
):
# print('exiting early')
return graphics
# print(f'{self.name} not in view (exiting early)')
return (
False,
(ivl, ivr),
graphics,
)
should_redraw: bool = False
ds_allowed: bool = True # guard for m4 activation
@ -753,13 +814,18 @@ class Viz(msgspec.Struct): # , frozen=True):
if not out:
log.warning(f'{self.name} failed to render!?')
return graphics
return (
False,
(ivl, ivr),
graphics,
)
path, reset_cache = out
# XXX: SUPER UGGGHHH... without this we get stale cache
# graphics that "smear" across the view horizontally
# when panning and the first datum is out of view..
reset_cache = False
if (
reset_cache
):
@ -768,36 +834,53 @@ class Viz(msgspec.Struct): # , frozen=True):
with graphics.reset_cache():
graphics.path = r.path
graphics.fast_path = r.fast_path
self.draw_last(
array_key=array_key,
last_read=read,
reset_cache=reset_cache,
)
else:
# assign output paths to graphicis obj
graphics.path = r.path
graphics.fast_path = r.fast_path
graphics.draw_last_datum(
path,
src_array,
reset_cache,
array_key,
index_field=self.index_field,
)
graphics.update()
profiler('.update()')
self.draw_last(
array_key=array_key,
last_read=read,
reset_cache=reset_cache,
)
# graphics.draw_last_datum(
# path,
# src_array,
# reset_cache,
# array_key,
# index_field=self.index_field,
# )
# TODO: does this actually help us in any way (prolly should
# look at the source / ask ogi). I think it avoids artifacts on
# wheel-scroll downsampling curve updates?
# TODO: is this ever better?
# graphics.prepareGeometryChange()
# profiler('.prepareGeometryChange()')
graphics.prepareGeometryChange()
profiler('.prepareGeometryChange()')
graphics.update()
profiler('.update()')
# track downsampled state
self._in_ds = r._in_ds
return graphics
return (
True,
(ivl, ivr),
graphics,
)
def draw_last(
self,
array_key: Optional[str] = None,
array_key: str | None = None,
last_read: tuple | None = None,
reset_cache: bool = False,
only_last_uppx: bool = False,
) -> None:
@ -806,17 +889,11 @@ class Viz(msgspec.Struct): # , frozen=True):
(
xfirst, xlast, src_array,
ivl, ivr, in_view,
) = self.read()
) = last_read or self.read()
g = self.graphics
array_key = array_key or self.name
x, y = g.draw_last_datum(
g.path,
src_array,
False, # never reset path
array_key,
self.index_field,
)
gfx = self.graphics
# the renderer is downsampling we choose
# to always try and update a single (interpolating)
@ -826,36 +903,55 @@ class Viz(msgspec.Struct): # , frozen=True):
# worth of data since that's all the screen
# can represent on the last column where
# the most recent datum is being drawn.
if (
self._in_ds
or only_last_uppx
):
dsg = self.ds_graphics or self.graphics
uppx = ceil(gfx.x_uppx())
# XXX: pretty sure we don't need this?
# if isinstance(g, Curve):
# with dsg.reset_cache():
uppx = round(self._last_uppx)
y = y[-uppx:]
if (
(self._in_ds or only_last_uppx)
and uppx > 0
):
alt_renderer = self._alt_r
if alt_renderer:
renderer, gfx = alt_renderer
else:
renderer = self._src_r
fmtr = renderer.fmtr
x = fmtr.x_1d
y = fmtr.y_1d
iuppx = ceil(uppx)
if alt_renderer:
iuppx = ceil(uppx / fmtr.flat_index_ratio)
y = y[-iuppx:]
ymn, ymx = y.min(), y.max()
# print(f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}')
try:
iuppx = x[-uppx]
x_start = x[-iuppx]
except IndexError:
# we're less then an x-px wide so just grab the start
# datum index.
iuppx = x[0]
x_start = x[0]
dsg._last_line = QLineF(
iuppx, ymn,
gfx._last_line = QLineF(
x_start, ymn,
x[-1], ymx,
)
# print(f'updating DS curve {self.name}')
dsg.update()
# print(
# f'updating DS curve {self.name}@{time_step}s\n'
# f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}'
# )
else:
x, y = gfx.draw_last_datum(
gfx.path,
src_array,
reset_cache, # never reset path
array_key,
self.index_field,
)
# print(f'updating NOT DS curve {self.name}')
g.update()
gfx.update()
def default_view(
self,
@ -964,7 +1060,9 @@ class Viz(msgspec.Struct): # , frozen=True):
l_reset = r_reset - rl_diff
else:
raise RuntimeError(f'Unknown view state {vl} -> {vr}')
log.warning(f'Unknown view state {vl} -> {vr}')
return
# raise RuntimeError(f'Unknown view state {vl} -> {vr}')
else:
# maintain the l->r view distance
@ -981,11 +1079,9 @@ class Viz(msgspec.Struct): # , frozen=True):
)
if do_ds:
# view.interaction_graphics_cycle()
view.maybe_downsample_graphics()
view._set_yrange()
# caller should do this!
# self.linked.graphics_cycle()
view._set_yrange(viz=self)
def incr_info(
self,
@ -994,8 +1090,46 @@ class Viz(msgspec.Struct): # , frozen=True):
is_1m: bool = False,
) -> tuple:
'''
Return a slew of graphics related data-flow metrics to do with
incrementally updating a data view.
_, _, _, r = self.bars_range() # most recent right datum index in-view
Output info includes,
----------------------
uppx: float
x-domain units-per-pixel.
liv: bool
telling if the "last datum" is in view.
do_px_step: bool
recent data append(s) are enough that the next physical
pixel-column should be used for drawing.
i_diff_t: float
the difference between the last globally recorded time stamp
and the current one.
append_diff: int
diff between last recorded "append index" (the index at which
`do_px_step` was last returned `True`) and the current index.
do_rt_update: bool
`True` only when the uppx is less than some threshold
defined by `update_uppx`.
should_tread: bool
determines the first step, globally across all callers, that
a set of data views should be "treaded", shifted in the
x-domain such that the last datum in view is always in the
same spot in non-view/scene (aka GUI coord) terms.
'''
# get most recent right datum index in-view
l, start, datum_start, datum_stop, stop, r = self.datums_range()
lasts = self.shm.array[-1]
i_step = lasts['index'] # last index-specific step.
i_step_t = lasts['time'] # last time step.
@ -1044,9 +1178,9 @@ class Viz(msgspec.Struct): # , frozen=True):
# is such that a datum(s) update to graphics wouldn't span
# to a new pixel, we don't update yet.
i_last_append = varz['i_last_append']
append_diff = i_step - i_last_append
append_diff: int = i_step - i_last_append
do_px_step = append_diff >= uppx
do_px_step = (append_diff * self.index_step()) >= uppx
do_rt_update = (uppx < update_uppx)
if (
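The `do_px_step` tweak above scales the append diff by the viz's index step so the comparison against units-per-pixel is done in x-domain units rather than raw datum counts. The decision in isolation (hypothetical helper):

def should_px_step(
    i_step: int,         # latest datum index
    i_last_append: int,  # index at the last pixel-column step
    index_step: float,   # x-units per datum
    uppx: float,         # x-units per screen pixel
) -> bool:
    # step to a new pixel column only once the accumulated new
    # datums span at least one pixel's worth of x-units.
    append_diff = i_step - i_last_append
    return (append_diff * index_step) >= uppx

assert should_px_step(102, 100, 1.0, 2.5) is False  # 2 x-units < 2.5
assert should_px_step(103, 100, 1.0, 2.5) is True   # 3 x-units >= 2.5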

View File

@ -25,7 +25,10 @@ from functools import partial
import itertools
from math import floor
import time
from typing import Optional, Any, Callable
from typing import (
Optional,
Any,
)
import tractor
import trio
@ -43,7 +46,10 @@ from ..data.types import Struct
from ..data._sharedmem import (
ShmArray,
)
from ..data._sampling import _tick_groups
from ..data._sampling import (
_tick_groups,
open_sample_stream,
)
from ._axes import YAxisLabel
from ._chart import (
ChartPlotWidget,
@ -84,11 +90,11 @@ log = get_logger(__name__)
# approach, likely with ``numba``:
# https://arxiv.org/abs/cs/0610046
# https://github.com/lemire/pythonmaxmin
def chart_maxmin(
chart: ChartPlotWidget,
fqsn: str,
# ohlcv_shm: ShmArray,
vlm_chart: ChartPlotWidget | None = None,
def multi_maxmin(
i_read_range: tuple[int, int] | None,
fast_viz: Viz,
vlm_viz: Viz | None = None,
profiler: Profiler = None,
) -> tuple[
@ -102,30 +108,51 @@ def chart_maxmin(
Compute max and min datums "in view" for range limits.
'''
main_viz = chart.get_viz(chart.name)
last_bars_range = main_viz.bars_range()
out = chart.maxmin(name=fqsn)
out = fast_viz.maxmin(
i_read_range=i_read_range,
)
if out is None:
return (last_bars_range, 0, 0, 0)
# log.warning(f'No yrange provided for {name}!?')
return (0, 0, 0)
mn, mx = out
(
ixrng,
read_slc,
yrange,
) = out
mx_vlm_in_view = 0
if profiler:
profiler(f'fast_viz.maxmin({read_slc})')
mn, mx = yrange
# TODO: we need to NOT call this to avoid a manual
# np.max/min trigger and especially on the vlm_chart
# vizs which aren't shown.. like vlm?
if vlm_chart:
out = vlm_chart.maxmin()
mx_vlm_in_view = 0
if vlm_viz:
out = vlm_viz.maxmin(
i_read_range=i_read_range,
)
if out:
_, mx_vlm_in_view = out
(
ixrng,
read_slc,
mxmn,
) = out
mx_vlm_in_view = mxmn[1]
if profiler:
profiler(f'vlm_viz.maxmin({read_slc})')
return (
last_bars_range,
mx,
max(mn, 0), # presuming price can't be negative?
mx_vlm_in_view,
# enforcing price can't be negative?
# TODO: do we even need this?
max(mn, 0),
mx_vlm_in_view, # vlm max
)
@ -134,10 +161,10 @@ class DisplayState(Struct):
Chart-local real-time graphics state container.
'''
fqsn: str
godwidget: GodWidget
quotes: dict[str, Any]
maxmin: Callable
flume: Flume
# high level chart handles and underlying ``Viz``
@ -151,6 +178,8 @@ class DisplayState(Struct):
last_price_sticky: YAxisLabel
hist_last_price_sticky: YAxisLabel
vlm_viz: Viz
# misc state tracking
vars: dict[str, Any] = field(
default_factory=lambda: {
@ -194,9 +223,17 @@ async def increment_history_view(
# wakeups/ctx switches verus logic checks (as normal)
# - we need increment logic that only does the view shift
# call when the uppx permits/needs it
async with hist_viz.flume.index_stream(int(1)) as istream:
async with open_sample_stream(1.) as istream:
async for msg in istream:
profiler = Profiler(
msg=f'History chart cycle for: `{ds.fqsn}`',
delayed=True,
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
# ms_threshold=4,
)
# l3 = ds.viz.shm.array[-3:]
# print(
# f'fast step for {ds.flume.symbol.fqsn}:\n'
@ -208,7 +245,7 @@ async def increment_history_view(
(
uppx,
liv,
do_append,
do_px_step,
i_diff_t,
append_diff,
do_rt_update,
@ -219,15 +256,22 @@ async def increment_history_view(
is_1m=True,
)
if (
do_append
and liv
):
hist_viz.plot.vb._set_yrange()
if do_px_step:
hist_viz.update_graphics()
profiler('`hist Viz.update_graphics()` call')
# check if tread-in-place x-shift is needed
if liv:
hist_viz.plot.vb._set_yrange(viz=hist_viz)
profiler('hist chart yrange view')
# check if tread-in-place view x-shift is needed
if should_tread:
# ensure path graphics append is shown on treads since
# the main rt loop does not call this.
hist_chart.increment_view(datums=append_diff)
profiler('hist tread view')
profiler.finish()
async def graphics_update_loop(
@ -304,19 +348,17 @@ async def graphics_update_loop(
)
vlm_chart = vlm_charts[fqsn]
maxmin = partial(
chart_maxmin,
fast_chart,
fqsn,
vlm_chart,
)
last_bars_range: tuple[float, float]
vlm_viz = vlm_chart._vizs.get('volume') if vlm_chart else None
(
last_bars_range,
last_mx,
last_mn,
last_mx_vlm,
) = maxmin()
) = multi_maxmin(
None,
fast_viz,
vlm_viz,
)
last, volume = ohlcv.array[-1][['close', 'volume']]
@ -344,9 +386,10 @@ async def graphics_update_loop(
last_quote_s = time.time()
dss[fqsn] = ds = linked.display_state = DisplayState(**{
'fqsn': fqsn,
'godwidget': godwidget,
'quotes': {},
'maxmin': maxmin,
# 'maxmin': maxmin,
'flume': flume,
@ -358,6 +401,8 @@ async def graphics_update_loop(
'hist_viz': hist_viz,
'hist_last_price_sticky': hist_last_price_sticky,
'vlm_viz': vlm_viz,
'l1': l1,
'vars': {
@ -372,7 +417,7 @@ async def graphics_update_loop(
})
if vlm_chart:
vlm_pi = vlm_chart._vizs['volume'].plot
vlm_pi = vlm_viz.plot
vlm_sticky = vlm_pi.getAxis('right')._stickies['volume']
ds.vlm_chart = vlm_chart
ds.vlm_sticky = vlm_sticky
@ -408,7 +453,8 @@ async def graphics_update_loop(
# and quote_rate >= _quote_throttle_rate * 2
and quote_rate >= display_rate
):
log.warning(f'High quote rate {symbol.key}: {quote_rate}')
pass
# log.warning(f'High quote rate {symbol.key}: {quote_rate}')
last_quote_s = time.time()
@ -452,105 +498,99 @@ def graphics_update_cycle(
) -> None:
profiler = Profiler(
msg=f'Graphics loop cycle for: `{ds.fqsn}`',
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
delayed=True,
# ms_threshold=4,
)
# TODO: SPEEDing this all up..
# - optimize this whole graphics stack with ``numba`` hopefully
# or at least a little `mypyc` B)
# - pass more direct refs as input to avoid so many attr accesses?
# - use a streaming minmax algo and drop the use of the
# state-tracking ``chart_maxmin()`` routine from above?
# state-tracking ``multi_maxmin()`` routine from above?
fqsn = ds.fqsn
chart = ds.chart
hist_chart = ds.godwidget.hist_linked.chart
flume = ds.flume
sym = flume.symbol
fqsn = sym.fqsn
main_viz = chart._vizs[fqsn]
index_field = main_viz.index_field
profiler = Profiler(
msg=f'Graphics loop cycle for: `{chart.name}`',
delayed=True,
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
)
# unpack multi-referenced components
vlm_chart = ds.vlm_chart
# rt "HFT" chart
varz = ds.vars
l1 = ds.l1
flume = ds.flume
ohlcv = flume.rt_shm
array = ohlcv.array
varz = ds.vars
hist_viz = ds.hist_viz
main_viz = ds.viz
index_field = main_viz.index_field
tick_margin = varz['tick_margin']
(
uppx,
liv,
do_append,
do_px_step,
i_diff_t,
append_diff,
do_rt_update,
should_tread,
) = main_viz.incr_info(ds=ds)
profiler('`.incr_info()`')
# TODO: we should only run mxmn when we know
# an update is due via ``do_append`` above.
(
brange,
mx_in_view,
mn_in_view,
mx_vlm_in_view,
) = ds.maxmin()
l, lbar, rbar, r = brange
mx = mx_in_view + tick_margin
mn = mn_in_view - tick_margin
profiler('`ds.maxmin()` call')
# an update is due via ``do_px_step`` above.
# TODO: eventually we want to separate out the dark vlm and show
# them as an additional graphic.
clear_types = _tick_groups['clears']
mx = varz['last_mx']
mn = varz['last_mn']
mx_vlm_in_view = varz['last_mx_vlm']
# update ohlc sampled price bars
if (
do_rt_update
or do_append
# do_rt_update
# or do_px_step
(liv and do_px_step)
or trigger_all
):
chart.update_graphics_from_flow(
fqsn,
# chart.name,
# do_append=do_append,
)
main_viz.draw_last(array_key=fqsn)
_, i_read_range, _ = main_viz.update_graphics()
profiler('`Viz.update_graphics()` call')
hist_chart.update_graphics_from_flow(
fqsn,
# chart.name,
# do_append=do_append,
)
# don't real-time "shift" the curve to the
# left unless we get one of the following:
if (
(
should_tread
and do_append
and liv
mx_in_view,
mn_in_view,
mx_vlm_in_view,
) = multi_maxmin(
i_read_range,
main_viz,
ds.vlm_viz,
profiler,
)
or trigger_all
):
chart.increment_view(datums=append_diff)
main_viz.plot.vb._set_yrange()
# NOTE: since vlm and ohlc charts are axis linked now we don't
# need the double increment request?
# if vlm_chart:
# vlm_chart.increment_view(datums=append_diff)
mx = mx_in_view + tick_margin
mn = mn_in_view - tick_margin
profiler(f'{fqsn} `multi_maxmin()` call')
profiler('view incremented')
# don't real-time "shift" the curve to the
# left unless we get one of the following:
if (
should_tread
or trigger_all
):
chart.increment_view(datums=append_diff)
# main_viz.plot.vb._set_yrange(viz=main_viz)
# NOTE: since vlm and ohlc charts are axis linked now we don't
# need the double increment request?
# if vlm_chart:
# vlm_chart.increment_view(datums=append_diff)
profiler('view incremented')
# iterate frames of ticks-by-type such that we only update graphics
# using the last update per type where possible.
@ -589,9 +629,14 @@ def graphics_update_cycle(
ds.last_price_sticky.update_from_data(*end_ic)
ds.hist_last_price_sticky.update_from_data(*end_ic)
if wap_in_history:
# update vwap overlay line
chart.update_graphics_from_flow('bar_wap')
# update vwap overlay line
# if wap_in_history:
# chart.get_viz('bar_wap').update_graphics()
# update OHLC chart last bars
# TODO: fix the only last uppx stuff....
main_viz.draw_last() # only_last_uppx=True)
hist_viz.draw_last() # only_last_uppx=True)
# L1 book label-line updates
if typ in ('last',):
@ -628,7 +673,10 @@ def graphics_update_cycle(
):
l1.bid_label.update_fields({'level': price, 'size': size})
# check for y-autorange re-size
profiler('L1 labels updates')
# Y-autoranging: adjust y-axis limits based on state tracking
# of previous "last" L1 values which are in view.
lmx = varz['last_mx']
lmn = varz['last_mn']
mx_diff = mx - lmx
@ -638,6 +686,8 @@ def graphics_update_cycle(
mx_diff
or mn_diff
):
# complain about out-of-range outliers which can show up
# in certain annoying feeds (like ib)..
if (
abs(mx_diff) > .25 * lmx
or
@ -652,19 +702,21 @@ def graphics_update_cycle(
f'mx_diff: {mx_diff}\n'
f'mn_diff: {mn_diff}\n'
)
# fast chart resize case
# FAST CHART resize case
elif (
liv
and not chart._static_yrange == 'axis'
):
main_vb = chart._vizs[fqsn].plot.vb
main_vb = main_viz.plot.vb
if (
main_vb._ic is None
or not main_vb._ic.is_set()
):
yr = (mn, mx)
# print(
# f'updating y-range due to mxmn\n'
# f'MAIN VIZ yrange update\n'
# f'{fqsn}: {yr}'
# )
@ -677,9 +729,9 @@ def graphics_update_cycle(
# range_margin=0.1,
yrange=yr
)
profiler('main vb y-autorange')
# check if slow chart needs a resize
hist_viz = hist_chart._vizs[fqsn]
# SLOW CHART resize case
(
_,
hist_liv,
@ -692,48 +744,74 @@ def graphics_update_cycle(
ds=ds,
is_1m=True,
)
if hist_liv:
hist_viz.plot.vb._set_yrange()
profiler('hist `Viz.incr_info()`')
# XXX: update this every draw cycle to make
# TODO: track local liv maxmin without doing a recompute all the
# time.. plus, just generally the user is more likely to be
# zoomed out enough on the slow chart that this is never an
# issue (the last datum going out of y-range).
# hist_chart = ds.hist_chart
# if (
# hist_liv
# and not hist_chart._static_yrange == 'axis'
# ):
# hist_viz.plot.vb._set_yrange(
# viz=hist_viz,
# # yrange=yr, # this is the rt range, not hist.. XD
# )
# profiler('hist vb y-autorange')
# XXX: update this every draw cycle to ensure y-axis auto-ranging
# only adjusts when the in-view data co-domain actually expands or
# contracts.
varz['last_mx'], varz['last_mn'] = mx, mn
# run synchronous update on all linked viz
# TODO: should the "main" (aka source) viz be special?
# TODO: a similar, only-update-full-path-on-px-step approach for all
# fsp overlays and vlm stuff..
# run synchronous update on all `Viz` overlays
for curve_name, viz in chart._vizs.items():
# update any overlayed fsp flows
if (
# curve_name != chart.data_key
curve_name != fqsn
and not viz.is_ohlc
):
update_fsp_chart(
chart,
viz,
curve_name,
array_key=curve_name,
)
# even if we're downsampled bigly
# draw the last datum in the final
# px column to give the user the mx/mn
# range of that set.
if (
liv
# and not do_append
# and not do_rt_update
):
viz.draw_last(
array_key=curve_name,
only_last_uppx=True,
)
# even if we're downsampled bigly
# draw the last datum in the final
# px column to give the user the mx/mn
# range of that set.
if (
curve_name != fqsn
and liv
# and not do_px_step
# and not do_rt_update
):
viz.draw_last(
array_key=curve_name,
# TODO: XXX this is currently broken for the
# `FlattenedOHLC` case since we aren't returning the
# full x/y uppx's worth of src-data from
# `draw_last_datum()` ..
only_last_uppx=True,
)
profiler('overlays updates')
# volume chart logic..
# TODO: can we unify this with the above loop?
if vlm_chart:
# print(f"DOING VLM {fqsn}")
vlm_vizs = vlm_chart._vizs
main_vlm_viz = vlm_vizs['volume']
# always update y-label
ds.vlm_sticky.update_from_data(
*array[-1][[
@ -745,19 +823,20 @@ def graphics_update_cycle(
if (
(
do_rt_update
or do_append
or do_px_step
and liv
)
or trigger_all
):
# TODO: make it so this doesn't have to be called
# once the $vlm is up?
vlm_chart.update_graphics_from_flow(
'volume',
main_vlm_viz.update_graphics(
# UGGGh, see ``maxmin()`` impl in `._fsp` for
# the overlayed plotitems... we need a better
# way to invoke a maxmin per overlay..
render=False,
# XXX: ^^^^ THIS IS SUPER IMPORTANT! ^^^^
# without this, since we disable the
# 'volume' (units) chart after the $vlm starts
@ -766,60 +845,65 @@ def graphics_update_cycle(
# connected to update accompanying overlay
# graphics..
)
profiler('`vlm_chart.update_graphics_from_flow()`')
profiler('`main_vlm_viz.update_graphics()`')
if (
mx_vlm_in_view != varz['last_mx_vlm']
):
vlm_yr = (0, mx_vlm_in_view * 1.375)
vlm_chart.view._set_yrange(yrange=vlm_yr)
profiler('`vlm_chart.view._set_yrange()`')
# print(f'mx vlm: {last_mx_vlm} -> {mx_vlm_in_view}')
varz['last_mx_vlm'] = mx_vlm_in_view
# vlm_yr = (0, mx_vlm_in_view * 1.375)
# vlm_chart.view._set_yrange(yrange=vlm_yr)
# profiler('`vlm_chart.view._set_yrange()`')
# update all downstream FSPs
for curve_name, viz in vlm_vizs.items():
if curve_name == 'volume':
continue
if (
curve_name not in {'volume', fqsn}
and viz.render
viz.render
and (
liv and do_rt_update
or do_append
or do_px_step
)
# and not viz.is_ohlc
# and curve_name != fqsn
and curve_name not in {fqsn}
):
update_fsp_chart(
vlm_chart,
viz,
curve_name,
array_key=curve_name,
# do_append=uppx < update_uppx,
# do_append=do_append,
)
profiler(f'vlm `Viz[{viz.name}].update_graphics()`')
# is this even doing anything?
# (pretty sure it's the real-time
# resizing from last quote?)
fvb = viz.plot.vb
fvb._set_yrange(
name=curve_name,
)
# XXX: without this we get completely
# mangled/empty vlm display subchart..
# fvb = viz.plot.vb
# fvb._set_yrange(
# viz=viz,
# )
profiler(f'vlm `Viz[{viz.name}].plot.vb._set_yrange()`')
# even if we're downsampled bigly
# draw the last datum in the final
# px column to give the user the mx/mn
# range of that set.
elif (
curve_name != 'volume'
and not do_append
not do_px_step
and liv
and uppx >= 1
# even if we're downsampled bigly
# draw the last datum in the final
# px column to give the user the mx/mn
# range of that set.
):
# always update the last datum-element
# graphic for all vizs
# print(f'drawing last {viz.name}')
viz.draw_last(array_key=curve_name)
profiler(f'vlm `Viz[{viz.name}].draw_last()`')
profiler('vlm Viz all updates complete')
profiler.finish()
async def link_views_with_region(
@ -989,32 +1073,6 @@ async def link_views_with_region(
# region.sigRegionChangeFinished.connect(update_pi_from_region)
# force 0 to always be in view
def multi_maxmin(
chart: ChartPlotWidget,
names: list[str],
) -> tuple[float, float]:
'''
Viz "group" maxmin loop; assumes all named vizs
are in the same co-domain and thus can be sorted
as one set.
Iterates all the named vizs and calls the chart
api to find their range values and return.
TODO: really we should probably have a more built-in API
for this?
'''
mx = 0
for name in names:
ymn, ymx = chart.maxmin(name=name)
mx = max(mx, ymx)
return 0, mx
_quote_throttle_rate: int = 60 - 6
@ -1061,17 +1119,28 @@ async def display_symbol_data(
display_rate = main_window().current_screen().refreshRate()
_quote_throttle_rate = floor(display_rate) - 6
# TODO: we should be able to increase this if we use some
# `mypyc` speedups elsewhere? 22ish seems to be the sweet
# spot for single-feed chart.
num_of_feeds = len(fqsns)
mx: int = 22
if num_of_feeds > 1:
# there will be more ctx switches with more than 1 feed so we
# max throttle down a bit more.
mx = 16
# limit to at least display's FPS
# avoiding needless Qt-in-guest-mode context switches
cycles_per_feed = min(
round(_quote_throttle_rate/num_of_feeds),
mx,
)
feed: Feed
async with open_feed(
fqsns,
loglevel=loglevel,
# limit to at least display's FPS
# avoiding needless Qt-in-guest-mode context switches
tick_throttle=min(
round(_quote_throttle_rate/len(fqsns)),
22, # aka 6 + 16
),
tick_throttle=cycles_per_feed,
) as feed:
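A worked check of the per-feed throttle computed above, assuming a 60Hz display (the `22`/`16` caps are taken from the diff):

from math import floor

display_rate = 60.0
quote_throttle_rate = floor(display_rate) - 6       # 54 Hz total budget

num_of_feeds = 3
mx = 22 if num_of_feeds == 1 else 16                # tighter cap for multi-feed

cycles_per_feed = min(
    round(quote_throttle_rate / num_of_feeds),      # 18
    mx,                                             # capped at 16
)
assert cycles_per_feed == 16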
@ -1159,10 +1228,8 @@ async def display_symbol_data(
# ensure the last datum graphic is generated
# for zoom-interaction purposes.
hist_chart.get_viz(fqsn).draw_last(
array_key=fqsn,
# only_last_uppx=True,
)
hist_viz = hist_chart.get_viz(fqsn)
hist_viz.draw_last(array_key=fqsn)
pis.setdefault(fqsn, [None, None])[1] = hist_chart.plotItem
# don't show when not focussed
@ -1176,6 +1243,7 @@ async def display_symbol_data(
# to avoid internal pane creation.
sidepane=pp_pane,
)
rt_viz = rt_chart.get_viz(fqsn)
pis.setdefault(fqsn, [None, None])[0] = rt_chart.plotItem
# for pause/resume on mouse interaction
@ -1192,7 +1260,7 @@ async def display_symbol_data(
and has_vlm(ohlcv)
and vlm_chart is None
):
vlm_charts[fqsn] = await ln.start(
vlm_chart = vlm_charts[fqsn] = await ln.start(
open_vlm_displays,
rt_linked,
flume,
@ -1223,6 +1291,9 @@ async def display_symbol_data(
# add_label=False,
# )
godwidget.resize_all()
await trio.sleep(0)
for fqsn, flume in fitems[1:]:
# get a new color from the palette
bg_chart_color, bg_last_bar_color = next(palette)
@ -1245,7 +1316,7 @@ async def display_symbol_data(
# are none?
hist_pi.hideAxis('left')
viz = hist_chart.draw_curve(
hist_viz = hist_chart.draw_curve(
fqsn,
hist_ohlcv,
flume,
@ -1255,15 +1326,12 @@ async def display_symbol_data(
is_ohlc=True,
color=bg_chart_color,
last_bar_color=bg_last_bar_color,
last_step_color=bg_last_bar_color,
)
# ensure the last datum graphic is generated
# for zoom-interaction purposes.
viz.draw_last(
array_key=fqsn,
# only_last_uppx=True,
)
hist_viz.draw_last(array_key=fqsn)
hist_pi.vb.maxmin = partial(
hist_chart.maxmin,
@ -1273,8 +1341,8 @@ async def display_symbol_data(
# specially store ref to shm for lookup in display loop
# since only a placeholder of `None` is entered in
# ``.draw_curve()``.
viz = hist_chart._vizs[fqsn]
assert viz.plot is hist_pi
hist_viz = hist_chart._vizs[fqsn]
assert hist_viz.plot is hist_pi
pis.setdefault(fqsn, [None, None])[1] = hist_pi
rt_pi = rt_chart.overlay_plotitem(
@ -1285,7 +1353,7 @@ async def display_symbol_data(
rt_pi.hideAxis('left')
rt_pi.hideAxis('bottom')
viz = rt_chart.draw_curve(
rt_viz = rt_chart.draw_curve(
fqsn,
ohlcv,
flume,
@ -1295,7 +1363,7 @@ async def display_symbol_data(
is_ohlc=True,
color=bg_chart_color,
last_bar_color=bg_last_bar_color,
last_step_color=bg_last_bar_color,
)
rt_pi.vb.maxmin = partial(
rt_chart.maxmin,
@ -1306,8 +1374,8 @@ async def display_symbol_data(
# specially store ref to shm for lookup in display loop
# since only a placeholder of `None` is entered in
# ``.draw_curve()``.
viz = rt_chart._vizs[fqsn]
assert viz.plot is rt_pi
rt_viz = rt_chart._vizs[fqsn]
assert rt_viz.plot is rt_pi
pis.setdefault(fqsn, [None, None])[0] = rt_pi
rt_chart.setFocus()
@ -1375,16 +1443,17 @@ async def display_symbol_data(
rt_linked.mode = mode
viz = rt_chart.get_viz(order_ctl_symbol)
viz.plot.setFocus()
rt_viz = rt_chart.get_viz(order_ctl_symbol)
rt_viz.plot.setFocus()
# default view adjuments and sidepane alignment
# as final default UX touch.
rt_chart.default_view()
rt_chart.view.enable_auto_yrange()
await trio.sleep(0)
hist_chart.default_view()
hist_chart.view.enable_auto_yrange()
hist_viz = hist_chart.get_viz(fqsn)
await trio.sleep(0)
godwidget.resize_all()

View File

@ -78,7 +78,6 @@ def has_vlm(ohlcv: ShmArray) -> bool:
def update_fsp_chart(
chart: ChartPlotWidget,
viz,
graphics_name: str,
array_key: Optional[str],
@ -101,18 +100,14 @@ def update_fsp_chart(
# update graphics
# NOTE: this does a length check internally which allows it
# it to stay above the last row check below..
chart.update_graphics_from_flow(
graphics_name,
array_key=array_key or graphics_name,
**kwargs,
)
viz.update_graphics()
# XXX: re: ``array_key``: fsp func names must be unique meaning we
# can't have duplicates of the underlying data even if multiple
# sub-charts reference it under different 'named charts'.
# read from last calculated value and update any label
last_val_sticky = chart.plotItem.getAxis(
last_val_sticky = viz.plot.getAxis(
'right')._stickies.get(graphics_name)
if last_val_sticky:
last = last_row[array_key]
@ -287,9 +282,10 @@ async def run_fsp_ui(
# profiler(f'fsp:{name} chart created')
# first UI update, usually from shm pushed history
viz = chart.get_viz(array_key)
update_fsp_chart(
chart,
chart.get_viz(array_key),
viz,
name,
array_key=array_key,
)
@ -316,7 +312,7 @@ async def run_fsp_ui(
# level_line(chart, 70, orient_v='bottom')
# level_line(chart, 80, orient_v='top')
chart.view._set_yrange()
chart.view._set_yrange(viz=viz)
# done() # status updates
# profiler(f'fsp:{func_name} starting update loop')
@ -670,7 +666,7 @@ async def open_vlm_displays(
# built-in vlm which we plot ASAP since it's
# usually data provided directly with OHLC history.
shm = ohlcv
ohlc_chart = linked.chart
# ohlc_chart = linked.chart
vlm_chart = linked.add_plot(
name='volume',
@ -688,37 +684,7 @@ async def open_vlm_displays(
# the curve item internals are pretty convoluted.
style='step',
)
vlm_chart.view.enable_auto_yrange()
# back-link the volume chart to trigger y-autoranging
# in the ohlc (parent) chart.
ohlc_chart.view.enable_auto_yrange(
src_vb=vlm_chart.view,
)
# force 0 to always be in view
def multi_maxmin(
names: list[str],
) -> tuple[float, float]:
'''
Viz "group" maxmin loop; assumes all named flows
are in the same co-domain and thus can be sorted
as one set.
Iterates all the named flows and calls the chart
api to find their range values and return.
TODO: really we should probably have a more built-in API
for this?
'''
mx = 0
for name in names:
ymn, ymx = vlm_chart.maxmin(name=name)
mx = max(mx, ymx)
return 0, mx
vlm_viz = vlm_chart._vizs['volume']
# TODO: fix the x-axis label issue where if you put
# the axis on the left it's totally not lined up...
@ -741,12 +707,14 @@ async def open_vlm_displays(
last_val_sticky.update_from_data(-1, value)
vlm_curve = vlm_chart.update_graphics_from_flow(
_, _, vlm_curve = vlm_chart.update_graphics_from_flow(
'volume',
)
# size view to data once at outset
vlm_chart.view._set_yrange()
vlm_chart.view._set_yrange(
viz=vlm_viz
)
# add axis title
axis = vlm_chart.getAxis('right')
@ -761,7 +729,7 @@ async def open_vlm_displays(
{ # fsp engine conf
'func_name': 'dolla_vlm',
'zero_on_step': False,
'zero_on_step': True,
'params': {
'price_func': {
'default_value': 'chl3',
@ -811,7 +779,7 @@ async def open_vlm_displays(
dvlm_pi.hideAxis('bottom')
# all to be overlayed curve names
fields = [
dvlm_fields = [
'dolla_vlm',
'dark_vlm',
]
@ -824,16 +792,6 @@ async def open_vlm_displays(
'dark_trade_rate',
]
group_mxmn = partial(
multi_maxmin,
# keep both regular and dark vlm in view
names=fields,
# names=fields + dvlm_rate_fields,
)
# add custom auto range handler
dvlm_pi.vb._maxmin = group_mxmn
# add dvlm (step) curves to common view
def chart_curves(
names: list[str],
@ -870,7 +828,7 @@ async def open_vlm_displays(
assert viz.plot is pi
chart_curves(
fields,
dvlm_fields,
dvlm_pi,
dvlm_flume.rt_shm,
dvlm_flume,
@ -930,12 +888,6 @@ async def open_vlm_displays(
},
)
# add custom auto range handler
tr_pi.vb.maxmin = partial(
multi_maxmin,
# keep both regular and dark vlm in view
names=trade_rate_fields,
)
tr_pi.hideAxis('bottom')
chart_curves(

View File

@ -20,8 +20,13 @@ Chart view box primitives
"""
from __future__ import annotations
from contextlib import asynccontextmanager
from functools import partial
import time
from typing import Optional, Callable
from typing import (
Optional,
Callable,
TYPE_CHECKING,
)
import pyqtgraph as pg
# from pyqtgraph.GraphicsScene import mouseEvents
@ -39,6 +44,10 @@ from .._profile import pg_profile_enabled, ms_slower_then
from ._editors import SelectRect
from . import _event
if TYPE_CHECKING:
from ._chart import ChartPlotWidget
from ._dataviz import Viz
log = get_logger(__name__)
@ -365,7 +374,6 @@ class ChartView(ViewBox):
)
# for "known y-range style"
self._static_yrange = static_yrange
self._maxmin = None
# disable vertical scrolling
self.setMouseEnabled(
@ -374,7 +382,7 @@ class ChartView(ViewBox):
)
self.linked = None
self._chart: 'ChartPlotWidget' = None # noqa
self._chart: ChartPlotWidget | None = None # noqa
# add our selection box annotator
self.select_box = SelectRect(self)
@ -385,6 +393,7 @@ class ChartView(ViewBox):
self.setFocusPolicy(QtCore.Qt.StrongFocus)
self._ic = None
self._yranger: Callable | None = None
def start_ic(
self,
@ -445,29 +454,18 @@ class ChartView(ViewBox):
yield self
@property
def chart(self) -> 'ChartPlotWidget': # type: ignore # noqa
def chart(self) -> ChartPlotWidget: # type: ignore # noqa
return self._chart
@chart.setter
def chart(self, chart: 'ChartPlotWidget') -> None: # type: ignore # noqa
def chart(self, chart: ChartPlotWidget) -> None: # type: ignore # noqa
self._chart = chart
self.select_box.chart = chart
if self._maxmin is None:
self._maxmin = chart.maxmin
@property
def maxmin(self) -> Callable:
return self._maxmin
@maxmin.setter
def maxmin(self, callback: Callable) -> None:
self._maxmin = callback
def wheelEvent(
self,
ev,
axis=None,
# relayed_from: ChartView = None,
):
'''
Override "center-point" location for scrolling.
@ -482,7 +480,6 @@ class ChartView(ViewBox):
if (
not linked
):
# print(f'{self.name} not linked but relay from {relayed_from.name}')
return
if axis in (0, 1):
@ -494,18 +491,19 @@ class ChartView(ViewBox):
chart = self.linked.chart
# don't zoom more than the min points setting
out = l, lbar, rbar, r = chart.get_viz(chart.name).bars_range()
# vl = r - l
viz = chart.get_viz(chart.name)
vl, lbar, rbar, vr = viz.bars_range()
# if ev.delta() > 0 and vl <= _min_points_to_show:
# log.debug("Max zoom bruh...")
# TODO: max/min zoom limits incorporating time step size.
# rl = vr - vl
# if ev.delta() > 0 and rl <= _min_points_to_show:
# log.warning("Max zoom bruh...")
# return
# if (
# ev.delta() < 0
# and vl >= len(chart._vizs[chart.name].shm.array) + 666
# and rl >= len(chart._vizs[chart.name].shm.array) + 666
# ):
# log.debug("Min zoom bruh...")
# log.warning("Min zoom bruh...")
# return
# actual scaling factor
@ -536,49 +534,17 @@ class ChartView(ViewBox):
self.scaleBy(s, center)
else:
# center = pg.Point(
# fn.invertQTransform(self.childGroup.transform()).map(ev.pos())
# )
# XXX: scroll "around" the right most element in the view
# which stays "pinned" in place.
# furthest_right_coord = self.boundingRect().topRight()
# yaxis = pg.Point(
# fn.invertQTransform(
# self.childGroup.transform()
# ).map(furthest_right_coord)
# )
# This seems like the most "intuitive" option, a hybrid of
# tws and tv styles
last_bar = pg.Point(int(rbar)) + 1
ryaxis = chart.getAxis('right')
r_axis_x = ryaxis.pos().x()
end_of_l1 = pg.Point(
round(
chart.cv.mapToView(
pg.Point(r_axis_x - chart._max_l1_line_len)
# QPointF(chart._max_l1_line_len, 0)
).x()
)
) # .x()
# self.state['viewRange'][0][1] = end_of_l1
# focal = pg.Point((last_bar.x() + end_of_l1)/2)
# use right-most point of current curve graphic
xl = viz.graphics.x_last()
focal = min(
last_bar,
end_of_l1,
key=lambda p: p.x()
xl,
vr,
)
# focal = pg.Point(last_bar.x() + end_of_l1)
self._resetTarget()
# NOTE: scroll "around" the right most datum-element in view
# gives the feeling of staying "pinned" in place.
self.scaleBy(s, focal)
# XXX: the order of the next 2 lines i'm pretty sure
@ -604,21 +570,8 @@ class ChartView(ViewBox):
self,
ev,
axis: Optional[int] = None,
# relayed_from: ChartView = None,
) -> None:
# if relayed_from:
# print(f'PAN: {self.name} -> RELAYED FROM: {relayed_from.name}')
# NOTE since in the overlay case axes are already
# "linked" any x-range change will already be mirrored
# in all overlaid ``PlotItems``, so we need to simply
# ignore the signal here since otherwise we get N-calls
# from N-overlays resulting in an "accelerated" feeling
# panning motion instead of the expected linear shift.
# if relayed_from:
# return
pos = ev.pos()
lastPos = ev.lastPos()
dif = pos - lastPos
@ -688,9 +641,6 @@ class ChartView(ViewBox):
# PANNING MODE
else:
# XXX: WHY
ev.accept()
try:
self.start_ic()
except RuntimeError:
@ -722,6 +672,9 @@ class ChartView(ViewBox):
# self._ic = None
# self.chart.resume_all_feeds()
# XXX: WHY
ev.accept()
# WEIRD "RIGHT-CLICK CENTER ZOOM" MODE
elif button & QtCore.Qt.RightButton:
@ -767,7 +720,12 @@ class ChartView(ViewBox):
*,
yrange: Optional[tuple[float, float]] = None,
range_margin: float = 0.06,
viz: Viz | None = None,
# NOTE: this value pairs (more or less) with L1 label text
# height offset from the bid/ask lines.
range_margin: float = 0.09,
bars_range: Optional[tuple[int, int, int, int]] = None,
# flag to prevent triggering sibling charts from the same linked
@ -820,18 +778,28 @@ class ChartView(ViewBox):
# XXX: only compute the mxmn range
# if none is provided as input!
if not yrange:
# flow = chart._vizs[name]
yrange = self._maxmin()
if not viz:
breakpoint()
out = viz.maxmin()
if out is None:
log.warning(f'No yrange provided for {name}!?')
return
(
ixrng,
_,
yrange
) = out
profiler(f'`{self.name}:Viz.maxmin()` -> {ixrng}=>{yrange}')
if yrange is None:
log.warning(f'No yrange provided for {name}!?')
print(f"WTF NO YRANGE {name}")
return
ylow, yhigh = yrange
profiler(f'callback ._maxmin(): {yrange}')
# view margins: stay within a % of the "true range"
diff = yhigh - ylow
ylow = ylow - (diff * range_margin)
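A quick worked example of the margin padding above using made-up numbers; the matching `yhigh` expansion is assumed from context since this hunk cuts off before it:

ylow, yhigh = 90.0, 110.0
range_margin = 0.09
diff = yhigh - ylow                      # 20.0
ylow = ylow - (diff * range_margin)      # 88.2
yhigh = yhigh + (diff * range_margin)    # 111.8 (assumed symmetric expansion)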
@ -851,6 +819,7 @@ class ChartView(ViewBox):
def enable_auto_yrange(
self,
viz: Viz,
src_vb: Optional[ChartView] = None,
) -> None:
@ -862,8 +831,17 @@ class ChartView(ViewBox):
if src_vb is None:
src_vb = self
if self._yranger is None:
self._yranger = partial(
self._set_yrange,
viz=viz,
)
# widget-UIs/splitter(s) resizing
src_vb.sigResized.connect(self._set_yrange)
src_vb.sigResized.connect(self._yranger)
# mouse wheel doesn't emit XRangeChanged
src_vb.sigRangeChangedManually.connect(self._yranger)
# re-sampling trigger:
# TODO: a smarter way to avoid calling this needlessly?
@ -875,34 +853,21 @@ class ChartView(ViewBox):
src_vb.sigRangeChangedManually.connect(
self.maybe_downsample_graphics
)
# mouse wheel doesn't emit XRangeChanged
src_vb.sigRangeChangedManually.connect(self._set_yrange)
# XXX: enabling these will cause "jittery"-ness
# on zoom where sharp diffs in the y-range will
# not re-size right away until a new sample update?
# if src_vb is not self:
# src_vb.sigXRangeChanged.connect(self._set_yrange)
# src_vb.sigXRangeChanged.connect(
# self.maybe_downsample_graphics
# )
def disable_auto_yrange(self) -> None:
# XXX: not entirely sure why we can't de-reg this..
self.sigResized.disconnect(
self._set_yrange,
self._yranger,
)
self.sigRangeChangedManually.disconnect(
self._yranger,
)
self.sigRangeChangedManually.disconnect(
self.maybe_downsample_graphics
)
self.sigRangeChangedManually.disconnect(
self._set_yrange,
)
# self.sigXRangeChanged.disconnect(self._set_yrange)
# self.sigXRangeChanged.disconnect(
# self.maybe_downsample_graphics
# )
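The reason the `partial` is cached on `self._yranger` (rather than rebuilt at disconnect time) is that Qt-style signals only disconnect the exact slot object that was previously connected. A minimal, self-contained sketch of that behaviour; `FakeSignal` is a hypothetical stand-in for a Qt signal and none of this is code from the changeset:

from functools import partial

class FakeSignal:
    '''Toy signal: connect/disconnect by equality, like Qt does for plain callables.'''
    def __init__(self):
        self._slots = []

    def connect(self, slot):
        self._slots.append(slot)

    def disconnect(self, slot):
        # raises ValueError when ``slot`` isn't the object we stored
        self._slots.remove(slot)

def _set_yrange(viz=None):
    print(f'autorange for {viz}')

sig = FakeSignal()
sig.connect(partial(_set_yrange, viz='btcusdt'))
try:
    # a freshly built partial is a *different* object -> can't be removed
    sig.disconnect(partial(_set_yrange, viz='btcusdt'))
except ValueError:
    print('fresh partial does not match the connected slot')

yranger = partial(_set_yrange, viz='btcusdt')  # cache it once...
sig.connect(yranger)
sig.disconnect(yranger)  # ...and the very same object disconnects cleanly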
def x_uppx(self) -> float:
'''
@ -924,7 +889,7 @@ class ChartView(ViewBox):
def maybe_downsample_graphics(
self,
autoscale_overlays: bool = True,
autoscale_overlays: bool = False,
):
profiler = Profiler(
msg=f'ChartView.maybe_downsample_graphics() for {self.name}',
@ -960,21 +925,19 @@ class ChartView(ViewBox):
# pass in no array which will read and render from the last
# passed array (normally provided by the display loop.)
chart.update_graphics_from_flow(
name,
use_vr=True,
)
chart.update_graphics_from_flow(name)
# for each overlay on this chart auto-scale the
# y-range to max-min values.
if autoscale_overlays:
overlay = chart.pi_overlay
if overlay:
for pi in overlay.overlays:
pi.vb._set_yrange(
# TODO: get the range once up front...
# bars_range=br,
)
profiler('autoscaled linked plots')
# if autoscale_overlays:
# overlay = chart.pi_overlay
# if overlay:
# for pi in overlay.overlays:
# pi.vb._set_yrange(
# # TODO: get the range once up front...
# # bars_range=br,
# viz=pi.viz,
# )
# profiler('autoscaled linked plots')
profiler(f'<{chart_name}>.update_graphics_from_flow({name})')
profiler(f'<{chart_name}>.update_graphics_from_flow({name})')

View File

@ -18,13 +18,8 @@ Super fast OHLC sampling graphics types.
"""
from __future__ import annotations
from typing import (
Optional,
TYPE_CHECKING,
)
import numpy as np
import pyqtgraph as pg
from PyQt5 import (
QtGui,
QtWidgets,
@ -33,18 +28,14 @@ from PyQt5.QtCore import (
QLineF,
QRectF,
)
from PyQt5.QtWidgets import QGraphicsItem
from PyQt5.QtGui import QPainterPath
from ._curve import FlowGraphic
from .._profile import pg_profile_enabled, ms_slower_then
from ._style import hcolor
from ..log import get_logger
from .._profile import Profiler
if TYPE_CHECKING:
from ._chart import LinkedSplits
log = get_logger(__name__)
@ -100,30 +91,18 @@ class BarItems(FlowGraphic):
"Price range" bars graphics rendered from a OHLC sampled sequence.
'''
# XXX: causes this weird jitter bug when click-drag panning
# where the path curve will awkwardly flicker back and forth?
cache_mode: int = QGraphicsItem.NoCache
def __init__(
self,
linked: LinkedSplits,
plotitem: 'pg.PlotItem', # noqa
color: str = 'bracket',
last_bar_color: str = 'original',
name: Optional[str] = None,
*args,
**kwargs,
) -> None:
super().__init__()
self.linked = linked
# XXX: for the mega-lulz increasing width here increases draw
# latency... so probably don't do it until we figure that out.
self._color = color
self.bars_pen = pg.mkPen(hcolor(color), width=1)
self.last_bar_pen = pg.mkPen(hcolor(last_bar_color), width=2)
self._name = name
# XXX: causes this weird jitter bug when click-drag panning
# where the path curve will awkwardly flicker back and forth?
# self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
self.path = QPainterPath()
super().__init__(*args, **kwargs)
self._last_bar_lines: tuple[QLineF, ...] | None = None
def x_last(self) -> None | float:
@ -218,12 +197,12 @@ class BarItems(FlowGraphic):
# as is necessary for what's in "view". Not sure if this will
# lead to any perf gains other than when zoomed in to fewer bars
# in view.
p.setPen(self.last_bar_pen)
p.setPen(self.last_step_pen)
if self._last_bar_lines:
p.drawLines(*tuple(filter(bool, self._last_bar_lines)))
profiler('draw last bar')
p.setPen(self.bars_pen)
p.setPen(self._pen)
p.drawPath(self.path)
profiler(f'draw history path: {self.path.capacity()}')
@ -299,5 +278,4 @@ class BarItems(FlowGraphic):
# date / from some previous sample. It's weird though
# because i've seen it do this to bars i - 3 back?
# return ohlc['time'], ohlc['close']
return ohlc[index_field], ohlc['close']

View File

@ -92,11 +92,11 @@ class ComposedGridLayout:
'''
def __init__(
self,
item: PlotItem,
pi: PlotItem,
) -> None:
self.items: list[PlotItem] = []
self.pitems: list[PlotItem] = []
self._pi2axes: dict[ # TODO: use a ``bidict`` here?
int,
dict[str, AxisItem],
@ -125,7 +125,7 @@ class ComposedGridLayout:
layout.setOrientation(orient)
self.insert_plotitem(0, item)
self.insert_plotitem(0, pi)
# insert surrounding linear layouts into the parent pi's layout
# such that additional axes can be appended arbitrarily without
@ -135,13 +135,14 @@ class ComposedGridLayout:
# TODO: do we need this?
# axis should have been removed during insert above
index = _axes_layout_indices[name]
axis = item.layout.itemAt(*index)
axis = pi.layout.itemAt(*index)
if axis and axis.isVisible():
assert linlayout.itemAt(0) is axis
# item.layout.removeItem(axis)
item.layout.addItem(linlayout, *index)
layout = item.layout.itemAt(*index)
# XXX: see comment in ``.insert_plotitem()``...
# pi.layout.removeItem(axis)
pi.layout.addItem(linlayout, *index)
layout = pi.layout.itemAt(*index)
assert layout is linlayout
def _register_item(
@ -157,14 +158,14 @@ class ComposedGridLayout:
self._pi2axes.setdefault(name, {})[index] = axis
# enter plot into list for index tracking
self.items.insert(index, plotitem)
self.pitems.insert(index, plotitem)
def insert_plotitem(
self,
index: int,
plotitem: PlotItem,
) -> (int, int):
) -> tuple[int, list[AxisItem]]:
'''
Place item at index by inserting all axes into the grid
at the list-order appropriate position.
@ -175,11 +176,14 @@ class ComposedGridLayout:
'`.insert_plotitem()` only supports an index >= 0'
)
inserted_axes: list[AxisItem] = []
# add plot's axes in sequence to the embedded linear layouts
# for each "side" thus avoiding graphics collisions.
for name, axis_info in plotitem.axes.copy().items():
linlayout, axes = self.sides[name]
axis = axis_info['item']
inserted_axes.append(axis)
if axis in axes:
# TODO: re-order using ``.pop()`` ?
@ -192,19 +196,20 @@ class ComposedGridLayout:
if (
not axis.isVisible()
# XXX: we never skip moving the axes for the *first*
# XXX: we never skip moving the axes for the *root*
# plotitem inserted (even if not shown) since we need to
# move all the hidden axes into linear sub-layouts for
# that "central" plot in the overlay. Also if we don't
# do it there's weird geometry calc offsets that make
# view coords slightly off somehow .. smh
and not len(self.items) == 0
and not len(self.pitems) == 0
):
continue
# XXX: Remove old axis? No, turns out we don't need this?
# DON'T unlink it since we the original ``ViewBox``
# to still drive it B)
# XXX: Remove old axis?
# No, turns out we don't need this?
# DON'T UNLINK IT since we need the original ``ViewBox`` to
# still drive it with events/handlers B)
# popped = plotitem.removeAxis(name, unlink=False)
# assert axis is popped
@ -220,7 +225,7 @@ class ComposedGridLayout:
self._register_item(index, plotitem)
return index
return (index, inserted_axes)
def append_plotitem(
self,
@ -234,7 +239,7 @@ class ComposedGridLayout:
'''
# for left and bottom axes we have to first remove
# items and re-insert to maintain a list-order.
return self.insert_plotitem(len(self.items), item)
return self.insert_plotitem(len(self.pitems), item)
def get_axis(
self,
@ -247,7 +252,7 @@ class ComposedGridLayout:
if the axis for that name is not shown.
'''
index = self.items.index(plot)
index = self.pitems.index(plot)
named = self._pi2axes[name]
return named.get(index)
@ -306,10 +311,13 @@ class PlotItemOverlay:
# events/signals.
root_plotitem.vb.setZValue(10)
self.overlays: list[PlotItem] = []
self.layout = ComposedGridLayout(root_plotitem)
self._relays: dict[str, Signal] = {}
@property
def overlays(self) -> list[PlotItem]:
return self.layout.pitems
def add_plotitem(
self,
plotitem: PlotItem,
@ -324,11 +332,9 @@ class PlotItemOverlay:
# (0, 1), # link both
link_axes: tuple[int] = (),
) -> None:
) -> tuple[int, list[AxisItem]]:
index = index or len(self.overlays)
root = self.root_plotitem
self.overlays.insert(index, plotitem)
vb: ViewBox = plotitem.vb
# TODO: some sane way to allow menu event broadcast XD
@ -476,7 +482,10 @@ class PlotItemOverlay:
# ``PlotItem`` dynamically.
# append-compose into the layout all axes from this plot
self.layout.insert_plotitem(index, plotitem)
if index is None:
insert_index, axes = self.layout.append_plotitem(plotitem)
else:
insert_index, axes = self.layout.insert_plotitem(index, plotitem)
plotitem.setGeometry(root.vb.sceneBoundingRect())
@ -496,6 +505,11 @@ class PlotItemOverlay:
vb.setZValue(100)
return (
index,
axes,
)
def get_axis(
self,
plot: PlotItem,

View File

@ -24,7 +24,6 @@ for fast incremental update.
'''
from __future__ import annotations
from typing import (
Optional,
TYPE_CHECKING,
)
@ -58,19 +57,8 @@ class Renderer(msgspec.Struct):
# output graphics rendering, the main object
# processed in ``QGraphicsObject.paint()``
path: Optional[QPainterPath] = None
fast_path: Optional[QPainterPath] = None
# XXX: just ideas..
# called on the final data (transform) output to convert
# to "graphical data form" a format that can be passed to
# the ``.draw()`` implementation.
# graphics_t: Optional[Callable[ShmArray, np.ndarray]] = None
# graphics_t_shm: Optional[ShmArray] = None
# path graphics update implementation methods
# prepend_fn: Optional[Callable[QPainterPath, QPainterPath]] = None
# append_fn: Optional[Callable[QPainterPath, QPainterPath]] = None
path: QPainterPath | None = None
fast_path: QPainterPath | None = None
# downsampling state
_last_uppx: float = 0
@ -81,7 +69,7 @@ class Renderer(msgspec.Struct):
x: np.ndarray,
y: np.ndarray,
connect: str | np.ndarray = 'all',
path: Optional[QPainterPath] = None,
path: QPainterPath | None = None,
redraw: bool = False,
) -> QPainterPath:
@ -105,7 +93,7 @@ class Renderer(msgspec.Struct):
# - https://doc.qt.io/qt-5/qpainterpath.html#reserve
# - https://doc.qt.io/qt-5/qpainterpath.html#capacity
# - https://doc.qt.io/qt-5/qpainterpath.html#clear
# XXX: right now this is based on had hoc checks on a
# XXX: right now this is based on ad-hoc checks on a
# hidpi 3840x2160 4k monitor but we should optimize for
# the target display(s) on the sys.
# if no_path_yet:
@ -218,22 +206,24 @@ class Renderer(msgspec.Struct):
):
# print(f"{self.viz.name} -> REDRAWING BRUH")
if new_sample_rate and showing_src_data:
log.info(f'DEDOWN -> {array_key}')
log.info(f'DE-downsampling -> {array_key}')
self._in_ds = False
elif should_ds and uppx > 1:
x_1d, y_1d, ymn, ymx = xy_downsample(
ds_out = xy_downsample(
x_1d,
y_1d,
uppx,
)
self.viz.yrange = ymn, ymx
# print(f'{self.viz.name} post ds: ymn, ymx: {ymn},{ymx}')
if ds_out is not None:
x_1d, y_1d, ymn, ymx = ds_out
self.viz.yrange = ymn, ymx
# print(f'{self.viz.name} post ds: ymn, ymx: {ymn},{ymx}')
reset = True
profiler(f'FULL PATH downsample redraw={should_ds}')
self._in_ds = True
reset = True
profiler(f'FULL PATH downsample redraw={should_ds}')
self._in_ds = True
path = self.draw_path(
x=x_1d,
@ -269,10 +259,7 @@ class Renderer(msgspec.Struct):
append_length > 0
and do_append
):
print(f'{array_key} append len: {append_length}')
# new_x = x_1d[-append_length - 2:] # slice_to_head]
# new_y = y_1d[-append_length - 2:] # slice_to_head]
profiler('sliced append path')
profiler(f'sliced append path {append_length}')
# (
# x_1d,
# y_1d,
@ -300,22 +287,23 @@ class Renderer(msgspec.Struct):
profiler('generated append qpath')
if use_fpath:
# print(f'{self.viz.name}: FAST PATH')
# an attempt at trying to make append-updates faster..
if fast_path is None:
fast_path = append_path
# fast_path.reserve(int(6e3))
else:
# print(
# f'{self.viz.name}: FAST PATH\n'
# f"append_path br: {append_path.boundingRect()}\n"
# f"path size: {size}\n"
# f"append_path len: {append_path.length()}\n"
# f"fast_path len: {fast_path.length()}\n"
# )
fast_path.connectPath(append_path)
size = fast_path.capacity()
profiler(f'connected fast path w size: {size}')
print(
f"append_path br: {append_path.boundingRect()}\n"
f"path size: {size}\n"
f"append_path len: {append_path.length()}\n"
f"fast_path len: {fast_path.length()}\n"
)
# graphics.path.moveTo(new_x[0], new_y[0])
# path.connectPath(append_path)

View File

@ -144,15 +144,29 @@ class CompleterView(QTreeView):
self._font_size: int = 0 # pixels
self._init: bool = False
async def on_pressed(self, idx: QModelIndex) -> None:
async def on_pressed(
self,
idx: QModelIndex,
) -> None:
'''
Mouse pressed on view handler.
'''
search = self.parent()
await search.chart_current_item()
await search.chart_current_item(
clear_to_cache=True,
)
# XXX: this causes Qt to hang and segfault..lovely
# self.show_cache_entries(
# only=True,
# keep_current_item_selected=True,
# )
search.focus()
def set_font_size(self, size: int = 18):
# print(size)
if size < 0:
@ -288,7 +302,7 @@ class CompleterView(QTreeView):
def select_first(self) -> QStandardItem:
'''
Select the first depth >= 2 entry from the completer tree and
return it's item.
return its item.
'''
# ensure we're **not** selecting the first level parent node and
@ -615,6 +629,8 @@ class SearchWidget(QtWidgets.QWidget):
def show_cache_entries(
self,
only: bool = False,
keep_current_item_selected: bool = False,
) -> None:
'''
Clear the search results view and show only cached (aka recently
@ -624,10 +640,14 @@ class SearchWidget(QtWidgets.QWidget):
godw = self.godwidget
# first entry in the cache is the current symbol(s)
fqsns = []
fqsns = set()
for multi_fqsns in list(godw._chart_cache):
fqsns.extend(list(multi_fqsns))
for fqsn in set(multi_fqsns):
fqsns.add(fqsn)
if keep_current_item_selected:
sel = self.view.selectionModel()
cidx = sel.currentIndex()
self.view.set_section_entries(
'cache',
@ -637,7 +657,17 @@ class SearchWidget(QtWidgets.QWidget):
reverse=True,
)
def get_current_item(self) -> Optional[tuple[str, str]]:
if (
keep_current_item_selected
and cidx.isValid()
):
# set current selection back to what it was before filling out
# the view results.
self.view.select_from_idx(cidx)
else:
self.view.select_first()
def get_current_item(self) -> tuple[QModelIndex, str, str] | None:
'''
Return the current completer tree selection as
a tuple ``(parent: str, child: str)`` if valid, else ``None``.
@ -665,7 +695,11 @@ class SearchWidget(QtWidgets.QWidget):
if provider == 'cache':
symbol, _, provider = symbol.rpartition('.')
return provider, symbol
return (
cidx,
provider,
symbol,
)
else:
return None
@ -686,7 +720,7 @@ class SearchWidget(QtWidgets.QWidget):
if value is None:
return None
provider, symbol = value
cidx, provider, symbol = value
godw = self.godwidget
fqsn = f'{symbol}.{provider}'
@ -715,7 +749,9 @@ class SearchWidget(QtWidgets.QWidget):
godw.rt_linked,
)
)
self.show_cache_entries(only=True)
self.show_cache_entries(
only=True,
)
self.bar.focus()
return fqsn
@ -956,11 +992,10 @@ async def handle_keyboard_input(
global _search_active, _search_enabled
# startup
bar = searchbar
search = searchbar.parent()
godwidget = search.godwidget
view = bar.view
view.set_font_size(bar.dpi_font.px_size)
searchw = searchbar.parent()
godwidget = searchw.godwidget
view = searchbar.view
view.set_font_size(searchbar.dpi_font.px_size)
send, recv = trio.open_memory_channel(616)
async with trio.open_nursery() as n:
@ -971,13 +1006,13 @@ async def handle_keyboard_input(
n.start_soon(
partial(
fill_results,
search,
searchw,
recv,
)
)
bar.focus()
search.show_cache_entries()
searchbar.focus()
searchw.show_cache_entries()
await trio.sleep(0)
async for kbmsg in recv_chan:
@ -994,16 +1029,24 @@ async def handle_keyboard_input(
Qt.Key_Return
):
_search_enabled = False
await search.chart_current_item(clear_to_cache=True)
search.show_cache_entries(only=True)
await searchw.chart_current_item(clear_to_cache=True)
# XXX: causes hang and segfault..
# searchw.show_cache_entries(
# only=True,
# keep_current_item_selected=True,
# )
view.show_matches()
search.focus()
elif not ctl and not bar.text():
searchw.focus()
elif (
not ctl
and not searchbar.text()
):
# TODO: really should factor this somewhere..bc
# we're doin it in another spot as well..
search.show_cache_entries(only=True)
searchw.show_cache_entries(only=True)
continue
# cancel and close
@ -1012,7 +1055,7 @@ async def handle_keyboard_input(
Qt.Key_Space, # i feel like this is the "native" one
Qt.Key_Alt,
}:
bar.unfocus()
searchbar.unfocus()
# kill the search and focus back on main chart
if godwidget:
@ -1020,41 +1063,54 @@ async def handle_keyboard_input(
continue
if ctl and key in {
Qt.Key_L,
}:
if (
ctl
and key in {Qt.Key_L}
):
# like url (link) highlight in a web browser
bar.focus()
searchbar.focus()
# selection navigation controls
elif ctl and key in {
Qt.Key_D,
}:
elif (
ctl
and key in {Qt.Key_D}
):
view.next_section(direction='down')
_search_enabled = False
elif ctl and key in {
Qt.Key_U,
}:
elif (
ctl
and key in {Qt.Key_U}
):
view.next_section(direction='up')
_search_enabled = False
# selection navigation controls
elif (ctl and key in {
elif (
ctl and (
key in {
Qt.Key_K,
Qt.Key_J,
}
Qt.Key_K,
Qt.Key_J,
}) or key in {
Qt.Key_Up,
Qt.Key_Down,
}:
or key in {
Qt.Key_Up,
Qt.Key_Down,
}
)
):
_search_enabled = False
if key in {Qt.Key_K, Qt.Key_Up}:
if key in {
Qt.Key_K,
Qt.Key_Up
}:
item = view.select_previous()
elif key in {Qt.Key_J, Qt.Key_Down}:
elif key in {
Qt.Key_J,
Qt.Key_Down,
}:
item = view.select_next()
if item:
@ -1063,15 +1119,18 @@ async def handle_keyboard_input(
# if we're in the cache section and thus the next
# selection is a cache item, switch and show it
# immediately since it should be very fast.
if parent_item and parent_item.text() == 'cache':
await search.chart_current_item(clear_to_cache=False)
if (
parent_item
and parent_item.text() == 'cache'
):
await searchw.chart_current_item(clear_to_cache=False)
# ACTUAL SEARCH BLOCK #
# where we fuzzy complete and fill out sections.
elif not ctl:
# relay to completer task
_search_enabled = True
send.send_nowait(search.bar.text())
send.send_nowait(searchw.bar.text())
_search_active.set()