From 9ace053aaf26215037ce87472d68e20e146d38fb Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 26 Nov 2022 15:08:36 -0500 Subject: [PATCH 01/96] Copy timestamps from source to FSP dest buffer --- piker/fsp/_engine.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/piker/fsp/_engine.py b/piker/fsp/_engine.py index a78308a4..f5af2ac2 100644 --- a/piker/fsp/_engine.py +++ b/piker/fsp/_engine.py @@ -188,6 +188,8 @@ async def fsp_compute( history_by_field['time'] = src_time[-len(history_by_field):] + history['time'] = src.array['time'] + # TODO: XXX: # THERE'S A BIG BUG HERE WITH THE `index` field since we're # prepending a copy of the first value a few times to make From 86d09d9305d3c10b03287540e74e4cf3d7f00b06 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 24 Nov 2022 15:33:58 -0500 Subject: [PATCH 02/96] Rename `Flow` -> `Viz` The type is better described as a "data visualization": https://en.wikipedia.org/wiki/Data_and_information_visualization Add `ChartPlotWidget.get_viz()` to start working towards not accessing the private table directly XD We'll probably end up using the name `Flow` for a type that tracks a collection of composed/cascaded `Flume`s: https://en.wikipedia.org/wiki/Two-port_network#Cascade_connection --- piker/ui/_axes.py | 2 +- piker/ui/_chart.py | 46 +++++++++++--------- piker/ui/_cursor.py | 27 +++++++----- piker/ui/_display.py | 80 +++++++++++++++++------------------ piker/ui/_editors.py | 2 +- piker/ui/_flows.py | 64 +++++++++++++--------------- piker/ui/_fsp.py | 12 +++--- piker/ui/_interaction.py | 8 ++-- piker/ui/_pathops.py | 16 +++---- piker/ui/qt/__init__.py | 3 -- piker/ui/qt/stackof_candle.py | 67 ----------------------------- 11 files changed, 132 insertions(+), 195 deletions(-) delete mode 100644 piker/ui/qt/__init__.py delete mode 100644 piker/ui/qt/stackof_candle.py diff --git a/piker/ui/_axes.py b/piker/ui/_axes.py index 2ee60bb9..52278819 100644 --- a/piker/ui/_axes.py +++ b/piker/ui/_axes.py @@ -302,7 +302,7 
@@ class DynamicDateAxis(Axis): # XX: ARGGGGG AG:LKSKDJF:LKJSDFD chart = self.pi.chart_widget - flow = chart._flows[chart.name] + flow = chart._vizs[chart.name] shm = flow.shm bars = shm.array first = shm._first.value diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index bfe1c110..55579906 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -72,7 +72,7 @@ from ._interaction import ChartView from ._forms import FieldsForm from .._profile import pg_profile_enabled, ms_slower_then from ._overlay import PlotItemOverlay -from ._flows import Flow +from ._flows import Viz from ._search import SearchWidget from . import _pg_overrides as pgo from .._profile import Profiler @@ -711,7 +711,7 @@ class LinkedSplits(QWidget): if style == 'ohlc_bar': # graphics, data_key = cpw.draw_ohlc( - flow = cpw.draw_ohlc( + viz = cpw.draw_ohlc( name, shm, flume=flume, @@ -727,7 +727,7 @@ class LinkedSplits(QWidget): elif style == 'line': add_label = True # graphics, data_key = cpw.draw_curve( - flow = cpw.draw_curve( + viz = cpw.draw_curve( name, shm, flume, @@ -738,7 +738,7 @@ class LinkedSplits(QWidget): elif style == 'step': add_label = True # graphics, data_key = cpw.draw_curve( - flow = cpw.draw_curve( + viz = cpw.draw_curve( name, shm, flume, @@ -751,8 +751,8 @@ class LinkedSplits(QWidget): else: raise ValueError(f"Chart style {style} is currently unsupported") - graphics = flow.graphics - data_key = flow.name + graphics = viz.graphics + data_key = viz.name if _is_main: assert style == 'ohlc_bar', 'main chart must be OHLC' @@ -908,7 +908,7 @@ class ChartPlotWidget(pg.PlotWidget): # self.setViewportMargins(0, 0, 0, 0) # registry of overlay curve names - self._flows: dict[str, Flow] = {} + self._vizs: dict[str, Viz] = {} self.feed: Feed | None = None @@ -974,7 +974,7 @@ class ChartPlotWidget(pg.PlotWidget): Return a range tuple for the bars present in view. 
''' - main_flow = self._flows[self.name] + main_flow = self._vizs[self.name] ifirst, l, lbar, rbar, r, ilast = main_flow.datums_range() return l, lbar, rbar, r @@ -1038,9 +1038,9 @@ class ChartPlotWidget(pg.PlotWidget): Set the view box to the "default" startup view of the scene. ''' - flow = self._flows.get(self.name) + flow = self._vizs.get(self.name) if not flow: - log.warning(f'`Flow` for {self.name} not loaded yet?') + log.warning(f'`Viz` for {self.name} not loaded yet?') return arr = flow.shm.array @@ -1220,7 +1220,7 @@ class ChartPlotWidget(pg.PlotWidget): **graphics_kwargs, - ) -> Flow: + ) -> Viz: ''' Draw a "curve" (line plot graphics) for the provided data in the input shm array ``shm``. @@ -1254,7 +1254,7 @@ class ChartPlotWidget(pg.PlotWidget): **graphics_kwargs, ) - flow = self._flows[data_key] = Flow( + flow = self._vizs[data_key] = Viz( data_key, pi, shm, @@ -1332,7 +1332,7 @@ class ChartPlotWidget(pg.PlotWidget): array_key: Optional[str] = None, **draw_curve_kwargs, - ) -> Flow: + ) -> Viz: ''' Draw OHLC datums to chart. @@ -1358,7 +1358,7 @@ class ChartPlotWidget(pg.PlotWidget): Update the named internal graphics from ``array``. ''' - flow = self._flows[array_key or graphics_name] + flow = self._vizs[array_key or graphics_name] return flow.update_graphics( array_key=array_key, **kwargs, @@ -1426,15 +1426,15 @@ class ChartPlotWidget(pg.PlotWidget): delayed=True, ) - # TODO: here we should instead look up the ``Flow.shm.array`` + # TODO: here we should instead look up the ``Viz.shm.array`` # and read directly from shm to avoid copying to memory first # and then reading it again here. 
flow_key = name or self.name - flow = self._flows.get(flow_key) + viz = self._vizs.get(flow_key) if ( - flow is None + viz is None ): - log.error(f"flow {flow_key} doesn't exist in chart {self.name} !?") + log.error(f"viz {flow_key} doesn't exist in chart {self.name} !?") key = res = 0, 0 else: @@ -1445,11 +1445,11 @@ class ChartPlotWidget(pg.PlotWidget): rbar, r, last, - ) = bars_range or flow.datums_range() + ) = bars_range or viz.datums_range() profiler(f'{self.name} got bars range') key = round(lbar), round(rbar) - res = flow.maxmin(*key) + res = viz.maxmin(*key) if ( res is None @@ -1465,3 +1465,9 @@ class ChartPlotWidget(pg.PlotWidget): profiler(f'yrange mxmn: {key} -> {res}') # print(f'{flow_key} yrange mxmn: {key} -> {res}') return res + + def get_viz( + self, + key: str, + ) -> Viz: + return self._vizs[key] diff --git a/piker/ui/_cursor.py b/piker/ui/_cursor.py index fd00c380..762acf73 100644 --- a/piker/ui/_cursor.py +++ b/piker/ui/_cursor.py @@ -274,8 +274,8 @@ class ContentsLabels: ) -> None: for chart, name, label, update in self._labels: - flow = chart._flows[name] - array = flow.shm.array + viz = chart.get_viz(name) + array = viz.shm.array if not ( index >= 0 @@ -482,25 +482,32 @@ class Cursor(pg.GraphicsObject): def add_curve_cursor( self, - plot: ChartPlotWidget, # noqa + chart: ChartPlotWidget, # noqa curve: 'PlotCurveItem', # noqa ) -> LineDot: - # if this plot contains curves add line dot "cursors" to denote + # if this chart contains curves add line dot "cursors" to denote # the current sample under the mouse - main_flow = plot._flows[plot.name] + main_viz = chart.get_viz(chart.name) + # read out last index - i = main_flow.shm.array[-1]['index'] + i = main_viz.shm.array[-1]['index'] cursor = LineDot( curve, index=i, - plot=plot + plot=chart ) - plot.addItem(cursor) - self.graphics[plot].setdefault('cursors', []).append(cursor) + chart.addItem(cursor) + self.graphics[chart].setdefault('cursors', []).append(cursor) return cursor - def 
mouseAction(self, action, plot): # noqa + def mouseAction( + self, + action: str, + plot: ChartPlotWidget, + + ) -> None: # noqa + log.debug(f"{(action, plot.name)}") if action == 'Enter': self.active_plot = plot diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 475cec55..07033a0c 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -78,7 +78,7 @@ from .._profile import Profiler log = get_logger(__name__) -# TODO: delegate this to each `Flow.maxmin()` which includes +# TODO: delegate this to each `Viz.maxmin()` which includes # caching and further we should implement the following stream based # approach, likely with ``numba``: # https://arxiv.org/abs/cs/0610046 @@ -113,7 +113,7 @@ def chart_maxmin( # TODO: we need to NOT call this to avoid a manual # np.max/min trigger and especially on the vlm_chart - # flows which aren't shown.. like vlm? + # vizs which aren't shown.. like vlm? if vlm_chart: out = vlm_chart.maxmin() if out: @@ -220,7 +220,7 @@ class DisplayState(Struct): _, _, _, r = chart.bars_range() liv = r >= shm.index - # update the "last datum" (aka extending the flow graphic with + # update the "last datum" (aka extending the vizs graphic with # new data) only if the number of unit steps is >= the number of # such unit steps per pixel (aka uppx). 
Iow, if the zoom level # is such that a datum(s) update to graphics wouldn't span @@ -301,14 +301,14 @@ async def graphics_update_loop( fqsn = symbol.fqsn # update last price sticky - fast_pi = fast_chart._flows[fqsn].plot + fast_pi = fast_chart._vizs[fqsn].plot last_price_sticky = fast_pi.getAxis('right')._stickies[fqsn] last_price_sticky.update_from_data( *ohlcv.array[-1][['index', 'close']] ) last_price_sticky.show() - slow_pi = hist_chart._flows[fqsn].plot + slow_pi = hist_chart._vizs[fqsn].plot hist_last_price_sticky = slow_pi.getAxis('right')._stickies[fqsn] hist_last_price_sticky.update_from_data( *hist_ohlcv.array[-1][['index', 'close']] @@ -383,7 +383,7 @@ async def graphics_update_loop( }) if vlm_chart: - vlm_pi = vlm_chart._flows['volume'].plot + vlm_pi = vlm_chart._vizs['volume'].plot vlm_sticky = vlm_pi.getAxis('right')._stickies['volume'] ds.vlm_chart = vlm_chart ds.vlm_sticky = vlm_sticky @@ -441,8 +441,8 @@ async def graphics_update_loop( and liv ): # hist_chart.increment_view(steps=i_diff) - flow = hist_chart._flows[fqsn] - flow.plot.vb._set_yrange( + viz = hist_chart._vizs[fqsn] + viz.plot.vb._set_yrange( # yrange=hist_chart.maxmin(name=fqsn) ) # hist_chart.view._set_yrange(yrange=hist_chart.maxmin()) @@ -518,7 +518,7 @@ def graphics_update_cycle( flume = ds.flume sym = flume.symbol fqsn = sym.fqsn - main_flow = chart._flows[fqsn] + main_viz = chart._vizs[fqsn] profiler = Profiler( msg=f'Graphics loop cycle for: `{chart.name}`', @@ -562,7 +562,7 @@ def graphics_update_cycle( ): # print(f'INCREMENTING {fqsn}') chart.increment_view(steps=i_diff) - main_flow.plot.vb._set_yrange( + main_viz.plot.vb._set_yrange( # yrange=(mn, mx), ) @@ -629,7 +629,7 @@ def graphics_update_cycle( # chart.name, # do_append=do_append, ) - main_flow.draw_last(array_key=fqsn) + main_viz.draw_last(array_key=fqsn) hist_chart.update_graphics_from_flow( fqsn, @@ -748,7 +748,7 @@ def graphics_update_cycle( and not chart._static_yrange == 'axis' ): # main_vb = chart.view - 
main_vb = chart._flows[fqsn].plot.vb + main_vb = chart._vizs[fqsn].plot.vb if ( main_vb._ic is None or not main_vb._ic.is_set() @@ -779,26 +779,26 @@ def graphics_update_cycle( is_1m=True, ) if hist_liv: - flow = hist_chart._flows[fqsn] - flow.plot.vb._set_yrange( + viz = hist_chart._vizs[fqsn] + viz.plot.vb._set_yrange( # yrange=hist_chart.maxmin(name=fqsn), ) # XXX: update this every draw cycle to make L1-always-in-view work. vars['last_mx'], vars['last_mn'] = mx, mn - # run synchronous update on all linked flows - # TODO: should the "main" (aka source) flow be special? - for curve_name, flow in chart._flows.items(): + # run synchronous update on all linked viz + # TODO: should the "main" (aka source) viz be special? + for curve_name, viz in chart._vizs.items(): # update any overlayed fsp flows if ( # curve_name != chart.data_key curve_name != fqsn - and not flow.is_ohlc + and not viz.is_ohlc ): update_fsp_chart( chart, - flow, + viz, curve_name, array_key=curve_name, ) @@ -812,7 +812,7 @@ def graphics_update_cycle( # and not do_append # and not do_rt_update ): - flow.draw_last( + viz.draw_last( array_key=curve_name, only_last_uppx=True, ) @@ -821,7 +821,7 @@ def graphics_update_cycle( # TODO: can we unify this with the above loop? if vlm_chart: # print(f"DOING VLM {fqsn}") - vlm_flows = vlm_chart._flows + vlm_vizs = vlm_chart._vizs # always update y-label ds.vlm_sticky.update_from_data( @@ -866,21 +866,21 @@ def graphics_update_cycle( vars['last_mx_vlm'] = mx_vlm_in_view # update all downstream FSPs - for curve_name, flow in vlm_flows.items(): + for curve_name, viz in vlm_vizs.items(): if ( curve_name not in {'volume', fqsn} - and flow.render + and viz.render and ( liv and do_rt_update or do_append ) - # and not flow.is_ohlc + # and not viz.is_ohlc # and curve_name != fqsn ): update_fsp_chart( vlm_chart, - flow, + viz, curve_name, array_key=curve_name, # do_append=uppx < update_uppx, @@ -889,7 +889,7 @@ def graphics_update_cycle( # is this even doing anything? 
# (pretty sure it's the real-time # resizing from last quote?) - fvb = flow.plot.vb + fvb = viz.plot.vb fvb._set_yrange( name=curve_name, ) @@ -905,9 +905,9 @@ def graphics_update_cycle( # range of that set. ): # always update the last datum-element - # graphic for all flows - # print(f'drawing last {flow.name}') - flow.draw_last(array_key=curve_name) + # graphic for all vizs + # print(f'drawing last {viz.name}') + viz.draw_last(array_key=curve_name) async def link_views_with_region( @@ -937,12 +937,12 @@ async def link_views_with_region( hist_pi.addItem(region, ignoreBounds=True) region.setOpacity(6/16) - flow = rt_chart._flows[flume.symbol.fqsn] - assert flow + viz = rt_chart._vizs[flume.symbol.fqsn] + assert viz # XXX: no idea why this doesn't work but it's causing # a weird placement of the region on the way-far-left.. - # region.setClipItem(flow.graphics) + # region.setClipItem(viz.graphics) # poll for datums load and timestep detection for _ in range(100): @@ -1052,11 +1052,11 @@ def multi_maxmin( ) -> tuple[float, float]: ''' - Flows "group" maxmin loop; assumes all named flows + Viz "group" maxmin loop; assumes all named vizs are in the same co-domain and thus can be sorted as one set. - Iterates all the named flows and calls the chart + Iterates all the named vizs and calls the chart api to find their range values and return. TODO: really we should probably have a more built-in API @@ -1279,7 +1279,7 @@ async def display_symbol_data( hist_pi.hideAxis('left') hist_pi.hideAxis('bottom') - flow = hist_chart.draw_curve( + viz = hist_chart.draw_curve( fqsn, hist_ohlcv, flume, @@ -1300,8 +1300,8 @@ async def display_symbol_data( # specially store ref to shm for lookup in display loop # since only a placeholder of `None` is entered in # ``.draw_curve()``. 
- flow = hist_chart._flows[fqsn] - assert flow.plot is hist_pi + viz = hist_chart._vizs[fqsn] + assert viz.plot is hist_pi pis.setdefault(fqsn, [None, None])[1] = hist_pi rt_pi = rt_chart.overlay_plotitem( @@ -1312,7 +1312,7 @@ async def display_symbol_data( rt_pi.hideAxis('left') rt_pi.hideAxis('bottom') - flow = rt_chart.draw_curve( + viz = rt_chart.draw_curve( fqsn, ohlcv, flume, @@ -1333,8 +1333,8 @@ async def display_symbol_data( # specially store ref to shm for lookup in display loop # since only a placeholder of `None` is entered in # ``.draw_curve()``. - flow = rt_chart._flows[fqsn] - assert flow.plot is rt_pi + viz = rt_chart._vizs[fqsn] + assert viz.plot is rt_pi pis.setdefault(fqsn, [None, None])[0] = rt_pi rt_chart.setFocus() diff --git a/piker/ui/_editors.py b/piker/ui/_editors.py index 3703558a..08f19852 100644 --- a/piker/ui/_editors.py +++ b/piker/ui/_editors.py @@ -377,7 +377,7 @@ class SelectRect(QtWidgets.QGraphicsRectItem): nbars = ixmx - ixmn + 1 chart = self._chart - data = chart._flows[chart.name].shm.array[ixmn:ixmx] + data = chart.get_viz(chart.name).shm.array[ixmn:ixmx] if len(data): std = data['close'].std() diff --git a/piker/ui/_flows.py b/piker/ui/_flows.py index 2e04bb37..97abca94 100644 --- a/piker/ui/_flows.py +++ b/piker/ui/_flows.py @@ -65,7 +65,7 @@ log = get_logger(__name__) def render_baritems( - flow: Flow, + viz: Viz, graphics: BarItems, read: tuple[ int, int, np.ndarray, @@ -89,7 +89,7 @@ def render_baritems( bars = graphics # if no source data renderer exists create one. 
- self = flow + self = viz show_bars: bool = False r = self._src_r @@ -98,28 +98,28 @@ def render_baritems( # OHLC bars path renderer r = self._src_r = Renderer( - flow=self, + viz=self, fmtr=OHLCBarsFmtr( - shm=flow.shm, - flow=flow, + shm=viz.shm, + viz=viz, _last_read=read, ), ) ds_curve_r = Renderer( - flow=self, + viz=self, fmtr=OHLCBarsAsCurveFmtr( - shm=flow.shm, - flow=flow, + shm=viz.shm, + viz=viz, _last_read=read, ), ) curve = FlattenedOHLC( - name=f'{flow.name}_ds_ohlc', + name=f'{viz.name}_ds_ohlc', color=bars._color, ) - flow.ds_graphics = curve + viz.ds_graphics = curve curve.hide() self.plot.addItem(curve) @@ -142,7 +142,7 @@ def render_baritems( ): # print('FLIPPING TO BARS') should_line = False - flow._in_ds = False + viz._in_ds = False elif ( not in_line @@ -150,7 +150,7 @@ def render_baritems( ): # print('FLIPPING TO LINE') should_line = True - flow._in_ds = True + viz._in_ds = True profiler(f'ds logic complete line={should_line}') @@ -196,9 +196,9 @@ def render_baritems( ) -class Flow(msgspec.Struct): # , frozen=True): +class Viz(msgspec.Struct): # , frozen=True): ''' - (Financial Signal-)Flow compound type which wraps a real-time + (Data) "Visualization" compound type which wraps a real-time shm array stream with displayed graphics (curves, charts) for high level access and control as well as efficient incremental update. @@ -216,7 +216,7 @@ class Flow(msgspec.Struct): # , frozen=True): # for tracking y-mn/mx for y-axis auto-ranging yrange: tuple[float, float] = None - # in some cases a flow may want to change its + # in some cases a viz may want to change its # graphical "type" or, "form" when downsampling, to # start this is only ever an interpolation line. ds_graphics: Optional[Curve] = None @@ -251,12 +251,6 @@ class Flow(msgspec.Struct): # , frozen=True): def shm(self) -> ShmArray: return self._shm - # TODO: remove this and only allow setting through - # private ``._shm`` attr? 
- # @shm.setter - # def shm(self, shm: ShmArray) -> ShmArray: - # self._shm = shm - def maxmin( self, lbar: int, @@ -318,7 +312,7 @@ class Flow(msgspec.Struct): # , frozen=True): def view_range(self) -> tuple[int, int]: ''' Return the indexes in view for the associated - plot displaying this flow's data. + plot displaying this viz's data. ''' vr = self.plot.viewRect() @@ -344,7 +338,7 @@ class Flow(msgspec.Struct): # , frozen=True): # TODO: avoid this and have shm passed # in earlier. if self.shm is None: - # haven't initialized the flow yet + # haven't initialized the viz yet return (0, l, 0, 0, r, 0) array = self.shm.array @@ -420,7 +414,7 @@ class Flow(msgspec.Struct): # , frozen=True): ''' profiler = Profiler( - msg=f'Flow.update_graphics() for {self.name}', + msg=f'Viz.update_graphics() for {self.name}', disabled=not pg_profile_enabled(), ms_threshold=4, # ms_threshold=ms_slower_then, @@ -475,10 +469,10 @@ class Flow(msgspec.Struct): # , frozen=True): if isinstance(graphics, StepCurve): r = self._src_r = Renderer( - flow=self, + viz=self, fmtr=StepCurveFmtr( shm=self.shm, - flow=self, + viz=self, _last_read=read, ), ) @@ -493,10 +487,10 @@ class Flow(msgspec.Struct): # , frozen=True): if not r: # just using for ``.diff()`` atm.. r = self._src_r = Renderer( - flow=self, + viz=self, fmtr=IncrementalFormatter( shm=self.shm, - flow=self, + viz=self, _last_read=read, ), ) @@ -581,7 +575,7 @@ class Flow(msgspec.Struct): # , frozen=True): path, data, reset = out # if self.yrange: - # print(f'flow {self.name} yrange from m4: {self.yrange}') + # print(f'viz {self.name} yrange from m4: {self.yrange}') # XXX: SUPER UGGGHHH... without this we get stale cache # graphics that don't update until you downsampler again.. 
@@ -691,7 +685,7 @@ class Flow(msgspec.Struct): # , frozen=True): class Renderer(msgspec.Struct): - flow: Flow + viz: Viz fmtr: IncrementalFormatter # output graphics rendering, the main object @@ -794,7 +788,7 @@ class Renderer(msgspec.Struct): - blah blah blah (from notes) ''' - # TODO: can the renderer just call ``Flow.read()`` directly? + # TODO: can the renderer just call ``Viz.read()`` directly? # unpack latest source data read fmtr = self.fmtr @@ -858,7 +852,7 @@ class Renderer(msgspec.Struct): path is None or should_redraw ): - # print(f"{self.flow.name} -> REDRAWING BRUH") + # print(f"{self.viz.name} -> REDRAWING BRUH") if new_sample_rate and showing_src_data: log.info(f'DEDOWN -> {array_key}') self._in_ds = False @@ -870,8 +864,8 @@ class Renderer(msgspec.Struct): y_1d, uppx, ) - self.flow.yrange = ymn, ymx - # print(f'{self.flow.name} post ds: ymn, ymx: {ymn},{ymx}') + self.viz.yrange = ymn, ymx + # print(f'{self.viz.name} post ds: ymn, ymx: {ymn},{ymx}') reset = True profiler(f'FULL PATH downsample redraw={should_ds}') @@ -942,7 +936,7 @@ class Renderer(msgspec.Struct): profiler('generated append qpath') if use_fpath: - # print(f'{self.flow.name}: FAST PATH') + # print(f'{self.viz.name}: FAST PATH') # an attempt at trying to make append-updates faster.. if fast_path is None: fast_path = append_path diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index 29162635..c3b13cbc 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -289,7 +289,7 @@ async def run_fsp_ui( # first UI update, usually from shm pushed history update_fsp_chart( chart, - chart._flows[array_key], + chart.get_viz(array_key), name, array_key=array_key, ) @@ -357,7 +357,7 @@ async def run_fsp_ui( # last = time.time() -# TODO: maybe this should be our ``Flow`` type since it maps +# TODO: maybe this should be our ``Viz`` type since it maps # one flume to the next? The machinery for task/actor mgmt should # be part of the instantiation API? 
class FspAdmin: @@ -386,7 +386,7 @@ class FspAdmin: # TODO: make this a `.src_flume` and add # a `dst_flume`? - # (=> but then wouldn't this be the most basic `Flow`?) + # (=> but then wouldn't this be the most basic `Viz`?) self.flume = flume def rr_next_portal(self) -> tractor.Portal: @@ -694,7 +694,7 @@ async def open_vlm_displays( ) -> tuple[float, float]: ''' - Flows "group" maxmin loop; assumes all named flows + Viz "group" maxmin loop; assumes all named flows are in the same co-domain and thus can be sorted as one set. @@ -865,7 +865,7 @@ async def open_vlm_displays( # specially store ref to shm for lookup in display loop # since only a placeholder of `None` is entered in # ``.draw_curve()``. - # flow = chart._flows[name] + # viz = chart._vizs[name] assert flow.plot is pi chart_curves( @@ -901,7 +901,7 @@ async def open_vlm_displays( # liquidity events (well at least on low OHLC periods - 1s). vlm_curve.hide() chart.removeItem(vlm_curve) - vflow = chart._flows['volume'] + vflow = chart._vizs['volume'] vflow.render = False # avoid range sorting on volume once disabled diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index e17e662e..2d755c57 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -504,7 +504,7 @@ class ChartView(ViewBox): # if ( # ev.delta() < 0 - # and vl >= len(chart._flows[chart.name].shm.array) + 666 + # and vl >= len(chart._vizs[chart.name].shm.array) + 666 # ): # log.debug("Min zoom bruh...") # return @@ -821,7 +821,7 @@ class ChartView(ViewBox): # XXX: only compute the mxmn range # if none is provided as input! if not yrange: - # flow = chart._flows[name] + # flow = chart._vizs[name] yrange = self._maxmin() if yrange is None: @@ -912,7 +912,7 @@ class ChartView(ViewBox): graphics items which are our children. 
''' - graphics = [f.graphics for f in self._chart._flows.values()] + graphics = [f.graphics for f in self._chart._vizs.values()] if not graphics: return 0 @@ -948,7 +948,7 @@ class ChartView(ViewBox): plots |= linked.subplots for chart_name, chart in plots.items(): - for name, flow in chart._flows.items(): + for name, flow in chart._vizs.items(): if ( not flow.render diff --git a/piker/ui/_pathops.py b/piker/ui/_pathops.py index 8b3eecaf..a4ce947a 100644 --- a/piker/ui/_pathops.py +++ b/piker/ui/_pathops.py @@ -43,7 +43,7 @@ from ._compression import ( if TYPE_CHECKING: from ._flows import ( Renderer, - Flow, + Viz, ) from .._profile import Profiler @@ -73,7 +73,7 @@ class IncrementalFormatter(msgspec.Struct): ''' shm: ShmArray - flow: Flow + viz: Viz # last read from shm (usually due to an update call) _last_read: tuple[ @@ -90,7 +90,7 @@ class IncrementalFormatter(msgspec.Struct): def __repr__(self) -> str: msg = ( f'{type(self)}: ->\n\n' - f'fqsn={self.flow.name}\n' + f'fqsn={self.viz.name}\n' f'shm_name={self.shm.token["shm_name"]}\n\n' f'last_vr={self._last_vr}\n' @@ -130,7 +130,7 @@ class IncrementalFormatter(msgspec.Struct): last_in_view, ) = self.last_read - # TODO: can the renderer just call ``Flow.read()`` directly? + # TODO: can the renderer just call ``Viz.read()`` directly? 
# unpack latest source data read ( xfirst, @@ -337,7 +337,7 @@ class IncrementalFormatter(msgspec.Struct): if slice_to_inview: view_changed = self._track_inview_range(view_range) array = in_view - profiler(f'{self.flow.name} view range slice {view_range}') + profiler(f'{self.viz.name} view range slice {view_range}') hist = array[:slice_to_head] @@ -370,9 +370,9 @@ class IncrementalFormatter(msgspec.Struct): # # assert (len(appended) - 1) == append_len # # assert len(appended) == append_len # print( - # f'{self.flow.name} APPEND LEN: {append_len}\n' - # f'{self.flow.name} APPENDED: {appended}\n' - # f'{self.flow.name} app_tres: {app_tres}\n' + # f'{self.viz.name} APPEND LEN: {append_len}\n' + # f'{self.viz.name} APPENDED: {appended}\n' + # f'{self.viz.name} app_tres: {app_tres}\n' # ) # update the last "in view data range" diff --git a/piker/ui/qt/__init__.py b/piker/ui/qt/__init__.py deleted file mode 100644 index 8513b317..00000000 --- a/piker/ui/qt/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -Super hawt Qt UI components -""" diff --git a/piker/ui/qt/stackof_candle.py b/piker/ui/qt/stackof_candle.py deleted file mode 100644 index 0bcd37e4..00000000 --- a/piker/ui/qt/stackof_candle.py +++ /dev/null @@ -1,67 +0,0 @@ -import sys - -from PySide2.QtCharts import QtCharts -from PySide2.QtWidgets import QApplication, QMainWindow -from PySide2.QtCore import Qt, QPointF -from PySide2 import QtGui -import qdarkstyle - -data = ((1, 7380, 7520, 7380, 7510, 7324), - (2, 7520, 7580, 7410, 7440, 7372), - (3, 7440, 7650, 7310, 7520, 7434), - (4, 7450, 7640, 7450, 7550, 7480), - (5, 7510, 7590, 7460, 7490, 7502), - (6, 7500, 7590, 7480, 7560, 7512), - (7, 7560, 7830, 7540, 7800, 7584)) - - -app = QApplication([]) -# set dark stylesheet -# import pdb; pdb.set_trace() -app.setStyleSheet(qdarkstyle.load_stylesheet_pyside()) - -series = QtCharts.QCandlestickSeries() -series.setDecreasingColor(Qt.darkRed) -series.setIncreasingColor(Qt.darkGreen) - -ma5 = QtCharts.QLineSeries() # 
5-days average data line -tm = [] # stores str type data - -# in a loop, series and ma5 append corresponding data -for num, o, h, l, c, m in data: - candle = QtCharts.QCandlestickSet(o, h, l, c) - series.append(candle) - ma5.append(QPointF(num, m)) - tm.append(str(num)) - -pen = candle.pen() -# import pdb; pdb.set_trace() - -chart = QtCharts.QChart() - -# import pdb; pdb.set_trace() -series.setBodyOutlineVisible(False) -series.setCapsVisible(False) -# brush = QtGui.QBrush() -# brush.setColor(Qt.green) -# series.setBrush(brush) -chart.addSeries(series) # candle -chart.addSeries(ma5) # ma5 line - -chart.setAnimationOptions(QtCharts.QChart.SeriesAnimations) -chart.createDefaultAxes() -chart.legend().hide() - -chart.axisX(series).setCategories(tm) -chart.axisX(ma5).setVisible(False) - -view = QtCharts.QChartView(chart) -view.chart().setTheme(QtCharts.QChart.ChartTheme.ChartThemeDark) -view.setRubberBand(QtCharts.QChartView.HorizontalRubberBand) -# chartview.chart().setTheme(QtCharts.QChart.ChartTheme.ChartThemeBlueCerulean) - -ui = QMainWindow() -# ui.setGeometry(50, 50, 500, 300) -ui.setCentralWidget(view) -ui.show() -sys.exit(app.exec_()) From baee86a2d67d2d4ed90b08f842148569af792447 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 24 Nov 2022 15:48:42 -0500 Subject: [PATCH 03/96] Rename `.ui._flows.py` -> `.ui._render.py` --- piker/ui/_chart.py | 2 +- piker/ui/_curve.py | 2 +- piker/ui/_pathops.py | 2 +- piker/ui/{_flows.py => _render.py} | 0 4 files changed, 3 insertions(+), 3 deletions(-) rename piker/ui/{_flows.py => _render.py} (100%) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 55579906..2820a0f1 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -72,7 +72,7 @@ from ._interaction import ChartView from ._forms import FieldsForm from .._profile import pg_profile_enabled, ms_slower_then from ._overlay import PlotItemOverlay -from ._flows import Viz +from ._render import Viz from ._search import SearchWidget from . 
import _pg_overrides as pgo from .._profile import Profiler diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index b9a143a2..663b328f 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -72,7 +72,7 @@ class Curve(pg.GraphicsObject): lower level graphics data can be rendered in different threads and then read and drawn in this main thread without having to worry about dealing with Qt's concurrency primitives. See - ``piker.ui._flows.Renderer`` for details and logic related to lower + ``piker.ui._render.Renderer`` for details and logic related to lower level path generation and incremental update. The main differences in the path generation code include: diff --git a/piker/ui/_pathops.py b/piker/ui/_pathops.py index a4ce947a..77928c1d 100644 --- a/piker/ui/_pathops.py +++ b/piker/ui/_pathops.py @@ -41,7 +41,7 @@ from ._compression import ( ) if TYPE_CHECKING: - from ._flows import ( + from ._render import ( Renderer, Viz, ) diff --git a/piker/ui/_flows.py b/piker/ui/_render.py similarity index 100% rename from piker/ui/_flows.py rename to piker/ui/_render.py From e45bc4c619cbc493a97cd8915cbda97a749a6a6f Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 24 Nov 2022 16:22:20 -0500 Subject: [PATCH 04/96] Move `ui._compression`/`._pathops` to `.data` subpkg Since these modules no longer contain Qt specific code we might as well include them in the data sub-package. Also, add `IncrementalFormatter.index_field` as single point to def the indexing field that should be used for all x-domain graphics-data rendering. 
--- piker/{ui => data}/_compression.py | 0 piker/{ui => data}/_pathops.py | 31 +++++++++--------------------- piker/ui/_render.py | 10 +++++----- 3 files changed, 14 insertions(+), 27 deletions(-) rename piker/{ui => data}/_compression.py (100%) rename piker/{ui => data}/_pathops.py (97%) diff --git a/piker/ui/_compression.py b/piker/data/_compression.py similarity index 100% rename from piker/ui/_compression.py rename to piker/data/_compression.py diff --git a/piker/ui/_pathops.py b/piker/data/_pathops.py similarity index 97% rename from piker/ui/_pathops.py rename to piker/data/_pathops.py index 77928c1d..62a8a8da 100644 --- a/piker/ui/_pathops.py +++ b/piker/data/_pathops.py @@ -32,7 +32,7 @@ from msgspec import field # from PyQt5 import QtGui # from PyQt5.QtCore import QLineF, QPointF -from ..data._sharedmem import ( +from ._sharedmem import ( ShmArray, ) # from .._profile import pg_profile_enabled, ms_slower_then @@ -42,26 +42,11 @@ from ._compression import ( if TYPE_CHECKING: from ._render import ( - Renderer, Viz, ) from .._profile import Profiler -def by_index_and_key( - renderer: Renderer, - array: np.ndarray, - array_key: str, - vr: tuple[int, int], - -) -> tuple[ - np.ndarray, - np.ndarray, - np.ndarray, -]: - return array['index'], array[array_key], 'all' - - class IncrementalFormatter(msgspec.Struct): ''' Incrementally updating, pre-path-graphics tracking, formatter. 
@@ -74,6 +59,7 @@ class IncrementalFormatter(msgspec.Struct): ''' shm: ShmArray viz: Viz + index_field: str = 'index' # last read from shm (usually due to an update call) _last_read: tuple[ @@ -407,7 +393,6 @@ class IncrementalFormatter(msgspec.Struct): self, src_shm: ShmArray, data_field: str, - index_field: str = 'index', ) -> tuple[ np.ndarray, # x @@ -421,7 +406,7 @@ class IncrementalFormatter(msgspec.Struct): ''' y_nd = src_shm._array[data_field].copy() - x_nd = src_shm._array[index_field].copy() + x_nd = src_shm._array[self.index_field].copy() return x_nd, y_nd # XXX: was ``.update_xy()`` @@ -478,7 +463,7 @@ class IncrementalFormatter(msgspec.Struct): ''' return ( - array['index'], + array[self.index_field], array[array_key], # 1d connection array or style-key to @@ -512,7 +497,7 @@ class OHLCBarsFmtr(IncrementalFormatter): # generate an flat-interpolated x-domain x_nd = ( np.broadcast_to( - ohlc_shm._array['index'][:, None], + ohlc_shm._array[self.index_field][:, None], ( ohlc_shm._array.size, # 4, # only ohlc @@ -543,6 +528,7 @@ class OHLCBarsFmtr(IncrementalFormatter): data: np.ndarray, start: int64, bar_gap: float64 = 0.43, + index_field: str = 'index', ) -> tuple[ np.ndarray, @@ -574,7 +560,7 @@ class OHLCBarsFmtr(IncrementalFormatter): high = q['high'] low = q['low'] close = q['close'] - index = float64(q['index']) + index = float64(q[index_field]) istart = i * 6 istop = istart + 6 @@ -631,6 +617,7 @@ class OHLCBarsFmtr(IncrementalFormatter): array, start, bar_gap=w, + index_field=self.index_field, ) return x, y, c @@ -723,7 +710,7 @@ class StepCurveFmtr(IncrementalFormatter): for use by path graphics generation. 
''' - i = shm._array['index'].copy() + i = shm._array[self.index_field].copy() out = shm._array[data_field].copy() x_out = np.broadcast_to( diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 97abca94..e44251e1 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -37,17 +37,17 @@ from ..data._sharedmem import ( ShmArray, ) from ..data.feed import Flume -from .._profile import ( - pg_profile_enabled, - # ms_slower_then, -) -from ._pathops import ( +from ..data._pathops import ( IncrementalFormatter, OHLCBarsFmtr, # Plain OHLC renderer OHLCBarsAsCurveFmtr, # OHLC converted to line StepCurveFmtr, # "step" curve (like for vlm) xy_downsample, ) +from .._profile import ( + pg_profile_enabled, + # ms_slower_then, +) from ._ohlc import ( BarItems, # bar_from_ohlc_row, From d3f5ff1b4f1662e568a8e7ed442cf9491ccf0a2a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 25 Nov 2022 13:25:38 -0500 Subject: [PATCH 05/96] Go back to hard-coded index field Turns out https://github.com/numba/numba/issues/8622 is real and the suggested `numba.literally` hack doesn't seem to work.. 
--- piker/data/_pathops.py | 44 ++++++++++++++++++++++++++++-------------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 62a8a8da..850f3a0b 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -24,18 +24,23 @@ from typing import ( ) import msgspec +from msgspec import field import numpy as np from numpy.lib import recfunctions as rfn -from numba import njit, float64, int64 # , optional -from msgspec import field -# import pyqtgraph as pg -# from PyQt5 import QtGui -# from PyQt5.QtCore import QLineF, QPointF +from numba import ( + types, + njit, + float64, + int64, + optional, +) +from numba.core.types.misc import StringLiteral +# from numba.extending import as_numba_type from ._sharedmem import ( ShmArray, ) -# from .._profile import pg_profile_enabled, ms_slower_then +# from ._source import numba_ohlc_dtype from ._compression import ( ds_m4, ) @@ -474,7 +479,9 @@ class IncrementalFormatter(msgspec.Struct): class OHLCBarsFmtr(IncrementalFormatter): - fields: list[str] = field(default_factory=lambda: ['open', 'high', 'low', 'close']) + fields: list[str] = field( + default_factory=lambda: ['open', 'high', 'low', 'close'] + ) def allocate_xy_nd( self, @@ -515,11 +522,17 @@ class OHLCBarsFmtr(IncrementalFormatter): @staticmethod @njit( - # TODO: for now need to construct this manually for readonly + # NOTE: need to construct this manually for readonly # arrays, see https://github.com/numba/numba/issues/4511 - # ntypes.tuple((float64[:], float64[:], float64[:]))( - # numba_ohlc_dtype[::1], # contiguous + # ( + # types.Array( + # numba_ohlc_dtype, + # 1, + # 'C', + # readonly=True, + # ), # int64, + # types.unicode_type, # optional(float64), # ), nogil=True @@ -528,7 +541,7 @@ class OHLCBarsFmtr(IncrementalFormatter): data: np.ndarray, start: int64, bar_gap: float64 = 0.43, - index_field: str = 'index', + # index_field: str, ) -> tuple[ np.ndarray, @@ -541,8 +554,10 @@ class 
OHLCBarsFmtr(IncrementalFormatter): ''' size = int(data.shape[0] * 6) + # XXX: see this for why the dtype might have to be defined outside + # the routine. + # https://github.com/numba/numba/issues/4098#issuecomment-493914533 x = np.zeros( - # data, shape=size, dtype=float64, ) @@ -560,7 +575,8 @@ class OHLCBarsFmtr(IncrementalFormatter): high = q['high'] low = q['low'] close = q['close'] - index = float64(q[index_field]) + # index = float64(q[index_field]) + index = float64(q['index']) istart = i * 6 istop = istart + 6 @@ -616,8 +632,8 @@ class OHLCBarsFmtr(IncrementalFormatter): x, y, c = self.path_arrays_from_ohlc( array, start, + # self.index_field, bar_gap=w, - index_field=self.index_field, ) return x, y, c From 5b08e9cba3b44788ba426192e6fad5b8d5f9184e Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 28 Nov 2022 09:44:33 -0500 Subject: [PATCH 06/96] Add breakpoint on -ve range for now --- piker/data/_compression.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/piker/data/_compression.py b/piker/data/_compression.py index c66b3e58..aed1d7d3 100644 --- a/piker/data/_compression.py +++ b/piker/data/_compression.py @@ -190,6 +190,8 @@ def ds_m4( if xrange is None: x_end = x[-1] # x end value/highest in domain xrange = (x_end - x_start) + if xrange < 0: + breakpoint() # XXX: always round up on the input pixels # lnx = len(x) From 6cacd7d18b1714b647fbf7d5daf32770beeae9fb Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 28 Nov 2022 10:09:45 -0500 Subject: [PATCH 07/96] Make `Viz.slice_from_time()` take input array Probably means it doesn't need to be a `Flume` method but it's convenient to expect the caller to pass in the `np.ndarray` with a `'time'` field instead of a `timeframe: str` arg; also, return the slice mask instead of the sliced array as output (again allowing the caller to do any slicing). Also, handle the slice-outside-time-range case by just returning the entire index range with a `None` mask. 
Adjust `Viz.view_data()` to instead do timeframe (for rt vs. hist shm array) lookup and equiv array slicing with the returned mask. --- piker/data/flows.py | 88 ++++++++++++++++++++++++++++++++------------- 1 file changed, 64 insertions(+), 24 deletions(-) diff --git a/piker/data/flows.py b/piker/data/flows.py index 9bb27230..655f9ff7 100644 --- a/piker/data/flows.py +++ b/piker/data/flows.py @@ -236,29 +236,43 @@ class Flume(Struct): def slice_from_time( self, - array: np.ndarray, + arr: np.ndarray, start_t: float, stop_t: float, - timeframe_s: int = 1, - return_data: bool = False, - ) -> np.ndarray: + ) -> tuple[ + slice, + slice, + np.ndarray | None, + ]: ''' - Slice an input struct array providing only datums - "in view" of this chart. + Slice an input struct array to a time range and return the absolute + and "readable" slices for that array as well as the indexing mask + for the caller to use to slice the input array if needed. ''' - arr = { - 1: self.rt_shm.array, - 60: self.hist_shm.arry, - }[timeframe_s] - times = arr['time'] - index = array['index'] + index = arr['index'] + + if ( + start_t < 0 + or start_t >= stop_t + ): + return ( + slice( + index[0], + index[-1], + ), + slice( + 0, + len(arr), + ), + None, + ) # use advanced indexing to map the # time range to the index range. 
- mask = ( + mask: np.ndarray = ( (times >= start_t) & (times < stop_t) @@ -273,7 +287,24 @@ class Flume(Struct): # ] i_by_t = index[mask] - i_0 = i_by_t[0] + try: + i_0 = i_by_t[0] + except IndexError: + if ( + start_t < times[0] + or stop_t >= times[-1] + ): + return ( + slice( + index[0], + index[-1], + ), + slice( + 0, + len(arr), + ), + None, + ) abs_slc = slice( i_0, @@ -285,17 +316,12 @@ class Flume(Struct): 0, i_by_t[-1] - i_0, ) - if not return_data: - return ( - abs_slc, - read_slc, - ) # also return the readable data from the timerange return ( abs_slc, read_slc, - arr[mask], + mask, ) def view_data( @@ -304,18 +330,32 @@ class Flume(Struct): timeframe_s: int = 1, ) -> np.ndarray: + ''' + Return sliced-to-view source data along with absolute + (``ShmArray._array['index']``) and read-relative + (``ShmArray.array``) slices. + ''' # get far-side x-indices plot view vr = plot.viewRect() + if timeframe_s > 1: + arr = self.hist_shm.array + else: + arr = self.rt_shm.array + ( abs_slc, - buf_slc, - iv_arr, + read_slc, + mask, ) = self.slice_from_time( + arr, start_t=vr.left(), stop_t=vr.right(), timeframe_s=timeframe_s, - return_data=True, ) - return iv_arr + return ( + abs_slc, + read_slc, + arr[mask] if mask is not None else arr, + ) From 5a0673d66fb0927c86855e03473aa1da19be045a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 28 Nov 2022 09:47:41 -0500 Subject: [PATCH 08/96] Add `Viz.bars_range()` (moved from chart API) Call it from view kb loop. 
--- piker/ui/_interaction.py | 3 +- piker/ui/_render.py | 149 +++++++++++++++++++++++++++++++-------- 2 files changed, 119 insertions(+), 33 deletions(-) diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 2d755c57..9e9b2d3b 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -76,7 +76,6 @@ async def handle_viewmode_kb_inputs( pressed: set[str] = set() last = time.time() - trigger_mode: str action: str on_next_release: Optional[Callable] = None @@ -495,7 +494,7 @@ class ChartView(ViewBox): chart = self.linked.chart # don't zoom more then the min points setting - l, lbar, rbar, r = chart.bars_range() + l, lbar, rbar, r = chart.get_viz(chart.name).bars_range() # vl = r - l # if ev.delta() > 0 and vl <= _min_points_to_show: diff --git a/piker/ui/_render.py b/piker/ui/_render.py index e44251e1..564c7b5c 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -129,12 +129,18 @@ def render_baritems( ds_r, curve = self._render_table + # print( + # f'r: {r.fmtr.xy_slice}\n' + # f'ds_r: {ds_r.fmtr.xy_slice}\n' + # ) + # do checks for whether or not we require downsampling: # - if we're **not** downsampling then we simply want to # render the bars graphics curve and update.. # - if instead we are in a downsamplig state then we to x_gt = 6 uppx = curve.x_uppx() + print(f'BARS UPPX: {uppx}') in_line = should_line = curve.isVisible() if ( in_line @@ -241,9 +247,6 @@ class Viz(msgspec.Struct): # , frozen=True): # to the underlying shm ref after startup? # _shm: Optional[ShmArray] = None # currently, may be filled in "later" - # last read from shm (usually due to an update call) - _last_read: Optional[np.ndarray] = None - # cache of y-range values per x-range input. _mxmns: dict[tuple[int, int], tuple[float, float]] = {} @@ -263,6 +266,8 @@ class Viz(msgspec.Struct): # , frozen=True): if no range can be determined (yet). ''' + # TODO: hash the slice instead maybe? 
+ # https://stackoverflow.com/a/29980872 rkey = (lbar, rbar) cached_result = self._mxmns.get(rkey) if cached_result: @@ -274,14 +279,27 @@ class Viz(msgspec.Struct): # , frozen=True): arr = shm.array - # build relative indexes into shm array + # get relative slice indexes into array + ( + abs_slc, + read_slc, + mask, + ) = self.flume.slice_from_time( + arr, + start_t=lbar, + stop_t=rbar, + ) + # TODO: should we just add/use a method # on the shm to do this? - ifirst = arr[0]['index'] - slice_view = arr[ - lbar - ifirst: - (rbar - ifirst) + 1 - ] + + # ifirst = arr[0]['index'] + # slice_view = arr[ + # lbar - ifirst: + # (rbar - ifirst) + 1 + # ] + + slice_view = arr[mask] if not slice_view.size: return None @@ -321,9 +339,18 @@ class Viz(msgspec.Struct): # , frozen=True): vr.right(), ) + def bars_range(self) -> tuple[int, int, int, int]: + ''' + Return a range tuple for the bars present in view. + + ''' + start, l, datum_start, datum_stop, r, stop = self.datums_range() + return l, datum_start, datum_stop, r + def datums_range( self, - index_field: str = 'index', + index_field: str = 'time', + ) -> tuple[ int, int, int, int, int, int ]: @@ -335,26 +362,39 @@ class Viz(msgspec.Struct): # , frozen=True): l = round(l) r = round(r) - # TODO: avoid this and have shm passed - # in earlier. - if self.shm is None: - # haven't initialized the viz yet - return (0, l, 0, 0, r, 0) + # # TODO: avoid this and have shm passed + # # in earlier. 
+ # if self.shm is None: + # # haven't initialized the viz yet + # return (0, l, 0, 0, r, 0) array = self.shm.array - index = array['index'] + index = array[index_field] start = index[0] - end = index[-1] - lbar = max(l, start) - rbar = min(r, end) + stop = index[-1] + datum_start = max(l, start) + datum_stop = min(r, stop) return ( - start, l, lbar, rbar, r, end, + start, + l, # left x-in-view + datum_start, + datum_stop, + r, # right-x-in-view + stop, ) + def bars_range(self) -> tuple[int, int, int, int]: + ''' + Return a range tuple for the bars present in view. + + ''' + start, l, datum_start, datum_stop, r, stop = self.datums_range() + return l, datum_start, datum_stop, r + def read( self, array_field: Optional[str] = None, - index_field: str = 'index', + index_field: str = 'time', ) -> tuple[ int, int, np.ndarray, @@ -370,30 +410,59 @@ class Viz(msgspec.Struct): # , frozen=True): # readable data array = self.shm.array - indexes = array[index_field] - ifirst = indexes[0] - ilast = indexes[-1] + # indexes = array[index_field] + # ifirst = indexes[0] + # ilast = indexes[-1] - ifirst, l, lbar, rbar, r, ilast = self.datums_range() + ( + ifirst, + l, + lbar, + rbar, + r, + ilast, + ) = self.datums_range(index_field=index_field) + ( + abs_slc, + read_slc, + mask, + ) = self.flume.slice_from_time( + array, + start_t=lbar, + stop_t=rbar, + ) + + # ( + # abs_slc, + # read_slc, + # in_view, + # ) = self.flume.view_data( + # self.plot, + # ) # get read-relative indices adjusting # for master shm index. - lbar_i = max(l, ifirst) - ifirst - rbar_i = min(r, ilast) - ifirst + # lbar_i = max(l, ifirst) - ifirst + # rbar_i = min(r, ilast) - ifirst + in_view = array[read_slc] if array_field: array = array[array_field] # TODO: we could do it this way as well no? 
# to_draw = array[lbar - ifirst:(rbar - ifirst) + 1] - in_view = array[lbar_i: rbar_i + 1] + # in_view = array[lbar_i: rbar_i + 1] return ( # abs indices + full data set - ifirst, ilast, array, + abs_slc.start, + abs_slc.stop, + array, - # relative indices + in view datums - lbar_i, rbar_i, in_view, + # relative (read) indices + in view data + read_slc.start, + read_slc.stop, + in_view, ) def update_graphics( @@ -495,6 +564,9 @@ class Viz(msgspec.Struct): # , frozen=True): ), ) + if isinstance(graphics, StepCurve): + slice_to_head = -2 + # ``Curve`` derivative case(s): array_key = array_key or self.name # print(array_key) @@ -682,6 +754,21 @@ class Viz(msgspec.Struct): # , frozen=True): # print(f'updating NOT DS curve {self.name}') g.update() + def curve_width_pxs( + self, + ) -> float: + ''' + + Return the width of the current datums in view in pixel units. + ''' + _, lbar, rbar, _ = self.bars_range() + return self.view.mapViewToDevice( + QLineF( + lbar, 0, + rbar, 0 + ) + ).length() + class Renderer(msgspec.Struct): From be21f9829e70b2d151d688ab1179236bdeb37037 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 28 Nov 2022 12:58:47 -0500 Subject: [PATCH 09/96] Pepper render routines with time-slice calls --- piker/ui/_render.py | 115 ++++++++++++++++++++------------------------ 1 file changed, 53 insertions(+), 62 deletions(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 564c7b5c..23cbb7de 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -140,7 +140,7 @@ def render_baritems( # - if instead we are in a downsamplig state then we to x_gt = 6 uppx = curve.x_uppx() - print(f'BARS UPPX: {uppx}') + # print(f'BARS UPPX: {uppx}') in_line = should_line = curve.isVisible() if ( in_line @@ -280,26 +280,25 @@ class Viz(msgspec.Struct): # , frozen=True): arr = shm.array # get relative slice indexes into array - ( - abs_slc, - read_slc, - mask, - ) = self.flume.slice_from_time( - arr, - start_t=lbar, - stop_t=rbar, - ) + # ( + # abs_slc, + # 
read_slc, + # mask, + # ) = self.flume.slice_from_time( + # arr, + # start_t=lbar, + # stop_t=rbar, + # ) + # slice_view = arr[mask] # TODO: should we just add/use a method # on the shm to do this? - # ifirst = arr[0]['index'] - # slice_view = arr[ - # lbar - ifirst: - # (rbar - ifirst) + 1 - # ] - - slice_view = arr[mask] + ifirst = arr[0]['index'] + slice_view = arr[ + lbar - ifirst: + (rbar - ifirst) + 1 + ] if not slice_view.size: return None @@ -349,7 +348,7 @@ class Viz(msgspec.Struct): # , frozen=True): def datums_range( self, - index_field: str = 'time', + index_field: str = 'index', ) -> tuple[ int, int, int, int, int, int @@ -359,11 +358,11 @@ class Viz(msgspec.Struct): # , frozen=True): ''' l, r = self.view_range() - l = round(l) - r = round(r) - # # TODO: avoid this and have shm passed - # # in earlier. + if index_field == 'index': + l, r = round(l), round(r) + + # # TODO: avoid this and have shm passed in earlier? # if self.shm is None: # # haven't initialized the viz yet # return (0, l, 0, 0, r, 0) @@ -383,22 +382,14 @@ class Viz(msgspec.Struct): # , frozen=True): stop, ) - def bars_range(self) -> tuple[int, int, int, int]: - ''' - Return a range tuple for the bars present in view. 
- - ''' - start, l, datum_start, datum_stop, r, stop = self.datums_range() - return l, datum_start, datum_stop, r - def read( self, array_field: Optional[str] = None, - index_field: str = 'time', + index_field: str = 'index', ) -> tuple[ - int, int, np.ndarray, - int, int, np.ndarray, + int, int, np.ndarray, + int, int, np.ndarray, ]: ''' Read the underlying shm array buffer and @@ -410,10 +401,6 @@ class Viz(msgspec.Struct): # , frozen=True): # readable data array = self.shm.array - # indexes = array[index_field] - # ifirst = indexes[0] - # ilast = indexes[-1] - ( ifirst, l, @@ -423,36 +410,40 @@ class Viz(msgspec.Struct): # , frozen=True): ilast, ) = self.datums_range(index_field=index_field) - ( - abs_slc, - read_slc, - mask, - ) = self.flume.slice_from_time( - array, - start_t=lbar, - stop_t=rbar, - ) + abs_slc = slice(ifirst, ilast) - # ( - # abs_slc, - # read_slc, - # in_view, - # ) = self.flume.view_data( - # self.plot, - # ) - # get read-relative indices adjusting - # for master shm index. - # lbar_i = max(l, ifirst) - ifirst - # rbar_i = min(r, ilast) - ifirst - in_view = array[read_slc] + # TODO: support time slicing + if index_field == 'time': + ( + abs_slc, + read_slc, + mask, + ) = self.flume.slice_from_time( + array, + start_t=lbar, + stop_t=rbar, + ) + in_view = array[read_slc] + + # array-index slicing + # TODO: can we do time based indexing using arithmetic presuming + # a uniform time stamp step size? + else: + # get read-relative indices adjusting for master shm index. + lbar_i = max(l, ifirst) - ifirst + rbar_i = min(r, ilast) - ifirst + + # NOTE: the slice here does NOT include the extra ``+ 1`` + # BUT the ``in_view`` slice DOES.. + read_slc = slice(lbar_i, rbar_i) + in_view = array[lbar_i: rbar_i + 1] + + # XXX: same as ^ + # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1] if array_field: array = array[array_field] - # TODO: we could do it this way as well no? 
-        # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1]
-        # in_view = array[lbar_i: rbar_i + 1]
-
         return (
             # abs indices + full data set
             abs_slc.start,
             abs_slc.stop,
             array,

From 696c6f88978edfd1a238d03407090e8375ffc6e7 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Mon, 28 Nov 2022 13:35:38 -0500
Subject: [PATCH 10/96] First attempt, field-index agnostic formatting

Remove hard-coded `'index'` field refs from all formatters in a first
attempt at moving towards epoch-time alignment (though don't actually
use it yet).

Adjustments to the formatter interface:
- property for `.xy_nd` the x/y nd arrays.
- property for `.xy_slice` the nd format array(s) start->stop index
  slice.

Internal routine tweaks:
- drop `read_src_from_key` and always pass full source array on updates
  and adjust handlers to expect to have to index the data field of
  interest.
- set `.last_read` right after update calls instead of after 1d
  conversion.
- drop `slice_to_head` array read slicing.
- add some debug points for testing 'time' indexing (though not used
  here yet).
- add `.x_nd` array update logic for when the `.index_field` is not
  'index' - i.e. when we begin to try and support epoch time.
- simplify some new y_nd updates to not require use of `np.broadcast()`
  where possible.
--- piker/data/_pathops.py | 208 ++++++++++++++++++++++++++--------------- 1 file changed, 132 insertions(+), 76 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 850f3a0b..f02d0345 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -28,14 +28,12 @@ from msgspec import field import numpy as np from numpy.lib import recfunctions as rfn from numba import ( - types, + # types, njit, float64, int64, - optional, + # optional, ) -from numba.core.types.misc import StringLiteral -# from numba.extending import as_numba_type from ._sharedmem import ( ShmArray, @@ -78,6 +76,40 @@ class IncrementalFormatter(msgspec.Struct): def last_read(self) -> tuple | None: return self._last_read + # Incrementally updated xy ndarray formatted data, a pre-1d + # format which is updated and cached independently of the final + # pre-graphics-path 1d format. + x_nd: Optional[np.ndarray] = None + y_nd: Optional[np.ndarray] = None + + @property + def xy_nd(self) -> tuple[np.ndarray, np.ndarray]: + return ( + self.x_nd[self.xy_slice], + self.y_nd[self.xy_slice], + ) + + @property + def xy_slice(self) -> slice: + return slice( + self.xy_nd_start, + self.xy_nd_stop, + ) + + # indexes which slice into the above arrays (which are allocated + # based on source data shm input size) and allow retrieving + # incrementally updated data. + xy_nd_start: int = 0 + xy_nd_stop: int = 0 + + # TODO: eventually incrementally update 1d-pre-graphics path data? 
+ # x_1d: Optional[np.ndarray] = None + # y_1d: Optional[np.ndarray] = None + + # incremental view-change state(s) tracking + _last_vr: tuple[float, float] | None = None + _last_ivdr: tuple[float, float] | None = None + def __repr__(self) -> str: msg = ( f'{type(self)}: ->\n\n' @@ -87,8 +119,8 @@ class IncrementalFormatter(msgspec.Struct): f'last_vr={self._last_vr}\n' f'last_ivdr={self._last_ivdr}\n\n' - f'xy_nd_start={self.xy_nd_start}\n' - f'xy_nd_stop={self.xy_nd_stop}\n\n' + f'xy_slice={self.xy_slice}\n' + # f'xy_nd_stop={self.xy_nd_stop}\n\n' ) x_nd_len = 0 @@ -138,6 +170,12 @@ class IncrementalFormatter(msgspec.Struct): prepend_length = int(last_xfirst - xfirst) append_length = int(xlast - last_xlast) + if ( + prepend_length < 0 + or append_length < 0 + ): + breakpoint() + # blah blah blah # do diffing for prepend, append and last entry return ( @@ -147,26 +185,6 @@ class IncrementalFormatter(msgspec.Struct): slice(last_xlast, xlast), ) - # Incrementally updated xy ndarray formatted data, a pre-1d - # format which is updated and cached independently of the final - # pre-graphics-path 1d format. - x_nd: Optional[np.ndarray] = None - y_nd: Optional[np.ndarray] = None - - # indexes which slice into the above arrays (which are allocated - # based on source data shm input size) and allow retrieving - # incrementally updated data. - xy_nd_start: int = 0 - xy_nd_stop: int = 0 - - # TODO: eventually incrementally update 1d-pre-graphics path data? - # x_1d: Optional[np.ndarray] = None - # y_1d: Optional[np.ndarray] = None - - # incremental view-change state(s) tracking - _last_vr: tuple[float, float] | None = None - _last_ivdr: tuple[float, float] | None = None - def _track_inview_range( self, view_range: tuple[int, int], @@ -245,18 +263,18 @@ class IncrementalFormatter(msgspec.Struct): if self.y_nd is None: # we first need to allocate xy data arrays # from the source data. 
+ self.xy_nd_start = shm._first.value + self.xy_nd_stop = shm._last.value self.x_nd, self.y_nd = self.allocate_xy_nd( shm, array_key, ) - self.xy_nd_start = shm._first.value - self.xy_nd_stop = shm._last.value profiler('allocated xy history') if prepend_len: y_prepend = shm._array[pre_slice] - if read_src_from_key: - y_prepend = y_prepend[array_key] + # if read_src_from_key: + # y_prepend = y_prepend[array_key] ( new_y_nd, @@ -293,8 +311,8 @@ class IncrementalFormatter(msgspec.Struct): if append_len: y_append = shm._array[post_slice] - if read_src_from_key: - y_append = y_append[array_key] + # if read_src_from_key: + # y_append = y_append[array_key] ( new_y_nd, @@ -315,14 +333,16 @@ class IncrementalFormatter(msgspec.Struct): # self.y_nd[post_slice] = new_y_nd # self.y_nd[xy_slice or post_slice] = xy_data self.y_nd[y_nd_slc] = new_y_nd - # if read_src_from_key: - # y_nd_view[:][array_key] = new_y_nd - # else: - # y_nd_view[:] = new_y_nd self.xy_nd_stop = shm._last.value profiler('appened xy history: {append_length}') + # TODO: eventually maybe we can implement some kind of + # transform on the ``QPainterPath`` that will more or less + # detect the diff in "elements" terms? + # update diff state since we've now rendered paths. + self._last_read = new_read + view_changed: bool = False view_range: tuple[int, int] = (ivl, ivr) if slice_to_inview: @@ -330,11 +350,14 @@ class IncrementalFormatter(msgspec.Struct): array = in_view profiler(f'{self.viz.name} view range slice {view_range}') - hist = array[:slice_to_head] + # hist = array[:slice_to_head] + + # XXX: WOA WTF TRACTOR DEBUGGING BUGGG + # assert 0 # xy-path data transform: convert source data to a format # able to be passed to a `QPainterPath` rendering routine. - if not len(hist): + if not len(array): # XXX: this might be why the profiler only has exits? return @@ -342,7 +365,7 @@ class IncrementalFormatter(msgspec.Struct): # x/y_data in the case where allocate_xy is # defined? 
x_1d, y_1d, connect = self.format_xy_nd_to_1d( - hist, + array, array_key, view_range, ) @@ -370,13 +393,10 @@ class IncrementalFormatter(msgspec.Struct): if len(x_1d): self._last_ivdr = x_1d[0], x_1d[slice_to_head] - # TODO: eventually maybe we can implement some kind of - # transform on the ``QPainterPath`` that will more or less - # detect the diff in "elements" terms? - # update diff state since we've now rendered paths. - self._last_read = new_read - profiler('.format_to_1d()') + if (x_1d[-1] == 0.5).any(): + breakpoint() + return ( x_1d, y_1d, @@ -430,21 +450,22 @@ class IncrementalFormatter(msgspec.Struct): nd_stop: int, is_append: bool, - index_field: str = 'index', ) -> tuple[ np.ndarray, slice, ]: # write pushed data to flattened copy - new_y_nd = new_from_src + new_y_nd = new_from_src[data_field] # XXX # TODO: this should be returned and written by caller! # XXX - # generate same-valued-per-row x support based on y shape + # generate same-valued-per-row x support with Nx1 shape + index_field = self.index_field if index_field != 'index': - self.x_nd[read_slc, :] = new_from_src[index_field] + x_nd_new = self.x_nd[read_slc] + x_nd_new[:] = new_from_src[index_field] return new_y_nd, read_slc @@ -576,6 +597,7 @@ class OHLCBarsFmtr(IncrementalFormatter): low = q['low'] close = q['close'] # index = float64(q[index_field]) + # index = float64(q['time']) index = float64(q['index']) istart = i * 6 @@ -652,7 +674,6 @@ class OHLCBarsFmtr(IncrementalFormatter): nd_stop: int, is_append: bool, - index_field: str = 'index', ) -> tuple[ np.ndarray, @@ -668,8 +689,13 @@ class OHLCBarsFmtr(IncrementalFormatter): # TODO: this should be returned and written by caller! 
# XXX # generate same-valued-per-row x support based on y shape + index_field: str = self.index_field if index_field != 'index': - self.x_nd[read_slc, :] = new_from_src[index_field] + x_nd_new = self.x_nd[read_slc] + x_nd_new[:] = new_from_src[index_field][:, np.newaxis] + + if (self.x_nd[self.xy_slice] == 0.5).any(): + breakpoint() return new_y_nd, read_slc @@ -715,8 +741,6 @@ class StepCurveFmtr(IncrementalFormatter): shm: ShmArray, data_field: str, - index_field: str = 'index', - ) -> tuple[ np.ndarray, # x np.nd.array # y @@ -734,11 +758,17 @@ class StepCurveFmtr(IncrementalFormatter): (i.size, 2), ) + np.array([-0.5, 0.5]) - y_out = np.empty((len(out), 2), dtype=out.dtype) + # fill out Nx2 array to hold each step's left + right vertices. + y_out = np.empty( + # (len(out), 2), + x_out.shape, + dtype=out.dtype, + ) + # fill in (current) values from source shm buffer y_out[:] = out[:, np.newaxis] # start y at origin level - y_out[0, 0] = 0 + y_out[self.xy_nd_start] = 0 return x_out, y_out def incr_update_xy_nd( @@ -747,12 +777,12 @@ class StepCurveFmtr(IncrementalFormatter): src_shm: ShmArray, array_key: str, - src_update: np.ndarray, # portion of source that was updated - slc: slice, + new_from_src: np.ndarray, # portion of source that was updated + read_slc: slice, ln: int, # len of updated - first: int, - last: int, + nd_start: int, + nd_stop: int, is_append: bool, @@ -763,20 +793,29 @@ class StepCurveFmtr(IncrementalFormatter): # for a step curve we slice from one datum prior # to the current "update slice" to get the previous # "level". 
- if is_append: - start = max(last - 1, 0) - end = src_shm._last.value - new_y = src_shm._array[start:end][array_key] - slc = slice(start, end) + # if is_append: + # start = max(last - 1, 0) + # end = src_shm._last.value + # new_y = src_shm._array[start:end][array_key] + # append_slc = slice(start, end) - else: - new_y = src_update + new_y = new_from_src[array_key][:, np.newaxis] + + index_field = self.index_field + if index_field != 'index': + x_nd_new = self.x_nd[read_slc] + x_nd_new[:] = new_from_src[index_field][:, np.newaxis] + + if (self.x_nd[self.xy_slice][-1] == 0.5).any(): + breakpoint() return ( - np.broadcast_to( - new_y[:, None], (new_y.size, 2), - ), - slc, + new_y, + # np.broadcast_to( + # new_x[:, None], + # (new_y.size, 2), + # ), + read_slc, ) def format_xy_nd_to_1d( @@ -791,23 +830,40 @@ class StepCurveFmtr(IncrementalFormatter): np.ndarray, str, ]: - lasts = array[['index', array_key]] - last = lasts[array_key][-1] + last_t, last = array[-1][[self.index_field, array_key]] + + start = self.xy_nd_start # 2 more datum-indexes to capture zero at end - x_step = self.x_nd[self.xy_nd_start:self.xy_nd_stop+2] - y_step = self.y_nd[self.xy_nd_start:self.xy_nd_stop+2] + # XXX: can we drop this ``extra`` bit? 
+        extra = 2
+        stop = self.xy_nd_stop + extra
+
+        x_step = self.x_nd[start:stop]
+        y_step = self.y_nd[start:stop]
+
+        # if (x_step[-1] == 0.5).any():
+        #     breakpoint()
+
+        # pack in duplicate final value to complete last step level
+        x_step[-1] = last_t
         y_step[-1] = last

         # slice out in-view data
         ivl, ivr = vr
-        ys_iv = y_step[ivl:ivr+1]
-        xs_iv = x_step[ivl:ivr+1]
+        # ys_iv = y_step[ivl:ivr+1]
+        # xs_iv = x_step[ivl:ivr+1]
+        ys_iv = y_step[ivl:ivr]
+        xs_iv = x_step[ivl:ivr]

         # flatten to 1d
         y_iv = ys_iv.reshape(ys_iv.size)
         x_iv = xs_iv.reshape(xs_iv.size)

+        if (x_iv[-1] == 0.5).any():
+            breakpoint()
+
+        s = 100
         # print(
         #     f'ys_iv : {ys_iv[-s:]}\n'
         #     f'y_iv: {y_iv[-s:]}\n'

From 166d14af69c821e83a6c25beec37f035bf8ddf96 Mon Sep 17 00:00:00 2001
From: Tyler Goodlet
Date: Tue, 29 Nov 2022 09:05:06 -0500
Subject: [PATCH 11/96] Simplify formatter update methodology

Don't expect values (array + slice) to be returned and applied by
`.incr_update_xy_nd()` and instead presume this will be implemented
internally in each (sub)formatter. Attempt to simplify some
incr-update routines, (particularly in the step curve formatter,
though most of it was reverted to just a simpler form of the original
implementation XD) including:
- dropping the need for the `slice_to_head: int` control.
- using the `xy_nd_start/stop` index counters over custom lookups.
--- piker/data/_pathops.py | 151 +++++++++++++++++++---------------------- 1 file changed, 68 insertions(+), 83 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index f02d0345..a28be306 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -272,15 +272,7 @@ class IncrementalFormatter(msgspec.Struct): profiler('allocated xy history') if prepend_len: - y_prepend = shm._array[pre_slice] - # if read_src_from_key: - # y_prepend = y_prepend[array_key] - - ( - new_y_nd, - y_nd_slc, - - ) = self.incr_update_xy_nd( + self.incr_update_xy_nd( shm, array_key, @@ -290,7 +282,7 @@ class IncrementalFormatter(msgspec.Struct): # step curves) the updater routine may want to do # the source history-data reading itself, so we pass # both here. - y_prepend, + shm._array[pre_slice], pre_slice, prepend_len, @@ -299,30 +291,16 @@ class IncrementalFormatter(msgspec.Struct): is_append=False, ) - # y_nd_view = self.y_nd[y_nd_slc] - self.y_nd[y_nd_slc] = new_y_nd - # if read_src_from_key: - # y_nd_view[:][array_key] = new_y_nd - # else: - # y_nd_view[:] = new_y_nd - + # self.y_nd[y_nd_slc] = new_y_nd self.xy_nd_start = shm._first.value profiler('prepended xy history: {prepend_length}') if append_len: - y_append = shm._array[post_slice] - # if read_src_from_key: - # y_append = y_append[array_key] - - ( - new_y_nd, - y_nd_slc, - - ) = self.incr_update_xy_nd( + self.incr_update_xy_nd( shm, array_key, - y_append, + shm._array[post_slice], post_slice, append_len, @@ -330,10 +308,6 @@ class IncrementalFormatter(msgspec.Struct): self.xy_nd_stop, is_append=True, ) - # self.y_nd[post_slice] = new_y_nd - # self.y_nd[xy_slice or post_slice] = xy_data - self.y_nd[y_nd_slc] = new_y_nd - self.xy_nd_stop = shm._last.value profiler('appened xy history: {append_length}') @@ -392,10 +366,10 @@ class IncrementalFormatter(msgspec.Struct): # update the last "in view data range" if len(x_1d): self._last_ivdr = x_1d[0], x_1d[slice_to_head] + if (x_1d[-1] == 0.5).any(): + 
breakpoint() profiler('.format_to_1d()') - if (x_1d[-1] == 0.5).any(): - breakpoint() return ( x_1d, @@ -451,10 +425,7 @@ class IncrementalFormatter(msgspec.Struct): is_append: bool, - ) -> tuple[ - np.ndarray, - slice, - ]: + ) -> None: # write pushed data to flattened copy new_y_nd = new_from_src[data_field] @@ -467,7 +438,7 @@ class IncrementalFormatter(msgspec.Struct): x_nd_new = self.x_nd[read_slc] x_nd_new[:] = new_from_src[index_field] - return new_y_nd, read_slc + self.y_nd[read_slc] = new_y_nd # XXX: was ``.format_xy()`` def format_xy_nd_to_1d( @@ -489,8 +460,8 @@ class IncrementalFormatter(msgspec.Struct): ''' return ( - array[self.index_field], - array[array_key], + array[self.index_field][:-1], + array[array_key][:-1], # 1d connection array or style-key to # ``pg.functions.arrayToQPath()`` @@ -675,10 +646,7 @@ class OHLCBarsFmtr(IncrementalFormatter): is_append: bool, - ) -> tuple[ - np.ndarray, - slice, - ]: + ) -> None: # write newly pushed data to flattened copy # a struct-arr is always passed in. new_y_nd = rfn.structured_to_unstructured( @@ -697,7 +665,7 @@ class OHLCBarsFmtr(IncrementalFormatter): if (self.x_nd[self.xy_slice] == 0.5).any(): breakpoint() - return new_y_nd, read_slc + self.y_nd[read_slc] = new_y_nd class OHLCBarsAsCurveFmtr(OHLCBarsFmtr): @@ -767,8 +735,10 @@ class StepCurveFmtr(IncrementalFormatter): # fill in (current) values from source shm buffer y_out[:] = out[:, np.newaxis] + # TODO: pretty sure we can drop this? # start y at origin level - y_out[self.xy_nd_start] = 0 + # y_out[0, 0] = 0 + # y_out[self.xy_nd_start] = 0 return x_out, y_out def incr_update_xy_nd( @@ -793,13 +763,29 @@ class StepCurveFmtr(IncrementalFormatter): # for a step curve we slice from one datum prior # to the current "update slice" to get the previous # "level". 
- # if is_append: - # start = max(last - 1, 0) - # end = src_shm._last.value - # new_y = src_shm._array[start:end][array_key] - # append_slc = slice(start, end) + last_2 = slice( + read_slc.start, + read_slc.stop+1, + ) + y_nd_new = self.y_nd[last_2] + y_nd_new[:] = src_shm._array[last_2][array_key][:, None] - new_y = new_from_src[array_key][:, np.newaxis] + # NOTE: we can't use the append slice since we need to "look + # forward" one step to get the current level and copy it as + # well? (though i still don't really grok why..) + # y_nd_new[:] = new_from_src[array_key][:, None] + + # XXX: old approach now duplicated above (we can probably drop + # this since the key part was the ``nd_stop + 1`` + # if is_append: + # start = max(nd_stop - 1, 0) + # end = src_shm._last.value + # y_nd_new = src_shm._array[start:end][array_key]#[:, np.newaxis] + # slc = slice(start, end) + # self.y_nd[slc] = np.broadcast_to( + # y_nd_new[:, None], + # (y_nd_new.size, 2), + # ) index_field = self.index_field if index_field != 'index': @@ -809,15 +795,6 @@ class StepCurveFmtr(IncrementalFormatter): if (self.x_nd[self.xy_slice][-1] == 0.5).any(): breakpoint() - return ( - new_y, - # np.broadcast_to( - # new_x[:, None], - # (new_y.size, 2), - # ), - read_slc, - ) - def format_xy_nd_to_1d( self, @@ -833,11 +810,7 @@ class StepCurveFmtr(IncrementalFormatter): last_t, last = array[-1][[self.index_field, array_key]] start = self.xy_nd_start - - # 2 more datum-indexes to capture zero at end - # XXX: can we drop this ``extra`` bit? 
- extra = 2 - stop = self.xy_nd_stop + extra + stop = self.xy_nd_stop x_step = self.x_nd[start:stop] y_step = self.y_nd[start:stop] @@ -846,32 +819,44 @@ class StepCurveFmtr(IncrementalFormatter): # breakpoint() # pack in duplicate final value to complete last step level - x_step[-1] = last_t - y_step[-1] = last + # x_step[-1] = last_t + # y_step[-1] = last + # x_step[-1, 1] = last_t + y_step[-1, 1] = last + + # if y_step.any(): + # s = 3 + # print( + # f'x_step:\n{x_step[-s:]}\n' + # f'y_step:\n{y_step[-s:]}\n\n' + # ) # slice out in-view data ivl, ivr = vr - # ys_iv = y_step[ivl:ivr+1] - # xs_iv = x_step[ivl:ivr+1] - ys_iv = y_step[ivl:ivr] - xs_iv = x_step[ivl:ivr] + # TODO: WHY do we need the extra +1 index? + x_step_iv = x_step[ivl:ivr+1] + y_step_iv = y_step[ivl:ivr+1] # flatten to 1d - y_iv = ys_iv.reshape(ys_iv.size) - x_iv = xs_iv.reshape(xs_iv.size) + x_1d = x_step_iv.reshape(x_step_iv.size) + y_1d = y_step_iv.reshape(y_step_iv.size) - if (x_iv[-1] == 0.5).any(): + if not x_1d.size == y_1d.size: breakpoint() - # s = 100 - # print( - # f'ys_iv : {ys_iv[-s:]}\n' - # f'y_iv: {y_iv[-s:]}\n' - # f'xs_iv: {xs_iv[-s:]}\n' - # f'x_iv: {x_iv[-s:]}\n' - # ) + if x_1d.any() and (x_1d[-1] == 0.5).any(): + breakpoint() - return x_iv, y_iv, 'all' + # if y_1d.any(): + # s = 6 + # print( + # f'x_step_iv:\n{x_step_iv[-s:]}\n' + # f'y_step_iv:\n{y_step_iv[-s:]}\n\n' + # f'x_1d:\n{x_1d[-s:]}\n' + # f'y_1d:\n{y_1d[-s:]}\n' + # ) + + return x_1d, y_1d, 'all' def xy_downsample( From 9133103f8ff9f15628a7a331779757e51eacaad2 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 29 Nov 2022 10:56:17 -0500 Subject: [PATCH 12/96] Attempt to make `.default_view()` time-index ready As in make the call to `Flume.slice_from_time()` to try and convert any time index values from the view range to array-indices; all untested atm. Also drop some old/unused/moved methods: - `._set_xlimits()` - `.bars_range()` - `.curve_width_pxs()` and fix some `flow` -> `viz` var naming. 
--- piker/ui/_chart.py | 180 +++++++++++++++++++-------------------------- 1 file changed, 75 insertions(+), 105 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 2820a0f1..1db354bf 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -60,7 +60,7 @@ from ._style import ( hcolor, CHART_MARGINS, _xaxis_at, - _min_points_to_show, + # _min_points_to_show, ) from ..data.feed import ( Feed, @@ -810,6 +810,8 @@ class LinkedSplits(QWidget): self.chart.sidepane.setMinimumWidth(sp_w) +# TODO: we should really drop using this type and instead just +# write our own wrapper around `PlotItem`.. class ChartPlotWidget(pg.PlotWidget): ''' ``GraphicsView`` subtype containing a single ``PlotItem``. @@ -921,7 +923,7 @@ class ChartPlotWidget(pg.PlotWidget): # show background grid self.showGrid(x=False, y=True, alpha=0.3) - self.cv.enable_auto_yrange() + # self.cv.enable_auto_yrange() self.pi_overlay: PlotItemOverlay = PlotItemOverlay(self.plotItem) @@ -951,41 +953,10 @@ class ChartPlotWidget(pg.PlotWidget): def focus(self) -> None: self.view.setFocus() - def _set_xlimits( - self, - xfirst: int, - xlast: int - ) -> None: - """Set view limits (what's shown in the main chart "pane") - based on max/min x/y coords. - """ - self.setLimits( - xMin=xfirst, - xMax=xlast, - minXRange=_min_points_to_show, - ) - def view_range(self) -> tuple[int, int]: vr = self.viewRect() return int(vr.left()), int(vr.right()) - def bars_range(self) -> tuple[int, int, int, int]: - ''' - Return a range tuple for the bars present in view. 
- - ''' - main_flow = self._vizs[self.name] - ifirst, l, lbar, rbar, r, ilast = main_flow.datums_range() - return l, lbar, rbar, r - - def curve_width_pxs( - self, - ) -> float: - _, lbar, rbar, _ = self.bars_range() - return self.view.mapViewToDevice( - QLineF(lbar, 0, rbar, 0) - ).length() - def pre_l1_xs(self) -> tuple[float, float]: ''' Return the view x-coord for the value just before @@ -1038,59 +1009,88 @@ class ChartPlotWidget(pg.PlotWidget): Set the view box to the "default" startup view of the scene. ''' - flow = self._vizs.get(self.name) - if not flow: + viz = self.get_viz(self.name) + if not viz: log.warning(f'`Viz` for {self.name} not loaded yet?') return - arr = flow.shm.array - index = arr['index'] - # times = arr['time'] + ( + _, + l, + datum_start, + datum_stop, + r, + _, + ) = viz.datums_range() - # these will be epoch time floats - xfirst, xlast = index[0], index[-1] - l, lbar, rbar, r = self.bars_range() + array = viz.shm.array + index_field = viz.index_field - view = self.view + if index_field == 'time': + vr = viz.plot.viewRect() + ( + abs_slc, + read_slc, + mask, + + ) = viz.flume.slice_from_time( + array, + start_t=vr.left(), + stop_t=vr.right(), + ) + iv_arr = array[mask] + index = iv_arr['index'] + + else: + index = array['index'] + + # these must be array-index-ints (hence the slice from time + # above). + x_start, x_stop = index[0], index[-1] + view: ChartView = viz.plot.vb if ( - rbar < 0 - or l < xfirst + datum_stop < 0 + or l < x_start or l < 0 - or (rbar - lbar) < 6 + or (datum_stop - datum_start) < 6 ): - # TODO: set fixed bars count on screen that approx includes as - # many bars as possible before a downsample line is shown. 
- begin = xlast - bars_from_y + begin = x_stop - bars_from_y view.setXRange( min=begin, - max=xlast, + max=x_stop, padding=0, ) # re-get range - l, lbar, rbar, r = self.bars_range() + l, datum_start, datum_stop, r = viz.bars_range() # we get the L1 spread label "length" in view coords # terms now that we've scaled either by user control # or to the default set of bars as per the immediate block # above. + debug_msg = ( + f'x_stop: {x_stop}\n' + ) + if not y_offset: marker_pos, l1_len = self.pre_l1_xs() - end = xlast + l1_len + 1 + end = x_stop + l1_len + 1 + + debug_msg += ( + f'marker pos: {marker_pos}\n' + f'l1 len: {l1_len}\n' + ) + else: - end = xlast + y_offset + 1 + end = x_stop + y_offset + 1 begin = end - (r - l) - # for debugging - # print( - # # f'bars range: {brange}\n' - # f'xlast: {xlast}\n' - # f'marker pos: {marker_pos}\n' - # f'l1 len: {l1_len}\n' - # f'begin: {begin}\n' - # f'end: {end}\n' - # ) + debug_msg += ( + f'end: {end}\n' + f'begin: {begin}\n' + ) + print(debug_msg) # remove any custom user yrange setttings if self._static_yrange == 'axis': @@ -1254,17 +1254,17 @@ class ChartPlotWidget(pg.PlotWidget): **graphics_kwargs, ) - flow = self._vizs[data_key] = Viz( + viz = self._vizs[data_key] = Viz( data_key, pi, shm, flume, is_ohlc=is_ohlc, - # register curve graphics with this flow + # register curve graphics with this viz graphics=graphics, ) - assert isinstance(flow.shm, ShmArray) + assert isinstance(viz.shm, ShmArray) # TODO: this probably needs its own method? if overlay: @@ -1321,7 +1321,7 @@ class ChartPlotWidget(pg.PlotWidget): # understand. 
pi.addItem(graphics) - return flow + return viz def draw_ohlc( self, @@ -1364,35 +1364,6 @@ class ChartPlotWidget(pg.PlotWidget): **kwargs, ) - # def _label_h(self, yhigh: float, ylow: float) -> float: - # # compute contents label "height" in view terms - # # to avoid having data "contents" overlap with them - # if self._labels: - # label = self._labels[self.name][0] - - # rect = label.itemRect() - # tl, br = rect.topLeft(), rect.bottomRight() - # vb = self.plotItem.vb - - # try: - # # on startup labels might not yet be rendered - # top, bottom = (vb.mapToView(tl).y(), vb.mapToView(br).y()) - - # # XXX: magic hack, how do we compute exactly? - # label_h = (top - bottom) * 0.42 - - # except np.linalg.LinAlgError: - # label_h = 0 - # else: - # label_h = 0 - - # # print(f'label height {self.name}: {label_h}') - - # if label_h > yhigh - ylow: - # label_h = 0 - - # print(f"bounds (ylow, yhigh): {(ylow, yhigh)}") - # TODO: pretty sure we can just call the cursor # directly not? i don't wee why we need special "signal proxies" # for this lul.. @@ -1429,23 +1400,22 @@ class ChartPlotWidget(pg.PlotWidget): # TODO: here we should instead look up the ``Viz.shm.array`` # and read directly from shm to avoid copying to memory first # and then reading it again here. - flow_key = name or self.name - viz = self._vizs.get(flow_key) - if ( - viz is None - ): - log.error(f"viz {flow_key} doesn't exist in chart {self.name} !?") + viz_key = name or self.name + viz = self._vizs.get(viz_key) + if viz is None: + log.error(f"viz {viz_key} doesn't exist in chart {self.name} !?") key = res = 0, 0 else: ( - first, + _, l, lbar, rbar, r, - last, + _, ) = bars_range or viz.datums_range() + profiler(f'{self.name} got bars range') key = round(lbar), round(rbar) @@ -1455,7 +1425,7 @@ class ChartPlotWidget(pg.PlotWidget): res is None ): log.warning( - f"{flow_key} no mxmn for bars_range => {key} !?" + f"{viz_key} no mxmn for bars_range => {key} !?" 
) res = 0, 0 if not self._on_screen: @@ -1463,11 +1433,11 @@ class ChartPlotWidget(pg.PlotWidget): self._on_screen = True profiler(f'yrange mxmn: {key} -> {res}') - # print(f'{flow_key} yrange mxmn: {key} -> {res}') + # print(f'{viz_key} yrange mxmn: {key} -> {res}') return res def get_viz( self, key: str, ) -> Viz: - return self._vizs[key] + return self._vizs.get(key) From 344d2eeb9e3be3ce978a9a267c5dee78570a0212 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 29 Nov 2022 10:59:12 -0500 Subject: [PATCH 13/96] Facepalm: pass correct flume to each FSP chart group.. --- piker/ui/_fsp.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index c3b13cbc..8bbb0932 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -833,6 +833,7 @@ async def open_vlm_displays( names: list[str], pi: pg.PlotItem, shm: ShmArray, + flume: Flume, step_mode: bool = False, style: str = 'solid', @@ -872,6 +873,7 @@ async def open_vlm_displays( fields, dvlm_pi, dvlm_flume.rt_shm, + dvlm_flume, step_mode=True, ) @@ -940,6 +942,7 @@ async def open_vlm_displays( trade_rate_fields, tr_pi, fr_flume.rt_shm, + fr_flume, # step_mode=True, # dashed line to represent "individual trades" being From ac1f37a2c26df3d23ba80ef4d6f6aaf08b174d61 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 10:06:20 -0500 Subject: [PATCH 14/96] Expect `index_field: str` in all graphics objects --- piker/data/_compression.py | 11 ++++++++--- piker/ui/_curve.py | 15 ++++++--------- piker/ui/_ohlc.py | 29 ++++++++++++++++------------- 3 files changed, 30 insertions(+), 25 deletions(-) diff --git a/piker/data/_compression.py b/piker/data/_compression.py index aed1d7d3..e6111fca 100644 --- a/piker/data/_compression.py +++ b/piker/data/_compression.py @@ -35,13 +35,17 @@ from ..log import get_logger log = get_logger(__name__) -def hl2mxmn(ohlc: np.ndarray) -> np.ndarray: +def hl2mxmn( + ohlc: np.ndarray, + index_field: str = 'index', + +) -> np.ndarray: ''' Convert 
a OHLC struct-array containing 'high'/'low' columns to a "joined" max/min 1-d array. ''' - index = ohlc['index'] + index = ohlc[index_field] hls = ohlc[[ 'low', 'high', @@ -109,6 +113,7 @@ def trace_hl( def ohlc_flatten( ohlc: np.ndarray, use_mxmn: bool = True, + index_field: str = 'index', ) -> tuple[np.ndarray, np.ndarray]: ''' @@ -117,7 +122,7 @@ def ohlc_flatten( evenly (by 0.5 steps) over each index. ''' - index = ohlc['index'] + index = ohlc[index_field] if use_mxmn: # traces a line optimally over highs to lows diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index 663b328f..8df1cda0 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -360,11 +360,12 @@ class Curve(pg.GraphicsObject): render_data: np.ndarray, reset: bool, array_key: str, + index_field: str, ) -> None: # default line draw last call # with self.reset_cache(): - x = render_data['index'] + x = render_data[index_field] y = render_data[array_key] # draw the "current" step graphic segment so it @@ -391,10 +392,11 @@ class FlattenedOHLC(Curve): render_data: np.ndarray, reset: bool, array_key: str, + index_field: str, ) -> None: lasts = src_data[-2:] - x = lasts['index'] + x = lasts[index_field] y = lasts['close'] # draw the "current" step graphic segment so it @@ -421,6 +423,7 @@ class StepCurve(Curve): render_data: np.ndarray, reset: bool, array_key: str, + index_field: str, w: float = 0.5, @@ -429,7 +432,7 @@ class StepCurve(Curve): # TODO: remove this and instead place all step curve # updating into pre-path data render callbacks. 
# full input data - x = src_data['index'] + x = src_data[index_field] y = src_data[array_key] x_last = x[-1] @@ -458,9 +461,3 @@ class StepCurve(Curve): # p.drawLines(*tuple(filter(bool, self._last_step_lines))) # p.drawRect(self._last_step_rect) p.fillRect(self._last_step_rect, self._brush) - - # def sub_br( - # self, - # parent_br: QRectF | None = None, - # ) -> QRectF: - # return self._last_step_rect diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index 2ce23d30..98ffcb85 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -59,8 +59,8 @@ def bar_from_ohlc_row( OHLC "bar" for use in the "last datum" of a series. ''' - open, high, low, close, index = row[ - ['open', 'high', 'low', 'close', 'index']] + open, high, low, close, index = row #[fields] + # ['open', 'high', 'low', 'close', 'index']] # TODO: maybe consider using `QGraphicsLineItem` ?? # gives us a ``.boundingRect()`` on the objects which may make @@ -217,30 +217,33 @@ class BarItems(pg.GraphicsObject): render_data: np.ndarray, reset: bool, array_key: str, - - fields: list[str] = [ - 'index', - 'open', - 'high', - 'low', - 'close', - ], + index_field: str, ) -> None: # relevant fields + fields: list[str] = [ + 'open', + 'high', + 'low', + 'close', + index_field, + ] ohlc = src_data[fields] # last_row = ohlc[-1:] # individual values - last_row = i, o, h, l, last = ohlc[-1] + last_row = o, h, l, last, i = ohlc[-1] # times = src_data['time'] # if times[-1] - times[-2]: # breakpoint() # generate new lines objects for updatable "current bar" - self._last_bar_lines = bar_from_ohlc_row(last_row) + self._last_bar_lines = bar_from_ohlc_row( + last_row, + # fields, + ) # assert i == graphics.start_index - 1 # assert i == last_index @@ -270,4 +273,4 @@ class BarItems(pg.GraphicsObject): # because i've seen it do this to bars i - 3 back? 
# return ohlc['time'], ohlc['close'] - return ohlc['index'], ohlc['close'] + return ohlc[index_field], ohlc['close'] From 702ae29a2ca12d7243e2bedde6849369afc715b6 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 10:14:04 -0500 Subject: [PATCH 15/96] Add `Viz.index_field: str`, pass to graphics objs In an effort to make it easy to override the indexing scheme. Further, this repairs the `.datums_range()` special case to handle when the view box is to-the-right-of the data set (i.e. l > datum_start). --- piker/ui/_render.py | 91 +++++++++++++++++++++++++++++++-------------- 1 file changed, 63 insertions(+), 28 deletions(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 23cbb7de..a0e4134f 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -103,6 +103,7 @@ def render_baritems( shm=viz.shm, viz=viz, _last_read=read, + index_field=viz.index_field, ), ) @@ -112,6 +113,7 @@ def render_baritems( shm=viz.shm, viz=viz, _last_read=read, + index_field=viz.index_field, ), ) @@ -230,6 +232,8 @@ class Viz(msgspec.Struct): # , frozen=True): is_ohlc: bool = False render: bool = True # toggle for display loop + _index_field: str = 'index' + # downsampling state _last_uppx: float = 0 _in_ds: bool = False @@ -254,6 +258,10 @@ class Viz(msgspec.Struct): # , frozen=True): def shm(self) -> ShmArray: return self._shm + @property + def index_field(self) -> str: + return self._index_field + def maxmin( self, lbar: int, @@ -280,25 +288,24 @@ class Viz(msgspec.Struct): # , frozen=True): arr = shm.array # get relative slice indexes into array - # ( - # abs_slc, - # read_slc, - # mask, - # ) = self.flume.slice_from_time( - # arr, - # start_t=lbar, - # stop_t=rbar, - # ) - # slice_view = arr[mask] + if self.index_field == 'time': + ( + abs_slc, + read_slc, + mask, + ) = self.flume.slice_from_time( + arr, + start_t=lbar, + stop_t=rbar, + ) + slice_view = arr[mask] - # TODO: should we just add/use a method - # on the shm to do this? 
- - ifirst = arr[0]['index'] - slice_view = arr[ - lbar - ifirst: - (rbar - ifirst) + 1 - ] + else: + ifirst = arr[0]['index'] + slice_view = arr[ + lbar - ifirst: + (rbar - ifirst) + 1 + ] if not slice_view.size: return None @@ -318,7 +325,7 @@ class Viz(msgspec.Struct): # , frozen=True): yhigh = np.max(view) mxmn = ylow, yhigh - # print(f'{self.name} MANUAL maxmin: {mxmin}') + # print(f'{self.name} MANUAL maxmin: {mxmn}') # cache result for input range assert mxmn @@ -348,7 +355,7 @@ class Viz(msgspec.Struct): # , frozen=True): def datums_range( self, - index_field: str = 'index', + index_field: str | None = None, ) -> tuple[ int, int, int, int, int, int @@ -359,20 +366,30 @@ class Viz(msgspec.Struct): # , frozen=True): ''' l, r = self.view_range() + index_field: str = index_field or self.index_field if index_field == 'index': l, r = round(l), round(r) - # # TODO: avoid this and have shm passed in earlier? - # if self.shm is None: - # # haven't initialized the viz yet - # return (0, l, 0, 0, r, 0) - array = self.shm.array index = array[index_field] start = index[0] stop = index[-1] - datum_start = max(l, start) - datum_stop = min(r, stop) + + if ( + l < 0 + or r < l + or l < start + ): + datum_start = start + datum_stop = stop + else: + datum_start = max(l, start) + datum_stop = r + if l < stop: + datum_stop = min(r, stop) + + assert datum_start < datum_stop + return ( start, l, # left x-in-view @@ -385,7 +402,7 @@ class Viz(msgspec.Struct): # , frozen=True): def read( self, array_field: Optional[str] = None, - index_field: str = 'index', + index_field: str | None = None, ) -> tuple[ int, int, np.ndarray, @@ -398,6 +415,8 @@ class Viz(msgspec.Struct): # , frozen=True): which has been written to. 
''' + index_field: str = index_field or self.index_field + # readable data array = self.shm.array @@ -409,6 +428,8 @@ class Viz(msgspec.Struct): # , frozen=True): r, ilast, ) = self.datums_range(index_field=index_field) + # if rbar < lbar: + # breakpoint() abs_slc = slice(ifirst, ilast) @@ -425,6 +446,17 @@ class Viz(msgspec.Struct): # , frozen=True): ) in_view = array[read_slc] + # diff = rbar - lbar + # if ( + # 'btc' in self.name + # and 'hist' not in self.shm.token + # ): + # print( + # f'{self.name}: len(iv) = {len(in_view)}\n' + # f'start/stop: {lbar},{rbar}\n', + # f'diff: {diff}\n', + # ) + # array-index slicing # TODO: can we do time based indexing using arithmetic presuming # a uniform time stamp step size? @@ -534,6 +566,7 @@ class Viz(msgspec.Struct): # , frozen=True): shm=self.shm, viz=self, _last_read=read, + index_field=self.index_field, ), ) @@ -670,6 +703,7 @@ class Viz(msgspec.Struct): # , frozen=True): data, reset, array_key, + index_field=self.index_field, ) graphics.update() profiler('.update()') @@ -707,6 +741,7 @@ class Viz(msgspec.Struct): # , frozen=True): src_array, False, # never reset path array_key, + self.index_field, ) # the renderer is downsampling we choose From 25a75e5bec280435e87594ba5f08bf4c5a3d5c26 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 10:23:21 -0500 Subject: [PATCH 16/96] Fix `.default_view()` to view-left-of-data --- piker/ui/_chart.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 1db354bf..61946873 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -1028,6 +1028,12 @@ class ChartPlotWidget(pg.PlotWidget): if index_field == 'time': vr = viz.plot.viewRect() + vtl, vtr = vr.left(), vr.right() + + if vtl < datum_start: + vtl = datum_start + vtr = datum_stop + ( abs_slc, read_slc, @@ -1035,11 +1041,14 @@ class ChartPlotWidget(pg.PlotWidget): ) = viz.flume.slice_from_time( array, - start_t=vr.left(), - 
stop_t=vr.right(), + start_t=vtl, + stop_t=vtr, ) - iv_arr = array[mask] - index = iv_arr['index'] + iv_arr = array + if mask is not None: + iv_arr = array[mask] + + index = iv_arr['time'] else: index = array['index'] @@ -1358,8 +1367,8 @@ class ChartPlotWidget(pg.PlotWidget): Update the named internal graphics from ``array``. ''' - flow = self._vizs[array_key or graphics_name] - return flow.update_graphics( + viz = self._vizs[array_key or graphics_name] + return viz.update_graphics( array_key=array_key, **kwargs, ) From 2b9ca5f805c27540931319f946c19e89a7c35f2b Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 11:00:48 -0500 Subject: [PATCH 17/96] Call `Viz.bars_range()` from display loop --- piker/ui/_display.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 07033a0c..dcc767c8 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -101,7 +101,8 @@ def chart_maxmin( Compute max and min datums "in view" for range limits. ''' - last_bars_range = chart.bars_range() + main_viz = chart.get_viz(chart.name) + last_bars_range = main_viz.bars_range() out = chart.maxmin(name=fqsn) if out is None: @@ -217,7 +218,8 @@ class DisplayState(Struct): append_diff = i_step - _i_last_append # real-time update necessary? 
- _, _, _, r = chart.bars_range() + main_viz = chart.get_viz(chart.name) + _, _, _, r = main_viz.bars_range() liv = r >= shm.index # update the "last datum" (aka extending the vizs graphic with From f3bab826f699fb91cdd255708fc421f11db097b9 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 11:02:50 -0500 Subject: [PATCH 18/96] Comment out bps for time indexing --- piker/data/_pathops.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index a28be306..d7d12763 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -170,11 +170,11 @@ class IncrementalFormatter(msgspec.Struct): prepend_length = int(last_xfirst - xfirst) append_length = int(xlast - last_xlast) - if ( - prepend_length < 0 - or append_length < 0 - ): - breakpoint() + # if ( + # prepend_length < 0 + # or append_length < 0 + # ): + # breakpoint() # blah blah blah # do diffing for prepend, append and last entry @@ -792,8 +792,8 @@ class StepCurveFmtr(IncrementalFormatter): x_nd_new = self.x_nd[read_slc] x_nd_new[:] = new_from_src[index_field][:, np.newaxis] - if (self.x_nd[self.xy_slice][-1] == 0.5).any(): - breakpoint() + # if (self.x_nd[self.xy_slice][-1] == 0.5).any(): + # breakpoint() def format_xy_nd_to_1d( self, From eb9ab206465c5ccb800268526cb465070d7262ae Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 11:04:18 -0500 Subject: [PATCH 19/96] Don't disable non-enabled vlm chart y-autoranging --- piker/ui/_fsp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index 8bbb0932..3d686aba 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -907,7 +907,7 @@ async def open_vlm_displays( vflow.render = False # avoid range sorting on volume once disabled - chart.view.disable_auto_yrange() + # chart.view.disable_auto_yrange() # Trade rate overlay # XXX: requires an additional overlay for From 
5affad942ffe81f71e41bea5f20115d3e179d469 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 12:37:34 -0500 Subject: [PATCH 20/96] Enable/disable vlm chart yranging (TO SQUASH) --- piker/ui/_fsp.py | 53 ++++++++++++++++++++++-------------------------- 1 file changed, 24 insertions(+), 29 deletions(-) diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py index 3d686aba..5dee7b88 100644 --- a/piker/ui/_fsp.py +++ b/piker/ui/_fsp.py @@ -79,14 +79,14 @@ def has_vlm(ohlcv: ShmArray) -> bool: def update_fsp_chart( chart: ChartPlotWidget, - flow, + viz, graphics_name: str, array_key: Optional[str], **kwargs, ) -> None: - shm = flow.shm + shm = viz.shm if not shm: return @@ -666,7 +666,7 @@ async def open_vlm_displays( shm = ohlcv ohlc_chart = linked.chart - chart = linked.add_plot( + vlm_chart = linked.add_plot( name='volume', shm=shm, flume=flume, @@ -682,10 +682,12 @@ async def open_vlm_displays( # the curve item internals are pretty convoluted. style='step', ) + vlm_chart.view.enable_auto_yrange() + # back-link the volume chart to trigger y-autoranging # in the ohlc (parent) chart. ohlc_chart.view.enable_auto_yrange( - src_vb=chart.view, + src_vb=vlm_chart.view, ) # force 0 to always be in view @@ -707,7 +709,7 @@ async def open_vlm_displays( ''' mx = 0 for name in names: - ymn, ymx = chart.maxmin(name=name) + ymn, ymx = vlm_chart.maxmin(name=name) mx = max(mx, ymx) return 0, mx @@ -715,34 +717,33 @@ async def open_vlm_displays( # TODO: fix the x-axis label issue where if you put # the axis on the left it's totally not lined up... 
# show volume units value on LHS (for dinkus) - # chart.hideAxis('right') - # chart.showAxis('left') + # vlm_chart.hideAxis('right') + # vlm_chart.showAxis('left') # send back new chart to caller - task_status.started(chart) + task_status.started(vlm_chart) # should **not** be the same sub-chart widget - assert chart.name != linked.chart.name + assert vlm_chart.name != linked.chart.name # sticky only on sub-charts atm - last_val_sticky = chart.plotItem.getAxis( - 'right')._stickies.get(chart.name) + last_val_sticky = vlm_chart.plotItem.getAxis( + 'right')._stickies.get(vlm_chart.name) # read from last calculated value value = shm.array['volume'][-1] last_val_sticky.update_from_data(-1, value) - vlm_curve = chart.update_graphics_from_flow( + vlm_curve = vlm_chart.update_graphics_from_flow( 'volume', - # shm.array, ) # size view to data once at outset - chart.view._set_yrange() + vlm_chart.view._set_yrange() # add axis title - axis = chart.getAxis('right') + axis = vlm_chart.getAxis('right') axis.set_title(' vlm') if dvlm: @@ -782,7 +783,7 @@ async def open_vlm_displays( # XXX: the main chart already contains a vlm "units" axis # so here we add an overlay wth a y-range in # $ liquidity-value units (normally a fiat like USD). - dvlm_pi = chart.overlay_plotitem( + dvlm_pi = vlm_chart.overlay_plotitem( 'dolla_vlm', index=0, # place axis on inside (nearest to chart) axis_title=' $vlm', @@ -850,7 +851,7 @@ async def open_vlm_displays( assert isinstance(shm, ShmArray) assert isinstance(flume, Flume) - flow = chart.draw_curve( + viz = vlm_chart.draw_curve( name, shm, flume, @@ -861,13 +862,7 @@ async def open_vlm_displays( style=style, pi=pi, ) - - # TODO: we need a better API to do this.. - # specially store ref to shm for lookup in display loop - # since only a placeholder of `None` is entered in - # ``.draw_curve()``. 
- # viz = chart._vizs[name] - assert flow.plot is pi + assert viz.plot is pi chart_curves( fields, @@ -902,17 +897,17 @@ async def open_vlm_displays( # displayed and the curves are effectively the same minus # liquidity events (well at least on low OHLC periods - 1s). vlm_curve.hide() - chart.removeItem(vlm_curve) - vflow = chart._vizs['volume'] - vflow.render = False + vlm_chart.removeItem(vlm_curve) + vlm_viz = vlm_chart._vizs['volume'] + vlm_viz.render = False # avoid range sorting on volume once disabled - # chart.view.disable_auto_yrange() + vlm_chart.view.disable_auto_yrange() # Trade rate overlay # XXX: requires an additional overlay for # a trades-per-period (time) y-range. - tr_pi = chart.overlay_plotitem( + tr_pi = vlm_chart.overlay_plotitem( 'trade_rates', # TODO: dynamically update period (and thus this axis?) From 3d5695f40a8dc81da58c9e768aa819e6cc09f832 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 12:39:51 -0500 Subject: [PATCH 21/96] Explicitly enable chart widget yranging in display init --- piker/ui/_chart.py | 2 -- piker/ui/_display.py | 3 +++ 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 61946873..aa068d74 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -923,8 +923,6 @@ class ChartPlotWidget(pg.PlotWidget): # show background grid self.showGrid(x=False, y=True, alpha=0.3) - # self.cv.enable_auto_yrange() - self.pi_overlay: PlotItemOverlay = PlotItemOverlay(self.plotItem) # indempotent startup flag for auto-yrange subsys diff --git a/piker/ui/_display.py b/piker/ui/_display.py index dcc767c8..0ca6477d 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -1438,5 +1438,8 @@ async def display_symbol_data( rt_linked.mode = mode rt_chart.default_view() + rt_chart.view.enable_auto_yrange() hist_chart.default_view() + hist_chart.view.enable_auto_yrange() + await trio.sleep_forever() # let the app run.. 
bby From 6ea04f850df5971c496eb53934794115a881bfed Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 15:28:15 -0500 Subject: [PATCH 22/96] Drop diff state tracking in formatter This was a major cause of error (particularly trying to get epoch indexing working) and really isn't necessary; instead just have `.diff()` always read from the underlying source array for current index-step diffing and append/prepend slice construction. Allows us to, - drop `._last_read` state management and thus usage. - better handle startup indexing by setting `.xy_nd_start/stop` to `None` initially so that the first update can be done in one large prepend. - better understand and document the step curve "slice back to previous level" logic which is now heavily commented B) - drop all the `slice_to_head` stuff from and instead allow each formatter to choose it's 1d segmenting. --- piker/data/_pathops.py | 168 ++++++++++++++++++++++------------------- piker/ui/_render.py | 12 --- 2 files changed, 91 insertions(+), 89 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index d7d12763..b99ee88e 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -64,18 +64,6 @@ class IncrementalFormatter(msgspec.Struct): viz: Viz index_field: str = 'index' - # last read from shm (usually due to an update call) - _last_read: tuple[ - int, - int, - np.ndarray - - ] - - @property - def last_read(self) -> tuple | None: - return self._last_read - # Incrementally updated xy ndarray formatted data, a pre-1d # format which is updated and cached independently of the final # pre-graphics-path 1d format. @@ -99,8 +87,8 @@ class IncrementalFormatter(msgspec.Struct): # indexes which slice into the above arrays (which are allocated # based on source data shm input size) and allow retrieving # incrementally updated data. 
- xy_nd_start: int = 0 - xy_nd_stop: int = 0 + xy_nd_start: int | None = None + xy_nd_stop: int | None = None # TODO: eventually incrementally update 1d-pre-graphics path data? # x_1d: Optional[np.ndarray] = None @@ -144,17 +132,14 @@ class IncrementalFormatter(msgspec.Struct): np.ndarray, np.ndarray, ]: - ( - last_xfirst, - last_xlast, - last_array, - last_ivl, - last_ivr, - last_in_view, - ) = self.last_read - # TODO: can the renderer just call ``Viz.read()`` directly? - # unpack latest source data read + # TODO: + # - can the renderer just call ``Viz.read()`` directly? unpack + # latest source data read + # - eventually maybe we can implement some kind of + # transform on the ``QPainterPath`` that will more or less + # detect the diff in "elements" terms? update diff state since + # we've now rendered paths. ( xfirst, xlast, @@ -164,25 +149,42 @@ class IncrementalFormatter(msgspec.Struct): in_view, ) = new_read + index = array['index'] + + # if the first index in the read array is 0 then + # it means the source buffer has bee completely backfilled to + # available space. + src_start = index[0] + src_stop = index[-1] + 1 + + # these are the "formatted output data" indices + # for the pre-graphics arrays. + nd_start = self.xy_nd_start + nd_stop = self.xy_nd_stop + + if ( + nd_start is None + ): + assert nd_stop is None + + # setup to do a prepend of all existing src history + nd_start = self.xy_nd_start = src_stop + # set us in a zero-to-append state + nd_stop = self.xy_nd_stop = src_stop + # compute the length diffs between the first/last index entry in # the input data and the last indexes we have on record from the # last time we updated the curve index. 
- prepend_length = int(last_xfirst - xfirst) - append_length = int(xlast - last_xlast) - - # if ( - # prepend_length < 0 - # or append_length < 0 - # ): - # breakpoint() + prepend_length = int(nd_start - src_start) + append_length = int(src_stop - nd_stop) # blah blah blah # do diffing for prepend, append and last entry return ( - slice(xfirst, last_xfirst), + slice(src_start, nd_start), prepend_length, append_length, - slice(last_xlast, xlast), + slice(nd_stop, src_stop), ) def _track_inview_range( @@ -233,7 +235,6 @@ class IncrementalFormatter(msgspec.Struct): array_key: str, profiler: Profiler, - slice_to_head: int = -1, read_src_from_key: bool = True, slice_to_inview: bool = True, @@ -311,12 +312,6 @@ class IncrementalFormatter(msgspec.Struct): self.xy_nd_stop = shm._last.value profiler('appened xy history: {append_length}') - # TODO: eventually maybe we can implement some kind of - # transform on the ``QPainterPath`` that will more or less - # detect the diff in "elements" terms? - # update diff state since we've now rendered paths. - self._last_read = new_read - view_changed: bool = False view_range: tuple[int, int] = (ivl, ivr) if slice_to_inview: @@ -365,7 +360,7 @@ class IncrementalFormatter(msgspec.Struct): # update the last "in view data range" if len(x_1d): - self._last_ivdr = x_1d[0], x_1d[slice_to_head] + self._last_ivdr = x_1d[0], x_1d[-1] if (x_1d[-1] == 0.5).any(): breakpoint() @@ -760,41 +755,62 @@ class StepCurveFmtr(IncrementalFormatter): np.ndarray, slice, ]: - # for a step curve we slice from one datum prior + # NOTE: for a step curve we slice from one datum prior # to the current "update slice" to get the previous # "level". - last_2 = slice( - read_slc.start, - read_slc.stop+1, + # + # why this is needed, + # - the current new append slice will often have a zero + # value in the latest datum-step (at least for zero-on-new + # cases like vlm in the) as per configuration of the FSP + # engine. 
+ # - we need to look back a datum to get the last level which + # will be used to terminate/complete the last step x-width + # which will be set to pair with the last x-index THIS MEANS + # + # XXX: this means WE CAN'T USE the append slice since we need to + # "look backward" one step to get the needed back-to-zero level + # and the update data in ``new_from_src`` will only contain the + # latest new data. + back_1 = slice( + read_slc.start - 1, + read_slc.stop, ) - y_nd_new = self.y_nd[last_2] - y_nd_new[:] = src_shm._array[last_2][array_key][:, None] - # NOTE: we can't use the append slice since we need to "look - # forward" one step to get the current level and copy it as - # well? (though i still don't really grok why..) - # y_nd_new[:] = new_from_src[array_key][:, None] + to_write = src_shm._array[back_1] + y_nd_new = self.y_nd[back_1] + y_nd_new[:] = to_write[array_key][:, None] - # XXX: old approach now duplicated above (we can probably drop - # this since the key part was the ``nd_stop + 1`` - # if is_append: - # start = max(nd_stop - 1, 0) - # end = src_shm._last.value - # y_nd_new = src_shm._array[start:end][array_key]#[:, np.newaxis] - # slc = slice(start, end) - # self.y_nd[slc] = np.broadcast_to( - # y_nd_new[:, None], - # (y_nd_new.size, 2), + x_nd_new = self.x_nd[read_slc] + x_nd_new[:] = ( + new_from_src[self.index_field][:, None] + + + np.array([-0.5, 0.5]) + ) + + # XXX: uncomment for debugging + # x_nd = self.x_nd[self.xy_slice] + # y_nd = self.y_nd[self.xy_slice] + # name = self.viz.name + # if 'dolla_vlm' in name: + # s = 4 + # print( + # f'{name}:\n' + # 'NEW_FROM_SRC:\n' + # f'new_from_src: {new_from_src}\n\n' + + # f'PRE self.x_nd:' + # f'\n{x_nd[-s:]}\n' + # f'PRE self.y_nd:\n' + # f'{y_nd[-s:]}\n\n' + + # f'TO WRITE:\n' + # f'x_nd_new:\n' + # f'{x_nd_new}\n' + # f'y_nd_new:\n' + # f'{y_nd_new}\n' # ) - index_field = self.index_field - if index_field != 'index': - x_nd_new = self.x_nd[read_slc] - x_nd_new[:] = new_from_src[index_field][:, 
np.newaxis] - - # if (self.x_nd[self.xy_slice][-1] == 0.5).any(): - # breakpoint() - def format_xy_nd_to_1d( self, @@ -815,15 +831,10 @@ class StepCurveFmtr(IncrementalFormatter): x_step = self.x_nd[start:stop] y_step = self.y_nd[start:stop] - # if (x_step[-1] == 0.5).any(): - # breakpoint() - # pack in duplicate final value to complete last step level - # x_step[-1] = last_t - # y_step[-1] = last - # x_step[-1, 1] = last_t y_step[-1, 1] = last + # debugging # if y_step.any(): # s = 3 # print( @@ -833,7 +844,9 @@ class StepCurveFmtr(IncrementalFormatter): # slice out in-view data ivl, ivr = vr - # TODO: WHY do we need the extra +1 index? + + # NOTE: add an extra step to get the vertical-line-down-to-zero + # adjacent to the last-datum graphic (filled rect). x_step_iv = x_step[ivl:ivr+1] y_step_iv = y_step[ivl:ivr+1] @@ -847,6 +860,7 @@ class StepCurveFmtr(IncrementalFormatter): if x_1d.any() and (x_1d[-1] == 0.5).any(): breakpoint() + # debugging # if y_1d.any(): # s = 6 # print( diff --git a/piker/ui/_render.py b/piker/ui/_render.py index a0e4134f..8245c2d9 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -102,7 +102,6 @@ def render_baritems( fmtr=OHLCBarsFmtr( shm=viz.shm, viz=viz, - _last_read=read, index_field=viz.index_field, ), ) @@ -112,7 +111,6 @@ def render_baritems( fmtr=OHLCBarsAsCurveFmtr( shm=viz.shm, viz=viz, - _last_read=read, index_field=viz.index_field, ), ) @@ -528,7 +526,6 @@ class Viz(msgspec.Struct): # , frozen=True): # print('exiting early') return graphics - slice_to_head: int = -1 should_redraw: bool = False should_line: bool = False rkwargs = {} @@ -565,7 +562,6 @@ class Viz(msgspec.Struct): # , frozen=True): fmtr=StepCurveFmtr( shm=self.shm, viz=self, - _last_read=read, index_field=self.index_field, ), ) @@ -573,7 +569,6 @@ class Viz(msgspec.Struct): # , frozen=True): # TODO: append logic inside ``.render()`` isn't # correct yet for step curves.. remove this to see it. 
should_redraw = True - slice_to_head = -2 else: r = self._src_r @@ -584,13 +579,9 @@ class Viz(msgspec.Struct): # , frozen=True): fmtr=IncrementalFormatter( shm=self.shm, viz=self, - _last_read=read, ), ) - if isinstance(graphics, StepCurve): - slice_to_head = -2 - # ``Curve`` derivative case(s): array_key = array_key or self.name # print(array_key) @@ -654,7 +645,6 @@ class Viz(msgspec.Struct): # , frozen=True): should_ds=should_ds, showing_src_data=showing_src_data, - slice_to_head=slice_to_head, do_append=do_append, **rkwargs, @@ -881,7 +871,6 @@ class Renderer(msgspec.Struct): showing_src_data: bool = True, do_append: bool = True, - slice_to_head: int = -1, use_fpath: bool = True, # only render datums "in view" of the ``ChartView`` @@ -921,7 +910,6 @@ class Renderer(msgspec.Struct): array_key, profiler, - slice_to_head=slice_to_head, read_src_from_key=read_from_key, slice_to_inview=use_vr, ) From 7f3f6f871aafdae7dc4f9461277e9a7df0d0b530 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 15:47:06 -0500 Subject: [PATCH 23/96] Move path ops routines to top of mod Planning to put the formatters into a new mod and aggregate all path gen/op helpers into this module. 
Further tweak include: - moving `path_arrays_from_ohlc()` back to module level - slice out the last xy datum for `OHLCBarsAsCurveFmtr` 1d formatting - always copy the new x-value from the source to `.x_nd` --- piker/data/_pathops.py | 324 ++++++++++++++++++++--------------------- 1 file changed, 156 insertions(+), 168 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index b99ee88e..a62ee93b 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -50,6 +50,129 @@ if TYPE_CHECKING: from .._profile import Profiler +def xy_downsample( + x, + y, + uppx, + + x_spacer: float = 0.5, + +) -> tuple[ + np.ndarray, + np.ndarray, + float, + float, +]: + ''' + Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input + ``uppx`` (units-per-pixel) and add space between discreet datums. + + ''' + # downsample whenever more then 1 pixels per datum can be shown. + # always refresh data bounds until we get diffing + # working properly, see above.. + bins, x, y, ymn, ymx = ds_m4( + x, + y, + uppx, + ) + + # flatten output to 1d arrays suitable for path-graphics generation. + x = np.broadcast_to(x[:, None], y.shape) + x = (x + np.array( + [-x_spacer, 0, 0, x_spacer] + )).flatten() + y = y.flatten() + + return x, y, ymn, ymx + + +@njit( + # NOTE: need to construct this manually for readonly + # arrays, see https://github.com/numba/numba/issues/4511 + # ( + # types.Array( + # numba_ohlc_dtype, + # 1, + # 'C', + # readonly=True, + # ), + # int64, + # types.unicode_type, + # optional(float64), + # ), + nogil=True +) +def path_arrays_from_ohlc( + data: np.ndarray, + start: int64, + bar_gap: float64 = 0.43, + # index_field: str, + +) -> tuple[ + np.ndarray, + np.ndarray, + np.ndarray, +]: + ''' + Generate an array of lines objects from input ohlc data. + + ''' + size = int(data.shape[0] * 6) + + # XXX: see this for why the dtype might have to be defined outside + # the routine. 
+ # https://github.com/numba/numba/issues/4098#issuecomment-493914533 + x = np.zeros( + shape=size, + dtype=float64, + ) + y, c = x.copy(), x.copy() + + # TODO: report bug for assert @ + # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991 + for i, q in enumerate(data[start:], start): + + # TODO: ask numba why this doesn't work.. + # open, high, low, close, index = q[ + # ['open', 'high', 'low', 'close', 'index']] + + open = q['open'] + high = q['high'] + low = q['low'] + close = q['close'] + # index = float64(q[index_field]) + index = float64(q['index']) + + istart = i * 6 + istop = istart + 6 + + # x,y detail the 6 points which connect all vertexes of a ohlc bar + x[istart:istop] = ( + index - bar_gap, + index, + index, + index, + index, + index + bar_gap, + ) + y[istart:istop] = ( + open, + open, + low, + high, + close, + close, + ) + + # specifies that the first edge is never connected to the + # prior bars last edge thus providing a small "gap"/"space" + # between bars determined by ``bar_gap``. + c[istart:istop] = (1, 1, 1, 1, 1, 0) + + return x, y, c + + class IncrementalFormatter(msgspec.Struct): ''' Incrementally updating, pre-path-graphics tracking, formatter. @@ -132,7 +255,6 @@ class IncrementalFormatter(msgspec.Struct): np.ndarray, np.ndarray, ]: - # TODO: # - can the renderer just call ``Viz.read()`` directly? unpack # latest source data read @@ -423,18 +545,11 @@ class IncrementalFormatter(msgspec.Struct): ) -> None: # write pushed data to flattened copy new_y_nd = new_from_src[data_field] - - # XXX - # TODO: this should be returned and written by caller! 
- # XXX - # generate same-valued-per-row x support with Nx1 shape - index_field = self.index_field - if index_field != 'index': - x_nd_new = self.x_nd[read_slc] - x_nd_new[:] = new_from_src[index_field] - self.y_nd[read_slc] = new_y_nd + x_nd_new = self.x_nd[read_slc] + x_nd_new[:] = new_from_src[self.index_field] + # XXX: was ``.format_xy()`` def format_xy_nd_to_1d( self, @@ -454,6 +569,8 @@ class IncrementalFormatter(msgspec.Struct): Return single field column data verbatim ''' + # NOTE: we don't include the very last datum which is filled in + # normally by another graphics object. return ( array[self.index_field][:-1], array[array_key][:-1], @@ -507,92 +624,37 @@ class OHLCBarsFmtr(IncrementalFormatter): y_nd, ) - @staticmethod - @njit( - # NOTE: need to construct this manually for readonly - # arrays, see https://github.com/numba/numba/issues/4511 - # ( - # types.Array( - # numba_ohlc_dtype, - # 1, - # 'C', - # readonly=True, - # ), - # int64, - # types.unicode_type, - # optional(float64), - # ), - nogil=True - ) - def path_arrays_from_ohlc( - data: np.ndarray, - start: int64, - bar_gap: float64 = 0.43, - # index_field: str, + def incr_update_xy_nd( + self, - ) -> tuple[ - np.ndarray, - np.ndarray, - np.ndarray, - ]: - ''' - Generate an array of lines objects from input ohlc data. + src_shm: ShmArray, + data_field: str, - ''' - size = int(data.shape[0] * 6) + new_from_src: np.ndarray, # portion of source that was updated - # XXX: see this for why the dtype might have to be defined outside - # the routine. - # https://github.com/numba/numba/issues/4098#issuecomment-493914533 - x = np.zeros( - shape=size, - dtype=float64, + read_slc: slice, + ln: int, # len of updated + + nd_start: int, + nd_stop: int, + + is_append: bool, + + ) -> None: + # write newly pushed data to flattened copy + # a struct-arr is always passed in. 
+ new_y_nd = rfn.structured_to_unstructured( + new_from_src[self.fields] ) - y, c = x.copy(), x.copy() + self.y_nd[read_slc] = new_y_nd - # TODO: report bug for assert @ - # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991 - for i, q in enumerate(data[start:], start): + # generate same-valued-per-row x support based on y shape + x_nd_new = self.x_nd[read_slc] + x_nd_new[:] = np.broadcast_to( + new_from_src[self.index_field][:, None], + new_y_nd.shape, + ) + np.array([-0.5, 0, 0, 0.5]) - # TODO: ask numba why this doesn't work.. - # open, high, low, close, index = q[ - # ['open', 'high', 'low', 'close', 'index']] - - open = q['open'] - high = q['high'] - low = q['low'] - close = q['close'] - # index = float64(q[index_field]) - # index = float64(q['time']) - index = float64(q['index']) - - istart = i * 6 - istop = istart + 6 - - # x,y detail the 6 points which connect all vertexes of a ohlc bar - x[istart:istop] = ( - index - bar_gap, - index, - index, - index, - index, - index + bar_gap, - ) - y[istart:istop] = ( - open, - open, - low, - high, - close, - close, - ) - - # specifies that the first edge is never connected to the - # prior bars last edge thus providing a small "gap"/"space" - # between bars determined by ``bar_gap``. - c[istart:istop] = (1, 1, 1, 1, 1, 0) - - return x, y, c # TODO: can we drop this frame and just use the above? def format_xy_nd_to_1d( @@ -617,7 +679,7 @@ class OHLCBarsFmtr(IncrementalFormatter): for line spacing. 
''' - x, y, c = self.path_arrays_from_ohlc( + x, y, c = path_arrays_from_ohlc( array, start, # self.index_field, @@ -625,43 +687,6 @@ class OHLCBarsFmtr(IncrementalFormatter): ) return x, y, c - def incr_update_xy_nd( - self, - - src_shm: ShmArray, - data_field: str, - - new_from_src: np.ndarray, # portion of source that was updated - - read_slc: slice, - ln: int, # len of updated - - nd_start: int, - nd_stop: int, - - is_append: bool, - - ) -> None: - # write newly pushed data to flattened copy - # a struct-arr is always passed in. - new_y_nd = rfn.structured_to_unstructured( - new_from_src[self.fields] - ) - - # XXX - # TODO: this should be returned and written by caller! - # XXX - # generate same-valued-per-row x support based on y shape - index_field: str = self.index_field - if index_field != 'index': - x_nd_new = self.x_nd[read_slc] - x_nd_new[:] = new_from_src[index_field][:, np.newaxis] - - if (self.x_nd[self.xy_slice] == 0.5).any(): - breakpoint() - - self.y_nd[read_slc] = new_y_nd - class OHLCBarsAsCurveFmtr(OHLCBarsFmtr): @@ -681,8 +706,8 @@ class OHLCBarsAsCurveFmtr(OHLCBarsFmtr): # should we be passing in array as an xy arrays tuple? # 2 more datum-indexes to capture zero at end - x_flat = self.x_nd[self.xy_nd_start:self.xy_nd_stop] - y_flat = self.y_nd[self.xy_nd_start:self.xy_nd_stop] + x_flat = self.x_nd[self.xy_nd_start:self.xy_nd_stop-1] + y_flat = self.y_nd[self.xy_nd_start:self.xy_nd_stop-1] # slice to view ivl, ivr = vr @@ -871,40 +896,3 @@ class StepCurveFmtr(IncrementalFormatter): # ) return x_1d, y_1d, 'all' - - -def xy_downsample( - x, - y, - uppx, - - x_spacer: float = 0.5, - -) -> tuple[ - np.ndarray, - np.ndarray, - float, - float, -]: - ''' - Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input - ``uppx`` (units-per-pixel) and add space between discreet datums. - - ''' - # downsample whenever more then 1 pixels per datum can be shown. - # always refresh data bounds until we get diffing - # working properly, see above.. 
- bins, x, y, ymn, ymx = ds_m4( - x, - y, - uppx, - ) - - # flatten output to 1d arrays suitable for path-graphics generation. - x = np.broadcast_to(x[:, None], y.shape) - x = (x + np.array( - [-x_spacer, 0, 0, x_spacer] - )).flatten() - y = y.flatten() - - return x, y, ymn, ymx From 382a619a03c18b6fd83beb20391b07e58ea10c5e Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 16:13:26 -0500 Subject: [PATCH 24/96] Fix from-time index slicing? Apparently we want an `|` for the advanced indexing logic? Also, fix `read_slc` start to not always be 0 XD --- piker/data/flows.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/piker/data/flows.py b/piker/data/flows.py index 655f9ff7..86166c2a 100644 --- a/piker/data/flows.py +++ b/piker/data/flows.py @@ -274,7 +274,7 @@ class Flume(Struct): # time range to the index range. mask: np.ndarray = ( (times >= start_t) - & + | # fml, i guess it's not an `&` ?? (times < stop_t) ) @@ -313,7 +313,7 @@ class Flume(Struct): # slice data by offset from the first index # available in the passed datum set. read_slc = slice( - 0, + i_0 - index[0], i_by_t[-1] - i_0, ) @@ -324,6 +324,8 @@ class Flume(Struct): mask, ) + # TODO: maybe move this our ``Viz`` type to avoid + # the shm lookup discrepancy? def view_data( self, plot: PlotItem, @@ -352,7 +354,6 @@ class Flume(Struct): arr, start_t=vr.left(), stop_t=vr.right(), - timeframe_s=timeframe_s, ) return ( abs_slc, From 309ae240cf90d3b408903d3dddb8a8514efcf06c Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 18:41:54 -0500 Subject: [PATCH 25/96] Look up "index field" in display cycles Again, to make epoch indexing a flip-of-switch for testing look up the `Viz.index_field: str` value when updating labels. Also, drops the legacy tick-type set tracking which we no longer use thanks to the new throttler subsys and it's framing msgs. 
--- piker/ui/_display.py | 47 ++++++++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 0ca6477d..86b33e1e 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -303,17 +303,25 @@ async def graphics_update_loop( fqsn = symbol.fqsn # update last price sticky - fast_pi = fast_chart._vizs[fqsn].plot + fast_viz = fast_chart._vizs[fqsn] + index_field = fast_viz.index_field + fast_pi = fast_viz.plot last_price_sticky = fast_pi.getAxis('right')._stickies[fqsn] last_price_sticky.update_from_data( - *ohlcv.array[-1][['index', 'close']] + *ohlcv.array[-1][[ + index_field, + 'close', + ]] ) last_price_sticky.show() slow_pi = hist_chart._vizs[fqsn].plot hist_last_price_sticky = slow_pi.getAxis('right')._stickies[fqsn] hist_last_price_sticky.update_from_data( - *hist_ohlcv.array[-1][['index', 'close']] + *hist_ohlcv.array[-1][[ + index_field, + 'close', + ]] ) vlm_chart = vlm_charts[fqsn] @@ -515,12 +523,12 @@ def graphics_update_cycle( chart = ds.chart # TODO: just pass this as a direct ref to avoid so many attr accesses? hist_chart = ds.godwidget.hist_linked.chart - assert hist_chart flume = ds.flume sym = flume.symbol fqsn = sym.fqsn main_viz = chart._vizs[fqsn] + index_field = main_viz.index_field profiler = Profiler( msg=f'Graphics loop cycle for: `{chart.name}`', @@ -607,7 +615,7 @@ def graphics_update_cycle( # if the segment of history that is being prepended # isn't in view there is no reason to do a graphics # update. - log.debug('Skipping prepend graphics cycle: frame not in view') + log.info('Skipping prepend graphics cycle: frame not in view') return # TODO: eventually we want to separate out the utrade (aka @@ -639,11 +647,6 @@ def graphics_update_cycle( # do_append=do_append, ) - # NOTE: we always update the "last" datum - # since the current range should at least be updated - # to it's max/min on the last pixel. 
- typs: set[str] = set() - # from pprint import pformat # frame_counts = { # typ: len(frame) for typ, frame in frames_by_type.items() @@ -667,11 +670,6 @@ def graphics_update_cycle( price = tick.get('price') size = tick.get('size') - if typ in typs: - continue - - typs.add(typ) - # compute max and min prices (including bid/ask) from # tick frames to determine the y-range for chart # auto-scaling. @@ -681,7 +679,6 @@ def graphics_update_cycle( mn = min(price - tick_margin, mn) if typ in clear_types: - # XXX: if we only wanted to update graphics from the # "current"/"latest received" clearing price tick # once (see alt iteration order above). @@ -694,7 +691,10 @@ def graphics_update_cycle( # set. # update price sticky(s) - end_ic = array[-1][['index', 'close']] + end_ic = array[-1][[ + index_field, + 'close', + ]] ds.last_price_sticky.update_from_data(*end_ic) ds.hist_last_price_sticky.update_from_data(*end_ic) @@ -827,7 +827,10 @@ def graphics_update_cycle( # always update y-label ds.vlm_sticky.update_from_data( - *array[-1][['index', 'volume']] + *array[-1][[ + index_field, + 'volume', + ]] ) if ( @@ -1124,7 +1127,7 @@ async def display_symbol_data( # avoiding needless Qt-in-guest-mode context switches tick_throttle=min( round(_quote_throttle_rate/len(fqsns)), - 22, + 22, # aka 6 + 16 ), ) as feed: @@ -1374,8 +1377,7 @@ async def display_symbol_data( # trigger another view reset if no sub-chart hist_chart.default_view() rt_chart.default_view() - - # let Qt run to render all widgets and make sure the + # let qt run to render all widgets and make sure the # sidepanes line up vertically. 
await trio.sleep(0) @@ -1423,9 +1425,6 @@ async def display_symbol_data( vlm_charts, ) - rt_chart.default_view() - await trio.sleep(0) - mode: OrderMode async with ( open_order_mode( From 7ec21c7f3b2d1091150ca8060d2c1bc4eaf441f4 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 18:48:04 -0500 Subject: [PATCH 26/96] Rename `.ui._pathops.py` -> `.ui._formatters.py --- piker/data/{_pathops.py => _formatters.py} | 0 piker/ui/_interaction.py | 2 +- piker/ui/_render.py | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename piker/data/{_pathops.py => _formatters.py} (100%) diff --git a/piker/data/_pathops.py b/piker/data/_formatters.py similarity index 100% rename from piker/data/_pathops.py rename to piker/data/_formatters.py diff --git a/piker/ui/_interaction.py b/piker/ui/_interaction.py index 9e9b2d3b..e9a7089f 100644 --- a/piker/ui/_interaction.py +++ b/piker/ui/_interaction.py @@ -494,7 +494,7 @@ class ChartView(ViewBox): chart = self.linked.chart # don't zoom more then the min points setting - l, lbar, rbar, r = chart.get_viz(chart.name).bars_range() + out = l, lbar, rbar, r = chart.get_viz(chart.name).bars_range() # vl = r - l # if ev.delta() > 0 and vl <= _min_points_to_show: diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 8245c2d9..96c7e069 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -37,7 +37,7 @@ from ..data._sharedmem import ( ShmArray, ) from ..data.feed import Flume -from ..data._pathops import ( +from ..data._formatters import ( IncrementalFormatter, OHLCBarsFmtr, # Plain OHLC renderer OHLCBarsAsCurveFmtr, # OHLC converted to line From 9052ed5ddfe78e07c7db86892d3a25f933bf7e81 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 18:54:42 -0500 Subject: [PATCH 27/96] Move qpath-ops routines back to separate mod --- piker/data/_formatters.py | 138 +-------------------------------- piker/data/_pathops.py | 155 ++++++++++++++++++++++++++++++++++++++ piker/ui/_render.py | 2 +- 3 files 
changed, 159 insertions(+), 136 deletions(-) create mode 100644 piker/data/_pathops.py diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index a62ee93b..523875a3 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -14,7 +14,7 @@ # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . """ -Super fast ``QPainterPath`` generation related operator routines. +Pre-(path)-graphics formatted x/y nd/1d rendering subsystem. """ from __future__ import annotations @@ -27,20 +27,12 @@ import msgspec from msgspec import field import numpy as np from numpy.lib import recfunctions as rfn -from numba import ( - # types, - njit, - float64, - int64, - # optional, -) from ._sharedmem import ( ShmArray, ) -# from ._source import numba_ohlc_dtype -from ._compression import ( - ds_m4, +from ._pathops import ( + path_arrays_from_ohlc, ) if TYPE_CHECKING: @@ -50,129 +42,6 @@ if TYPE_CHECKING: from .._profile import Profiler -def xy_downsample( - x, - y, - uppx, - - x_spacer: float = 0.5, - -) -> tuple[ - np.ndarray, - np.ndarray, - float, - float, -]: - ''' - Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input - ``uppx`` (units-per-pixel) and add space between discreet datums. - - ''' - # downsample whenever more then 1 pixels per datum can be shown. - # always refresh data bounds until we get diffing - # working properly, see above.. - bins, x, y, ymn, ymx = ds_m4( - x, - y, - uppx, - ) - - # flatten output to 1d arrays suitable for path-graphics generation. 
- x = np.broadcast_to(x[:, None], y.shape) - x = (x + np.array( - [-x_spacer, 0, 0, x_spacer] - )).flatten() - y = y.flatten() - - return x, y, ymn, ymx - - -@njit( - # NOTE: need to construct this manually for readonly - # arrays, see https://github.com/numba/numba/issues/4511 - # ( - # types.Array( - # numba_ohlc_dtype, - # 1, - # 'C', - # readonly=True, - # ), - # int64, - # types.unicode_type, - # optional(float64), - # ), - nogil=True -) -def path_arrays_from_ohlc( - data: np.ndarray, - start: int64, - bar_gap: float64 = 0.43, - # index_field: str, - -) -> tuple[ - np.ndarray, - np.ndarray, - np.ndarray, -]: - ''' - Generate an array of lines objects from input ohlc data. - - ''' - size = int(data.shape[0] * 6) - - # XXX: see this for why the dtype might have to be defined outside - # the routine. - # https://github.com/numba/numba/issues/4098#issuecomment-493914533 - x = np.zeros( - shape=size, - dtype=float64, - ) - y, c = x.copy(), x.copy() - - # TODO: report bug for assert @ - # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991 - for i, q in enumerate(data[start:], start): - - # TODO: ask numba why this doesn't work.. - # open, high, low, close, index = q[ - # ['open', 'high', 'low', 'close', 'index']] - - open = q['open'] - high = q['high'] - low = q['low'] - close = q['close'] - # index = float64(q[index_field]) - index = float64(q['index']) - - istart = i * 6 - istop = istart + 6 - - # x,y detail the 6 points which connect all vertexes of a ohlc bar - x[istart:istop] = ( - index - bar_gap, - index, - index, - index, - index, - index + bar_gap, - ) - y[istart:istop] = ( - open, - open, - low, - high, - close, - close, - ) - - # specifies that the first edge is never connected to the - # prior bars last edge thus providing a small "gap"/"space" - # between bars determined by ``bar_gap``. 
- c[istart:istop] = (1, 1, 1, 1, 1, 0) - - return x, y, c - - class IncrementalFormatter(msgspec.Struct): ''' Incrementally updating, pre-path-graphics tracking, formatter. @@ -655,7 +524,6 @@ class OHLCBarsFmtr(IncrementalFormatter): new_y_nd.shape, ) + np.array([-0.5, 0, 0, 0.5]) - # TODO: can we drop this frame and just use the above? def format_xy_nd_to_1d( self, diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py new file mode 100644 index 00000000..b2026f17 --- /dev/null +++ b/piker/data/_pathops.py @@ -0,0 +1,155 @@ +# piker: trading gear for hackers +# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0) + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +""" +Super fast ``QPainterPath`` generation related operator routines. + +""" +import numpy as np +from numba import ( + # types, + njit, + float64, + int64, + # optional, +) + +# from ._source import numba_ohlc_dtype +from ._compression import ( + ds_m4, +) + + +def xy_downsample( + x, + y, + uppx, + + x_spacer: float = 0.5, + +) -> tuple[ + np.ndarray, + np.ndarray, + float, + float, +]: + ''' + Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input + ``uppx`` (units-per-pixel) and add space between discreet datums. + + ''' + # downsample whenever more then 1 pixels per datum can be shown. + # always refresh data bounds until we get diffing + # working properly, see above.. 
+ bins, x, y, ymn, ymx = ds_m4( + x, + y, + uppx, + ) + + # flatten output to 1d arrays suitable for path-graphics generation. + x = np.broadcast_to(x[:, None], y.shape) + x = (x + np.array( + [-x_spacer, 0, 0, x_spacer] + )).flatten() + y = y.flatten() + + return x, y, ymn, ymx + + +@njit( + # NOTE: need to construct this manually for readonly + # arrays, see https://github.com/numba/numba/issues/4511 + # ( + # types.Array( + # numba_ohlc_dtype, + # 1, + # 'C', + # readonly=True, + # ), + # int64, + # types.unicode_type, + # optional(float64), + # ), + nogil=True +) +def path_arrays_from_ohlc( + data: np.ndarray, + start: int64, + bar_gap: float64 = 0.43, + # index_field: str, + +) -> tuple[ + np.ndarray, + np.ndarray, + np.ndarray, +]: + ''' + Generate an array of lines objects from input ohlc data. + + ''' + size = int(data.shape[0] * 6) + + # XXX: see this for why the dtype might have to be defined outside + # the routine. + # https://github.com/numba/numba/issues/4098#issuecomment-493914533 + x = np.zeros( + shape=size, + dtype=float64, + ) + y, c = x.copy(), x.copy() + + # TODO: report bug for assert @ + # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991 + for i, q in enumerate(data[start:], start): + + # TODO: ask numba why this doesn't work.. + # open, high, low, close, index = q[ + # ['open', 'high', 'low', 'close', 'index']] + + open = q['open'] + high = q['high'] + low = q['low'] + close = q['close'] + # index = float64(q[index_field]) + index = float64(q['index']) + + istart = i * 6 + istop = istart + 6 + + # x,y detail the 6 points which connect all vertexes of a ohlc bar + x[istart:istop] = ( + index - bar_gap, + index, + index, + index, + index, + index + bar_gap, + ) + y[istart:istop] = ( + open, + open, + low, + high, + close, + close, + ) + + # specifies that the first edge is never connected to the + # prior bars last edge thus providing a small "gap"/"space" + # between bars determined by ``bar_gap``. 
+ c[istart:istop] = (1, 1, 1, 1, 1, 0) + + return x, y, c diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 96c7e069..1caa8365 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -42,8 +42,8 @@ from ..data._formatters import ( OHLCBarsFmtr, # Plain OHLC renderer OHLCBarsAsCurveFmtr, # OHLC converted to line StepCurveFmtr, # "step" curve (like for vlm) - xy_downsample, ) +from ..data._pathops import xy_downsample from .._profile import ( pg_profile_enabled, # ms_slower_then, From 7124a131dd60235b3a9275f0cf5878755d3364b9 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 19:14:36 -0500 Subject: [PATCH 28/96] Move (unused) path gen routines to `.ui._pathops` --- piker/data/_compression.py | 114 +------------------------------------ piker/data/_pathops.py | 108 +++++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+), 112 deletions(-) diff --git a/piker/data/_compression.py b/piker/data/_compression.py index e6111fca..fde92500 100644 --- a/piker/data/_compression.py +++ b/piker/data/_compression.py @@ -23,9 +23,8 @@ import math from typing import Optional import numpy as np -from numpy.lib import recfunctions as rfn from numba import ( - jit, + njit, # float64, optional, int64, ) @@ -35,114 +34,6 @@ from ..log import get_logger log = get_logger(__name__) -def hl2mxmn( - ohlc: np.ndarray, - index_field: str = 'index', - -) -> np.ndarray: - ''' - Convert a OHLC struct-array containing 'high'/'low' columns - to a "joined" max/min 1-d array. - - ''' - index = ohlc[index_field] - hls = ohlc[[ - 'low', - 'high', - ]] - - mxmn = np.empty(2*hls.size, dtype=np.float64) - x = np.empty(2*hls.size, dtype=np.float64) - trace_hl(hls, mxmn, x, index[0]) - x = x + index[0] - - return mxmn, x - - -@jit( - # TODO: the type annots.. 
- # float64[:](float64[:],), - nopython=True, -) -def trace_hl( - hl: 'np.ndarray', - out: np.ndarray, - x: np.ndarray, - start: int, - - # the "offset" values in the x-domain which - # place the 2 output points around each ``int`` - # master index. - margin: float = 0.43, - -) -> None: - ''' - "Trace" the outline of the high-low values of an ohlc sequence - as a line such that the maximum deviation (aka disperaion) between - bars if preserved. - - This routine is expected to modify input arrays in-place. - - ''' - last_l = hl['low'][0] - last_h = hl['high'][0] - - for i in range(hl.size): - row = hl[i] - l, h = row['low'], row['high'] - - up_diff = h - last_l - down_diff = last_h - l - - if up_diff > down_diff: - out[2*i + 1] = h - out[2*i] = last_l - else: - out[2*i + 1] = l - out[2*i] = last_h - - last_l = l - last_h = h - - x[2*i] = int(i) - margin - x[2*i + 1] = int(i) + margin - - return out - - -def ohlc_flatten( - ohlc: np.ndarray, - use_mxmn: bool = True, - index_field: str = 'index', - -) -> tuple[np.ndarray, np.ndarray]: - ''' - Convert an OHLCV struct-array into a flat ready-for-line-plotting - 1-d array that is 4 times the size with x-domain values distributed - evenly (by 0.5 steps) over each index. - - ''' - index = ohlc[index_field] - - if use_mxmn: - # traces a line optimally over highs to lows - # using numba. NOTE: pretty sure this is faster - # and looks about the same as the below output. 
- flat, x = hl2mxmn(ohlc) - - else: - flat = rfn.structured_to_unstructured( - ohlc[['open', 'high', 'low', 'close']] - ).flatten() - - x = np.linspace( - start=index[0] - 0.5, - stop=index[-1] + 0.5, - num=len(flat), - ) - return x, flat - - def ds_m4( x: np.ndarray, y: np.ndarray, @@ -263,8 +154,7 @@ def ds_m4( return nb, x_out, y_out, ymn, ymx -@jit( - nopython=True, +@njit( nogil=True, ) def _m4( diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index b2026f17..acc02be4 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -18,6 +18,7 @@ Super fast ``QPainterPath`` generation related operator routines. """ import numpy as np +from numpy.lib import recfunctions as rfn from numba import ( # types, njit, @@ -153,3 +154,110 @@ def path_arrays_from_ohlc( c[istart:istop] = (1, 1, 1, 1, 1, 0) return x, y, c + + +def hl2mxmn( + ohlc: np.ndarray, + index_field: str = 'index', + +) -> np.ndarray: + ''' + Convert a OHLC struct-array containing 'high'/'low' columns + to a "joined" max/min 1-d array. + + ''' + index = ohlc[index_field] + hls = ohlc[[ + 'low', + 'high', + ]] + + mxmn = np.empty(2*hls.size, dtype=np.float64) + x = np.empty(2*hls.size, dtype=np.float64) + trace_hl(hls, mxmn, x, index[0]) + x = x + index[0] + + return mxmn, x + + +@njit( + # TODO: the type annots.. + # float64[:](float64[:],), +) +def trace_hl( + hl: 'np.ndarray', + out: np.ndarray, + x: np.ndarray, + start: int, + + # the "offset" values in the x-domain which + # place the 2 output points around each ``int`` + # master index. + margin: float = 0.43, + +) -> None: + ''' + "Trace" the outline of the high-low values of an ohlc sequence + as a line such that the maximum deviation (aka disperaion) between + bars if preserved. + + This routine is expected to modify input arrays in-place. 
+ + ''' + last_l = hl['low'][0] + last_h = hl['high'][0] + + for i in range(hl.size): + row = hl[i] + l, h = row['low'], row['high'] + + up_diff = h - last_l + down_diff = last_h - l + + if up_diff > down_diff: + out[2*i + 1] = h + out[2*i] = last_l + else: + out[2*i + 1] = l + out[2*i] = last_h + + last_l = l + last_h = h + + x[2*i] = int(i) - margin + x[2*i + 1] = int(i) + margin + + return out + + +def ohlc_flatten( + ohlc: np.ndarray, + use_mxmn: bool = True, + index_field: str = 'index', + +) -> tuple[np.ndarray, np.ndarray]: + ''' + Convert an OHLCV struct-array into a flat ready-for-line-plotting + 1-d array that is 4 times the size with x-domain values distributed + evenly (by 0.5 steps) over each index. + + ''' + index = ohlc[index_field] + + if use_mxmn: + # traces a line optimally over highs to lows + # using numba. NOTE: pretty sure this is faster + # and looks about the same as the below output. + flat, x = hl2mxmn(ohlc) + + else: + flat = rfn.structured_to_unstructured( + ohlc[['open', 'high', 'low', 'close']] + ).flatten() + + x = np.linspace( + start=index[0] - 0.5, + stop=index[-1] + 0.5, + num=len(flat), + ) + return x, flat From 2e67e98b4d0e7442f923937f175be6f72d3eeb6a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 30 Nov 2022 19:18:27 -0500 Subject: [PATCH 29/96] Go with explicit `.data._m4` mod name Since it's a notable and self-contained graphics compression algo, might as well give it a dedicated module B) --- piker/data/{_compression.py => _m4.py} | 28 +++++++++++++++----------- piker/data/_pathops.py | 5 ++--- piker/ui/_curve.py | 4 ---- 3 files changed, 18 insertions(+), 19 deletions(-) rename piker/data/{_compression.py => _m4.py} (91%) diff --git a/piker/data/_compression.py b/piker/data/_m4.py similarity index 91% rename from piker/data/_compression.py rename to piker/data/_m4.py index fde92500..26d5e0b0 100644 --- a/piker/data/_compression.py +++ b/piker/data/_m4.py @@ -15,8 +15,22 @@ # along with this program. If not, see . 
''' -Graphics related downsampling routines for compressing to pixel -limits on the display device. +Graphics downsampling using the infamous M4 algorithm. + +This is one of ``piker``'s secret weapons allowing us to boss all other +charting platforms B) + +(AND DON'T YOU DARE TAKE THIS CODE WITHOUT CREDIT OR WE'LL SUE UR F#&@* ASS). + +NOTES: this method is a so called "visualization driven data +aggregation" approach. It gives error-free line chart +downsampling, see +further scientific paper resources: +- http://www.vldb.org/pvldb/vol7/p797-jugel.pdf +- http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf + +Details on implementation of this algo are based in, +https://github.com/pikers/piker/issues/109 ''' import math @@ -56,16 +70,6 @@ def ds_m4( This is more or less an OHLC style sampling of a line-style series. ''' - # NOTE: this method is a so called "visualization driven data - # aggregation" approach. It gives error-free line chart - # downsampling, see - # further scientific paper resources: - # - http://www.vldb.org/pvldb/vol7/p797-jugel.pdf - # - http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf - - # Details on implementation of this algo are based in, - # https://github.com/pikers/piker/issues/109 - # XXX: from infinite on downsampling viewable graphics: # "one thing i remembered about the binning - if you are # picking a range within your timeseries the start and end bin diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index acc02be4..144bea33 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -27,10 +27,9 @@ from numba import ( # optional, ) +# TODO: for ``numba`` typing.. 
# from ._source import numba_ohlc_dtype -from ._compression import ( - ds_m4, -) +from ._m4 import ds_m4 def xy_downsample( diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index 8df1cda0..ad415031 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -36,10 +36,6 @@ from PyQt5.QtGui import ( ) from .._profile import pg_profile_enabled, ms_slower_then from ._style import hcolor -# from ._compression import ( -# # ohlc_to_m4_line, -# ds_m4, -# ) from ..log import get_logger from .._profile import Profiler From 031d7967de8ffde664bfe64ef3521cd07fc2eadc Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 1 Dec 2022 09:33:53 -0500 Subject: [PATCH 30/96] Facepalm: actually return latest index on time slice fail.. --- piker/data/flows.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/piker/data/flows.py b/piker/data/flows.py index 86166c2a..f665e5a8 100644 --- a/piker/data/flows.py +++ b/piker/data/flows.py @@ -232,7 +232,7 @@ class Flume(Struct): return array['index'][mask][0] # just the latest index - array['index'][-1] + return array['index'][-1] def slice_from_time( self, From cca3417c57f49814f8899eb3fed9cc5feb56a959 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 1 Dec 2022 19:37:31 -0500 Subject: [PATCH 31/96] Facepalm: put graphics cycle in `do_ds: bool` block.. 
--- piker/ui/_chart.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index aa068d74..4dfcdc80 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -1112,11 +1112,7 @@ class ChartPlotWidget(pg.PlotWidget): if do_ds: self.view.maybe_downsample_graphics() view._set_yrange() - - try: self.linked.graphics_cycle() - except IndexError: - pass def increment_view( self, From e4a0d4ecea13ff23b1f07a5e46357574e383d15a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 2 Dec 2022 11:20:17 -0500 Subject: [PATCH 32/96] Markup OHLC->path gen with `numba` issue # --- piker/data/_pathops.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 144bea33..23df989d 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -89,6 +89,8 @@ def path_arrays_from_ohlc( data: np.ndarray, start: int64, bar_gap: float64 = 0.43, + + # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622 # index_field: str, ) -> tuple[ @@ -115,17 +117,18 @@ def path_arrays_from_ohlc( # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991 for i, q in enumerate(data[start:], start): - # TODO: ask numba why this doesn't work.. - # open, high, low, close, index = q[ - # ['open', 'high', 'low', 'close', 'index']] - open = q['open'] high = q['high'] low = q['low'] close = q['close'] - # index = float64(q[index_field]) index = float64(q['index']) + # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622 + # index = float64(q[index_field]) + # AND this (probably) + # open, high, low, close, index = q[ + # ['open', 'high', 'low', 'close', 'index']] + istart = i * 6 istop = istart + 6 From bf88b40a50d097aa620df9005d229c52045147bc Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 2 Dec 2022 18:49:12 -0500 Subject: [PATCH 33/96] Facepalm**2: fix array-read-slice, like actually.. 
We need to subtract the first index in the array segment read, not the first index value in the time-sliced output, to get the correct offset into the non-absolute (`ShmArray.array` read) array.. Further we **do** need the `&` between the advance indexing conditions and this adds profiling to see that it is indeed real slow (like 20ms ish even when using `np.where()`). --- piker/data/flows.py | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/piker/data/flows.py b/piker/data/flows.py index f665e5a8..7a07fe38 100644 --- a/piker/data/flows.py +++ b/piker/data/flows.py @@ -48,6 +48,10 @@ from ._sharedmem import ( from ._sampling import ( open_sample_stream, ) +from .._profile import ( + Profiler, + pg_profile_enabled, +) if TYPE_CHECKING: from pyqtgraph import PlotItem @@ -251,6 +255,13 @@ class Flume(Struct): for the caller to use to slice the input array if needed. ''' + profiler = Profiler( + msg='Flume.slice_from_time()', + disabled=not pg_profile_enabled(), + ms_threshold=4, + # ms_threshold=ms_slower_then, + ) + times = arr['time'] index = arr['index'] @@ -272,12 +283,12 @@ class Flume(Struct): # use advanced indexing to map the # time range to the index range. - mask: np.ndarray = ( + mask: np.ndarray = np.where( (times >= start_t) - | # fml, i guess it's not an `&` ?? + & (times < stop_t) ) - + profiler('advanced indexing slice') # TODO: if we can ensure each time field has a uniform # step we can instead do some arithmetic to determine # the equivalent index like we used to? @@ -289,6 +300,8 @@ class Flume(Struct): i_by_t = index[mask] try: i_0 = i_by_t[0] + i_last = i_by_t[-1] + i_first_read = index[0] except IndexError: if ( start_t < times[0] @@ -306,17 +319,20 @@ class Flume(Struct): None, ) - abs_slc = slice( - i_0, - i_by_t[-1], - ) + abs_slc = slice(i_0, i_last) + # slice data by offset from the first index # available in the passed datum set. 
read_slc = slice( - i_0 - index[0], - i_by_t[-1] - i_0, + i_0 - i_first_read, + i_last - i_first_read + 1, ) + profiler( + 'slicing complete' + f'{start_t} -> {abs_slc.start} | {read_slc.start}\n' + f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n' + ) # also return the readable data from the timerange return ( abs_slc, From d5844ce8ffba3fda851299cd671b2758ea860f40 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 2 Dec 2022 19:58:19 -0500 Subject: [PATCH 34/96] Delegate formatter `.index_field` to the parent `Viz` --- piker/data/_formatters.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 523875a3..173d7552 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -54,7 +54,16 @@ class IncrementalFormatter(msgspec.Struct): ''' shm: ShmArray viz: Viz - index_field: str = 'index' + + @property + def index_field(self) -> 'str': + ''' + Value (``str``) used to look up the "index series" from the + underlying source ``numpy`` struct-array; delegate directly to + the managing ``Viz``. 
+ + ''' + return self.viz.index_field # Incrementally updated xy ndarray formatted data, a pre-1d # format which is updated and cached independently of the final From a4392696a1d2ff1d9920cb404eb7977809e8e581 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 2 Dec 2022 20:05:49 -0500 Subject: [PATCH 35/96] Drop `index_field` input to renders, add `.read()` profiling --- piker/ui/_render.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 1caa8365..5a45cfbe 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -44,10 +44,6 @@ from ..data._formatters import ( StepCurveFmtr, # "step" curve (like for vlm) ) from ..data._pathops import xy_downsample -from .._profile import ( - pg_profile_enabled, - # ms_slower_then, -) from ._ohlc import ( BarItems, # bar_from_ohlc_row, @@ -58,7 +54,10 @@ from ._curve import ( FlattenedOHLC, ) from ..log import get_logger -from .._profile import Profiler +from .._profile import ( + Profiler, + pg_profile_enabled, +) log = get_logger(__name__) @@ -102,7 +101,7 @@ def render_baritems( fmtr=OHLCBarsFmtr( shm=viz.shm, viz=viz, - index_field=viz.index_field, + # index_field=viz.index_field, ), ) @@ -111,7 +110,7 @@ def render_baritems( fmtr=OHLCBarsAsCurveFmtr( shm=viz.shm, viz=viz, - index_field=viz.index_field, + # index_field=viz.index_field, ), ) @@ -192,6 +191,7 @@ def render_baritems( curve.hide() bars.show() bars.update() + # breakpoint() return ( graphics, @@ -401,6 +401,7 @@ class Viz(msgspec.Struct): # , frozen=True): self, array_field: Optional[str] = None, index_field: str | None = None, + profiler: None | Profiler = None, ) -> tuple[ int, int, np.ndarray, @@ -418,6 +419,9 @@ class Viz(msgspec.Struct): # , frozen=True): # readable data array = self.shm.array + if profiler: + profiler('self.shm.array READ') + ( ifirst, l, @@ -429,6 +433,9 @@ class Viz(msgspec.Struct): # , frozen=True): # if rbar < lbar: # breakpoint() + 
if profiler: + profiler('self.datums_range()') + abs_slc = slice(ifirst, ilast) # TODO: support time slicing @@ -454,6 +461,12 @@ class Viz(msgspec.Struct): # , frozen=True): # f'start/stop: {lbar},{rbar}\n', # f'diff: {diff}\n', # ) + if profiler: + profiler( + '`Flume.slice_from_time(' + f'start_t={lbar}' + f'stop_t={rbar})' + ) # array-index slicing # TODO: can we do time based indexing using arithmetic presuming @@ -470,6 +483,8 @@ class Viz(msgspec.Struct): # , frozen=True): # XXX: same as ^ # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1] + if profiler: + profiler('index arithmetic for slicing') if array_field: array = array[array_field] @@ -513,7 +528,7 @@ class Viz(msgspec.Struct): # , frozen=True): read = ( xfirst, xlast, src_array, ivl, ivr, in_view, - ) = self.read() + ) = self.read(profiler=profiler) profiler('read src shm data') @@ -562,7 +577,6 @@ class Viz(msgspec.Struct): # , frozen=True): fmtr=StepCurveFmtr( shm=self.shm, viz=self, - index_field=self.index_field, ), ) From a33f58a61aac5e49748af21b9f8949430adeef9c Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 2 Dec 2022 20:13:17 -0500 Subject: [PATCH 36/96] Move `Flume.slice_from_time()` to `.data._pathops` mod func --- piker/data/_pathops.py | 108 ++++++++++++++++++++++++++++++++++++++++- piker/data/flows.py | 105 +-------------------------------------- piker/ui/_chart.py | 3 +- piker/ui/_render.py | 13 ++--- 4 files changed, 118 insertions(+), 111 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 23df989d..91169e73 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -30,6 +30,10 @@ from numba import ( # TODO: for ``numba`` typing.. 
# from ._source import numba_ohlc_dtype from ._m4 import ds_m4 +from .._profile import ( + Profiler, + pg_profile_enabled, +) def xy_downsample( @@ -121,7 +125,7 @@ def path_arrays_from_ohlc( high = q['high'] low = q['low'] close = q['close'] - index = float64(q['index']) + index = float64(q['time']) # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622 # index = float64(q[index_field]) @@ -263,3 +267,105 @@ def ohlc_flatten( num=len(flat), ) return x, flat + + +def slice_from_time( + arr: np.ndarray, + start_t: float, + stop_t: float, + +) -> tuple[ + slice, + slice, + np.ndarray | None, +]: + ''' + Slice an input struct array to a time range and return the absolute + and "readable" slices for that array as well as the indexing mask + for the caller to use to slice the input array if needed. + + ''' + profiler = Profiler( + msg='slice_from_time()', + disabled=not pg_profile_enabled(), + ms_threshold=4, + # ms_threshold=ms_slower_then, + ) + + times = arr['time'] + index = arr['index'] + + if ( + start_t < 0 + or start_t >= stop_t + ): + return ( + slice( + index[0], + index[-1], + ), + slice( + 0, + len(arr), + ), + None, + ) + + # use advanced indexing to map the + # time range to the index range. + mask: np.ndarray = np.where( + (times >= start_t) + & + (times < stop_t) + ) + profiler('advanced indexing slice') + # TODO: if we can ensure each time field has a uniform + # step we can instead do some arithmetic to determine + # the equivalent index like we used to? + # return array[ + # lbar - ifirst: + # (rbar - ifirst) + 1 + # ] + + i_by_t = index[mask] + try: + i_0 = i_by_t[0] + i_last = i_by_t[-1] + i_first_read = index[0] + except IndexError: + if ( + start_t < times[0] + or stop_t >= times[-1] + ): + return ( + slice( + index[0], + index[-1], + ), + slice( + 0, + len(arr), + ), + None, + ) + + abs_slc = slice(i_0, i_last) + + # slice data by offset from the first index + # available in the passed datum set. 
+ read_slc = slice( + i_0 - i_first_read, + i_last - i_first_read + 1, + ) + + profiler( + 'slicing complete' + f'{start_t} -> {abs_slc.start} | {read_slc.start}\n' + f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n' + ) + # also return the readable data from the timerange + return ( + abs_slc, + read_slc, + mask, + ) diff --git a/piker/data/flows.py b/piker/data/flows.py index 7a07fe38..cf3a028a 100644 --- a/piker/data/flows.py +++ b/piker/data/flows.py @@ -48,6 +48,7 @@ from ._sharedmem import ( from ._sampling import ( open_sample_stream, ) +from ._pathops import slice_from_time from .._profile import ( Profiler, pg_profile_enabled, @@ -238,108 +239,6 @@ class Flume(Struct): # just the latest index return array['index'][-1] - def slice_from_time( - self, - arr: np.ndarray, - start_t: float, - stop_t: float, - - ) -> tuple[ - slice, - slice, - np.ndarray | None, - ]: - ''' - Slice an input struct array to a time range and return the absolute - and "readable" slices for that array as well as the indexing mask - for the caller to use to slice the input array if needed. - - ''' - profiler = Profiler( - msg='Flume.slice_from_time()', - disabled=not pg_profile_enabled(), - ms_threshold=4, - # ms_threshold=ms_slower_then, - ) - - times = arr['time'] - index = arr['index'] - - if ( - start_t < 0 - or start_t >= stop_t - ): - return ( - slice( - index[0], - index[-1], - ), - slice( - 0, - len(arr), - ), - None, - ) - - # use advanced indexing to map the - # time range to the index range. - mask: np.ndarray = np.where( - (times >= start_t) - & - (times < stop_t) - ) - profiler('advanced indexing slice') - # TODO: if we can ensure each time field has a uniform - # step we can instead do some arithmetic to determine - # the equivalent index like we used to? 
- # return array[ - # lbar - ifirst: - # (rbar - ifirst) + 1 - # ] - - i_by_t = index[mask] - try: - i_0 = i_by_t[0] - i_last = i_by_t[-1] - i_first_read = index[0] - except IndexError: - if ( - start_t < times[0] - or stop_t >= times[-1] - ): - return ( - slice( - index[0], - index[-1], - ), - slice( - 0, - len(arr), - ), - None, - ) - - abs_slc = slice(i_0, i_last) - - # slice data by offset from the first index - # available in the passed datum set. - read_slc = slice( - i_0 - i_first_read, - i_last - i_first_read + 1, - ) - - profiler( - 'slicing complete' - f'{start_t} -> {abs_slc.start} | {read_slc.start}\n' - f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n' - ) - # also return the readable data from the timerange - return ( - abs_slc, - read_slc, - mask, - ) - # TODO: maybe move this our ``Viz`` type to avoid # the shm lookup discrepancy? def view_data( @@ -366,7 +265,7 @@ class Flume(Struct): abs_slc, read_slc, mask, - ) = self.slice_from_time( + ) = slice_from_time( arr, start_t=vr.left(), stop_t=vr.right(), diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 4dfcdc80..3e661e51 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -66,6 +66,7 @@ from ..data.feed import ( Feed, Flume, ) +from ..data._pathops import slice_from_time from ..data._source import Symbol from ..log import get_logger from ._interaction import ChartView @@ -1037,7 +1038,7 @@ class ChartPlotWidget(pg.PlotWidget): read_slc, mask, - ) = viz.flume.slice_from_time( + ) = slice_from_time( array, start_t=vtl, stop_t=vtr, diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 5a45cfbe..ab07237e 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -43,7 +43,10 @@ from ..data._formatters import ( OHLCBarsAsCurveFmtr, # OHLC converted to line StepCurveFmtr, # "step" curve (like for vlm) ) -from ..data._pathops import xy_downsample +from ..data._pathops import ( + xy_downsample, + slice_from_time, +) from ._ohlc import ( BarItems, # bar_from_ohlc_row, @@ -101,7 
+104,6 @@ def render_baritems( fmtr=OHLCBarsFmtr( shm=viz.shm, viz=viz, - # index_field=viz.index_field, ), ) @@ -110,7 +112,6 @@ def render_baritems( fmtr=OHLCBarsAsCurveFmtr( shm=viz.shm, viz=viz, - # index_field=viz.index_field, ), ) @@ -291,7 +292,7 @@ class Viz(msgspec.Struct): # , frozen=True): abs_slc, read_slc, mask, - ) = self.flume.slice_from_time( + ) = slice_from_time( arr, start_t=lbar, stop_t=rbar, @@ -444,7 +445,7 @@ class Viz(msgspec.Struct): # , frozen=True): abs_slc, read_slc, mask, - ) = self.flume.slice_from_time( + ) = slice_from_time( array, start_t=lbar, stop_t=rbar, @@ -463,7 +464,7 @@ class Viz(msgspec.Struct): # , frozen=True): # ) if profiler: profiler( - '`Flume.slice_from_time(' + '`slice_from_time(' f'start_t={lbar}' f'stop_t={rbar})' ) From 58b36db2e56b84a4591d24ec32cffd21cb56a333 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 2 Dec 2022 21:04:44 -0500 Subject: [PATCH 37/96] Use step size to determine last datum bar gap --- piker/ui/_ohlc.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index 98ffcb85..9a37bbfa 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -51,7 +51,7 @@ log = get_logger(__name__) def bar_from_ohlc_row( row: np.ndarray, # 0.5 is no overlap between arms, 1.0 is full overlap - w: float = 0.43 + bar_gap: float = 0.43 ) -> tuple[QLineF]: ''' @@ -59,8 +59,7 @@ def bar_from_ohlc_row( OHLC "bar" for use in the "last datum" of a series. ''' - open, high, low, close, index = row #[fields] - # ['open', 'high', 'low', 'close', 'index']] + open, high, low, close, index = row # TODO: maybe consider using `QGraphicsLineItem` ?? # gives us a ``.boundingRect()`` on the objects which may make @@ -81,10 +80,10 @@ def bar_from_ohlc_row( # the index's range according to the view mapping coordinates. 
# open line - o = QLineF(index - w, open, index, open) + o = QLineF(index - bar_gap, open, index, open) # close line - c = QLineF(index, close, index + w, close) + c = QLineF(index, close, index + bar_gap, close) return [hl, o, c] @@ -239,9 +238,13 @@ class BarItems(pg.GraphicsObject): # if times[-1] - times[-2]: # breakpoint() + index = src_data[index_field] + step_size = index[-1] - index[-2] + # generate new lines objects for updatable "current bar" self._last_bar_lines = bar_from_ohlc_row( last_row, + bar_gap=step_size * 0.43 # fields, ) From a3844f9922b9bdb835571b875f4698999ae75435 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 2 Dec 2022 21:05:24 -0500 Subject: [PATCH 38/96] Use step size to determine bar gaps --- piker/data/_formatters.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 173d7552..5d1d436b 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -99,6 +99,16 @@ class IncrementalFormatter(msgspec.Struct): _last_vr: tuple[float, float] | None = None _last_ivdr: tuple[float, float] | None = None + _index_step_size: float = None + + @property + def index_step_size(self) -> float: + ''' + Readonly value computed on first ``.diff()`` call. + + ''' + return self._index_step_size + def __repr__(self) -> str: msg = ( f'{type(self)}: ->\n\n' @@ -172,6 +182,9 @@ class IncrementalFormatter(msgspec.Struct): # set us in a zero-to-append state nd_stop = self.xy_nd_stop = src_stop + align_index = array[self.index_field] + self._index_step_size = align_index[-1] - align_index[-2] + # compute the length diffs between the first/last index entry in # the input data and the last indexes we have on record from the # last time we updated the curve index. 
@@ -559,8 +572,7 @@ class OHLCBarsFmtr(IncrementalFormatter): x, y, c = path_arrays_from_ohlc( array, start, - # self.index_field, - bar_gap=w, + bar_gap=w * self.index_step_size, ) return x, y, c From 6ca8334253c68ff62f53a3d72e701bfa28de2fb9 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 2 Dec 2022 21:07:42 -0500 Subject: [PATCH 39/96] Use index (time) step to calc OHLC bar/line uppx threshold --- piker/ui/_render.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index ab07237e..9fb1592e 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -138,7 +138,7 @@ def render_baritems( # - if we're **not** downsampling then we simply want to # render the bars graphics curve and update.. # - if instead we are in a downsamplig state then we to - x_gt = 6 + x_gt = 6 * (r.fmtr.index_step_size or 1) uppx = curve.x_uppx() # print(f'BARS UPPX: {uppx}') in_line = should_line = curve.isVisible() From 46808fbb89230978aca93302e94e3b4a12c1fb51 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 3 Dec 2022 15:36:13 -0500 Subject: [PATCH 40/96] Rewrite `slice_from_time()` using `numba` Gives approx a 3-4x speedup using plain old iterate-with-for-loop style though still not really happy with this .5 to 1 ms latency.. Move the core `@njit` part to a `_slice_from_time()` with a pure python func with orig name around it. Also, drop the output `mask` array since we can generally just use the slices in the caller to accomplish the same input array slicing, duh.. 
--- piker/data/_pathops.py | 166 +++++++++++++++++++++++------------------ 1 file changed, 92 insertions(+), 74 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 91169e73..3e4c7834 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -33,6 +33,7 @@ from ._m4 import ds_m4 from .._profile import ( Profiler, pg_profile_enabled, + ms_slower_then, ) @@ -269,6 +270,87 @@ def ohlc_flatten( return x, flat +@njit +def _slice_from_time( + arr: np.ndarray, + start_t: float, + stop_t: float, + +) -> tuple[ + tuple[int, int], + tuple[int, int], + np.ndarray | None, +]: + ''' + Slice an input struct array to a time range and return the absolute + and "readable" slices for that array as well as the indexing mask + for the caller to use to slice the input array if needed. + + ''' + times = arr['time'] + index = arr['index'] + + if ( + start_t < 0 + or start_t >= stop_t + ): + return ( + ( + index[0], + index[-1], + ), + ( + 0, + len(arr), + ), + ) + + # TODO: if we can ensure each time field has a uniform + # step we can instead do some arithmetic to determine + # the equivalent index like we used to? 
+ # return array[ + # lbar - ifirst: + # (rbar - ifirst) + 1 + # ] + + read_i_0: int = 0 + read_i_last: int = 0 + + for i in range(times.shape[0]): + time = times[i] + if time >= start_t: + read_i_0 = i + break + + for i in range(read_i_0, times.shape[0]): + time = times[i] + if time > stop_t: + read_i_last = time + break + + abs_i_0 = int(index[0]) + read_i_0 + abs_i_last = int(index[0]) + read_i_last + + if read_i_last == 0: + read_i_last = times.shape[0] + + abs_slc = ( + int(abs_i_0), + int(abs_i_last), + ) + + read_slc = ( + int(read_i_0), + int(read_i_last), + ) + + # also return the readable data from the timerange + return ( + abs_slc, + read_slc, + ) + + def slice_from_time( arr: np.ndarray, start_t: float, @@ -279,93 +361,29 @@ def slice_from_time( slice, np.ndarray | None, ]: - ''' - Slice an input struct array to a time range and return the absolute - and "readable" slices for that array as well as the indexing mask - for the caller to use to slice the input array if needed. - - ''' profiler = Profiler( msg='slice_from_time()', disabled=not pg_profile_enabled(), - ms_threshold=4, - # ms_threshold=ms_slower_then, + ms_threshold=ms_slower_then, ) - times = arr['time'] - index = arr['index'] - - if ( - start_t < 0 - or start_t >= stop_t - ): - return ( - slice( - index[0], - index[-1], - ), - slice( - 0, - len(arr), - ), - None, - ) - - # use advanced indexing to map the - # time range to the index range. - mask: np.ndarray = np.where( - (times >= start_t) - & - (times < stop_t) - ) - profiler('advanced indexing slice') - # TODO: if we can ensure each time field has a uniform - # step we can instead do some arithmetic to determine - # the equivalent index like we used to? 
- # return array[ - # lbar - ifirst: - # (rbar - ifirst) + 1 - # ] - - i_by_t = index[mask] - try: - i_0 = i_by_t[0] - i_last = i_by_t[-1] - i_first_read = index[0] - except IndexError: - if ( - start_t < times[0] - or stop_t >= times[-1] - ): - return ( - slice( - index[0], - index[-1], - ), - slice( - 0, - len(arr), - ), - None, - ) - - abs_slc = slice(i_0, i_last) - - # slice data by offset from the first index - # available in the passed datum set. - read_slc = slice( - i_0 - i_first_read, - i_last - i_first_read + 1, + ( + abs_slc_tuple, + read_slc_tuple, + ) = _slice_from_time( + arr, + start_t, + stop_t, ) + abs_slc = slice(*abs_slc_tuple) + read_slc = slice(*read_slc_tuple) profiler( 'slicing complete' f'{start_t} -> {abs_slc.start} | {read_slc.start}\n' f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n' ) - # also return the readable data from the timerange return ( abs_slc, read_slc, - mask, ) From 12857a258bbd855a30ed51c6414ca435ad82ed8c Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 3 Dec 2022 16:58:26 -0500 Subject: [PATCH 41/96] Adjust all `slice_from_time()` calls to not expect mask --- piker/data/_formatters.py | 6 +++++- piker/data/_pathops.py | 2 +- piker/ui/_chart.py | 7 +------ piker/ui/_render.py | 4 +--- 4 files changed, 8 insertions(+), 11 deletions(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 5d1d436b..7ffe8872 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -183,7 +183,11 @@ class IncrementalFormatter(msgspec.Struct): nd_stop = self.xy_nd_stop = src_stop align_index = array[self.index_field] - self._index_step_size = align_index[-1] - align_index[-2] + self._index_step_size = ( + align_index[-1] + - + align_index[-2] + ) # compute the length diffs between the first/last index entry in # the input data and the last indexes we have on record from the diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 3e4c7834..a489dcc3 100644 --- a/piker/data/_pathops.py +++ 
b/piker/data/_pathops.py @@ -126,7 +126,7 @@ def path_arrays_from_ohlc( high = q['high'] low = q['low'] close = q['close'] - index = float64(q['time']) + index = float64(q['index']) # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622 # index = float64(q[index_field]) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 3e661e51..b7be583f 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -1036,17 +1036,12 @@ class ChartPlotWidget(pg.PlotWidget): ( abs_slc, read_slc, - mask, - ) = slice_from_time( array, start_t=vtl, stop_t=vtr, ) - iv_arr = array - if mask is not None: - iv_arr = array[mask] - + iv_arr = array[read_slc] index = iv_arr['time'] else: diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 9fb1592e..4383cd00 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -291,13 +291,12 @@ class Viz(msgspec.Struct): # , frozen=True): ( abs_slc, read_slc, - mask, ) = slice_from_time( arr, start_t=lbar, stop_t=rbar, ) - slice_view = arr[mask] + slice_view = arr[read_slc] else: ifirst = arr[0]['index'] @@ -444,7 +443,6 @@ class Viz(msgspec.Struct): # , frozen=True): ( abs_slc, read_slc, - mask, ) = slice_from_time( array, start_t=lbar, From 0bdb7261d169420d48846e1c4580956e42fba52e Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 4 Dec 2022 17:01:22 -0500 Subject: [PATCH 42/96] Flip over to epoch-time based x-domain indexing --- piker/data/_pathops.py | 6 +++--- piker/ui/_render.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index a489dcc3..41dd6ad4 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -126,7 +126,7 @@ def path_arrays_from_ohlc( high = q['high'] low = q['low'] close = q['close'] - index = float64(q['index']) + index = float64(q['time']) # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622 # index = float64(q[index_field]) @@ -380,8 +380,8 @@ def slice_from_time( read_slc = slice(*read_slc_tuple) 
profiler( 'slicing complete' - f'{start_t} -> {abs_slc.start} | {read_slc.start}\n' - f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n' + # f'{start_t} -> {abs_slc.start} | {read_slc.start}\n' + # f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n' ) return ( abs_slc, diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 4383cd00..56157dd9 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -231,7 +231,7 @@ class Viz(msgspec.Struct): # , frozen=True): is_ohlc: bool = False render: bool = True # toggle for display loop - _index_field: str = 'index' + _index_field: str = 'time' # downsampling state _last_uppx: float = 0 From bb84715bf03cf9e280fd88091e133f02865f30d7 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 5 Dec 2022 10:07:12 -0500 Subject: [PATCH 43/96] Make `.default_view()` time step aware When we use an epoch index and any sample rate > 1s we need to scale the "number of bars" to that step in order to place the view correctly in x-domain terms. For now we're calcing the step in-method but likely, longer run, we'll pull this from elsewhere (like a ``Viz`` attr). --- piker/ui/_chart.py | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index b7be583f..492d420d 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -1026,20 +1026,13 @@ class ChartPlotWidget(pg.PlotWidget): index_field = viz.index_field if index_field == 'time': - vr = viz.plot.viewRect() - vtl, vtr = vr.left(), vr.right() - - if vtl < datum_start: - vtl = datum_start - vtr = datum_stop - ( abs_slc, read_slc, ) = slice_from_time( array, - start_t=vtl, - stop_t=vtr, + start_t=l, + stop_t=r, ) iv_arr = array[read_slc] index = iv_arr['time'] @@ -1049,16 +1042,20 @@ class ChartPlotWidget(pg.PlotWidget): # these must be array-index-ints (hence the slice from time # above). 
- x_start, x_stop = index[0], index[-1] + x_stop = index[-1] view: ChartView = viz.plot.vb + times = viz.shm.array['time'] + step = times[-1] - times[-2] + if ( datum_stop < 0 - or l < x_start + or r < datum_start + or l > datum_stop or l < 0 or (datum_stop - datum_start) < 6 ): - begin = x_stop - bars_from_y + begin = x_stop - (bars_from_y * step) view.setXRange( min=begin, max=x_stop, @@ -1067,6 +1064,12 @@ class ChartPlotWidget(pg.PlotWidget): # re-get range l, datum_start, datum_stop, r = viz.bars_range() + # print( + # f'l: {l}\n' + # f'datum_start: {datum_start}\n' + # f'datum_stop: {datum_stop}\n\n' + # f'r: {r}\n' + # ) # we get the L1 spread label "length" in view coords # terms now that we've scaled either by user control # or to the default set of bars as per the immediate block @@ -1085,15 +1088,15 @@ class ChartPlotWidget(pg.PlotWidget): ) else: - end = x_stop + y_offset + 1 + end = x_stop + (y_offset * step) + 1 begin = end - (r - l) debug_msg += ( - f'end: {end}\n' f'begin: {begin}\n' + f'end: {end}\n' ) - print(debug_msg) + # print(debug_msg) # remove any custom user yrange setttings if self._static_yrange == 'axis': From f2c0987a043e2de0c4166420d8136ba8b673e910 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 6 Dec 2022 15:32:13 -0500 Subject: [PATCH 44/96] Use uniform step arithmetic in `slice_from_time()` If we presume that time indexing using a uniform step we can calculate the exact index (using `//`) for the input time presuming the data set has zero gaps. This gives a massive speedup over `numpy` fancy indexing and (naive) `numba` iteration. Further in the case where time gaps are detected, we can use `numpy.searchsorted()` to binary search for the nearest expected index at lower latency. Deatz, - comment-disable the call to the naive `numba` scan impl. - add a optional `step: int` input (calced if not provided). - add todos for caching binary search results in the gap detection cases. 
- drop returning the "absolute buffer indexing" slice since the caller can always just use the read-relative slice to acquire it. --- piker/data/_pathops.py | 183 ++++++++++++++++++++++++++++++----------- 1 file changed, 135 insertions(+), 48 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 41dd6ad4..d83752c3 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -29,6 +29,7 @@ from numba import ( # TODO: for ``numba`` typing.. # from ._source import numba_ohlc_dtype +from ._sharedmem import ShmArray from ._m4 import ds_m4 from .._profile import ( Profiler, @@ -126,6 +127,7 @@ def path_arrays_from_ohlc( high = q['high'] low = q['low'] close = q['close'] + # index = float64(q['index']) index = float64(q['time']) # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622 @@ -276,11 +278,7 @@ def _slice_from_time( start_t: float, stop_t: float, -) -> tuple[ - tuple[int, int], - tuple[int, int], - np.ndarray | None, -]: +) -> tuple[int, int]: ''' Slice an input struct array to a time range and return the absolute and "readable" slices for that array as well as the indexing mask @@ -305,14 +303,6 @@ def _slice_from_time( ), ) - # TODO: if we can ensure each time field has a uniform - # step we can instead do some arithmetic to determine - # the equivalent index like we used to? 
- # return array[ - # lbar - ifirst: - # (rbar - ifirst) + 1 - # ] - read_i_0: int = 0 read_i_last: int = 0 @@ -328,62 +318,159 @@ def _slice_from_time( read_i_last = time break - abs_i_0 = int(index[0]) + read_i_0 - abs_i_last = int(index[0]) + read_i_last - - if read_i_last == 0: - read_i_last = times.shape[0] - - abs_slc = ( - int(abs_i_0), - int(abs_i_last), - ) - - read_slc = ( - int(read_i_0), - int(read_i_last), - ) - - # also return the readable data from the timerange - return ( - abs_slc, - read_slc, - ) + return read_i_0, read_i_last def slice_from_time( arr: np.ndarray, start_t: float, stop_t: float, + step: int | None = None, ) -> tuple[ slice, slice, - np.ndarray | None, ]: + ''' + Calculate array indices mapped from a time range and return them in + a slice. + + Given an input array with an epoch `'time'` series entry, calculate + the indices which span the time range and return in a slice. Presume + each `'time'` step increment is uniform and when the time stamp + series contains gaps (the uniform presumption is untrue) use + ``np.searchsorted()`` binary search to look up the appropriate + index. + + ''' profiler = Profiler( msg='slice_from_time()', disabled=not pg_profile_enabled(), ms_threshold=ms_slower_then, ) - ( - abs_slc_tuple, - read_slc_tuple, - ) = _slice_from_time( - arr, - start_t, - stop_t, + times = arr['time'] + t_first = round(times[0]) + t_last = round(times[-1]) + + index = arr['index'] + i_first = index[0] + read_i_max = arr.shape[0] + + if ( + start_t < t_first + and stop_t > t_last + ): + read_i_start = 0 + read_i_stop = read_i_max + read_slc = slice( + 0, + read_i_max, + ) + return read_slc + + if step is None: + step = round(times[-1] - times[-2]) + if step == 0: + # XXX: HOW TF is this happening? 
+ step = 1 + + # compute (presumed) uniform-time-step index offsets + i_start_t = round(start_t) + read_i_start = (i_start_t - t_first) // step + + i_stop_t = round(stop_t) + read_i_stop = (i_stop_t - t_first) // step + + # always clip outputs to array support + # for read start: + # - never allow a start < the 0 index + # - never allow an end index > the read array len + read_i_start = min( + max(0, read_i_start), + read_i_max, + ) + read_i_stop = max( + 0, + min(read_i_stop, read_i_max), + ) + + # check for larger-then-latest calculated index for given start + # time, in which case we do a binary search for the correct index. + # NOTE: this is usually the result of a time series with time gaps + # where it is expected that each index step maps to a uniform step + # in the time stamp series. + i_iv_start = index[read_i_start - 1] + t_iv_start = times[read_i_start - 1] + if ( + i_iv_start >= i_first + and t_iv_start > i_start_t + ): + # do a binary search for the best index mapping to ``start_t`` + # given we measured an overshoot using the uniform-time-step + # calculation from above. + + # TODO: once we start caching these per source-array, + # we can just overwrite ``read_i_start`` directly. + new_read_i_start = np.searchsorted( + times, + i_start_t, + side='left', + ) + + # TODO: minimize binary search work as much as possible: + # - cache these remap values which compensate for gaps in the + # uniform time step basis where we calc a later start + # index for the given input ``start_t``. + # - can we shorten the input search sequence by heuristic? 
+ # up_to_arith_start = index[:read_i_start] + + if ( + new_read_i_start < read_i_start + ): + # t_diff = t_iv_start - start_t + # print( + # f"WE'RE CUTTING OUT TIME - STEP:{step}\n" + # f'start_t:{start_t} -> 0index start_t:{t_iv_start}\n' + # f'diff: {t_diff}\n' + # f'REMAPPED START i: {read_i_start} -> {new_read_i_start}\n' + # ) + read_i_start = new_read_i_start + + # old much slower non-bin-search ``numba`` approach.. + # ( + # read_i_start, + # read_i_stop, + # ) = _slice_from_time( + # arr, + # start_t, + # stop_t, + # ) + # abs_i_start = int(index[0]) + read_i_0 + # abs_i_stop = int(index[0]) + read_i_last + # if read_i_stop == 0: + # read_i_stop = times.shape[0] + + # read-relative indexes: gives a slice where `shm.array[read_slc]` + # will be the data spanning the input time range `start_t` -> + # `stop_t` + read_slc = slice( + int(read_i_start), + int(read_i_stop), ) - abs_slc = slice(*abs_slc_tuple) - read_slc = slice(*read_slc_tuple) profiler( 'slicing complete' # f'{start_t} -> {abs_slc.start} | {read_slc.start}\n' # f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n' ) - return ( - abs_slc, - read_slc, - ) + + # NOTE: if caller needs absolute buffer indices they can + # slice the buffer abs index like so: + # abs_indx = index[read_slc] + # abs_slc = slice( + # int(abs_indx[0]), + # int(abs_indx[-1]), + # ) + + return read_slc From 2669ced629c7fc543752597014809dc57c6347de Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 6 Dec 2022 15:43:44 -0500 Subject: [PATCH 45/96] Drop `_slice_from_time()` --- piker/data/_pathops.py | 63 ------------------------------------------ 1 file changed, 63 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index d83752c3..a56b95da 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -272,55 +272,6 @@ def ohlc_flatten( return x, flat -@njit -def _slice_from_time( - arr: np.ndarray, - start_t: float, - stop_t: float, - -) -> tuple[int, int]: - ''' - Slice an input struct array to 
a time range and return the absolute - and "readable" slices for that array as well as the indexing mask - for the caller to use to slice the input array if needed. - - ''' - times = arr['time'] - index = arr['index'] - - if ( - start_t < 0 - or start_t >= stop_t - ): - return ( - ( - index[0], - index[-1], - ), - ( - 0, - len(arr), - ), - ) - - read_i_0: int = 0 - read_i_last: int = 0 - - for i in range(times.shape[0]): - time = times[i] - if time >= start_t: - read_i_0 = i - break - - for i in range(read_i_0, times.shape[0]): - time = times[i] - if time > stop_t: - read_i_last = time - break - - return read_i_0, read_i_last - - def slice_from_time( arr: np.ndarray, start_t: float, @@ -437,20 +388,6 @@ def slice_from_time( # ) read_i_start = new_read_i_start - # old much slower non-bin-search ``numba`` approach.. - # ( - # read_i_start, - # read_i_stop, - # ) = _slice_from_time( - # arr, - # start_t, - # stop_t, - # ) - # abs_i_start = int(index[0]) + read_i_0 - # abs_i_stop = int(index[0]) + read_i_last - # if read_i_stop == 0: - # read_i_stop = times.shape[0] - # read-relative indexes: gives a slice where `shm.array[read_slc]` # will be the data spanning the input time range `start_t` -> # `stop_t` From d649a7d1fa45d6ef9328b432b79f347d8b421d39 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 6 Dec 2022 15:44:14 -0500 Subject: [PATCH 46/96] Drop old breakpoint --- piker/data/_m4.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/piker/data/_m4.py b/piker/data/_m4.py index 26d5e0b0..f75d3209 100644 --- a/piker/data/_m4.py +++ b/piker/data/_m4.py @@ -90,8 +90,6 @@ def ds_m4( if xrange is None: x_end = x[-1] # x end value/highest in domain xrange = (x_end - x_start) - if xrange < 0: - breakpoint() # XXX: always round up on the input pixels # lnx = len(x) From 98438e29ef19b808076344e0c9ad79677388058c Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 6 Dec 2022 15:44:32 -0500 Subject: [PATCH 47/96] Drop `Flume.view_data()` --- piker/data/flows.py | 38 
-------------------------------------- 1 file changed, 38 deletions(-) diff --git a/piker/data/flows.py b/piker/data/flows.py index cf3a028a..f1b8eabf 100644 --- a/piker/data/flows.py +++ b/piker/data/flows.py @@ -48,7 +48,6 @@ from ._sharedmem import ( from ._sampling import ( open_sample_stream, ) -from ._pathops import slice_from_time from .._profile import ( Profiler, pg_profile_enabled, @@ -238,40 +237,3 @@ class Flume(Struct): # just the latest index return array['index'][-1] - - # TODO: maybe move this our ``Viz`` type to avoid - # the shm lookup discrepancy? - def view_data( - self, - plot: PlotItem, - timeframe_s: int = 1, - - ) -> np.ndarray: - ''' - Return sliced-to-view source data along with absolute - (``ShmArray._array['index']``) and read-relative - (``ShmArray.array``) slices. - - ''' - # get far-side x-indices plot view - vr = plot.viewRect() - - if timeframe_s > 1: - arr = self.hist_shm.array - else: - arr = self.rt_shm.array - - ( - abs_slc, - read_slc, - mask, - ) = slice_from_time( - arr, - start_t=vr.left(), - stop_t=vr.right(), - ) - return ( - abs_slc, - read_slc, - arr[mask] if mask is not None else arr, - ) From e252f702534746ea96d6a0ff90b987a1f4970023 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 7 Dec 2022 12:22:07 -0500 Subject: [PATCH 48/96] Add `.x_last()` meth to flow graphics --- piker/ui/_curve.py | 9 ++++++++- piker/ui/_ohlc.py | 10 +++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index ad415031..cb696257 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -133,7 +133,7 @@ class Curve(pg.GraphicsObject): # self.last_step_pen = pg.mkPen(hcolor(color), width=2) self.last_step_pen = pg.mkPen(pen, width=2) - self._last_line = QLineF() + self._last_line: QLineF = QLineF() # flat-top style histogram-like discrete curve # self._step_mode: bool = step_mode @@ -182,6 +182,13 @@ class Curve(pg.GraphicsObject): else: return 0 + def x_last(self) -> float: + 
''' + Return the last most x value of the last line segment. + + ''' + return self._last_line.x2() + def px_width(self) -> float: vb = self.getViewBox() diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index 9a37bbfa..a7e36146 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -114,12 +114,20 @@ class BarItems(pg.GraphicsObject): self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache) self.path = QPainterPath() - self._last_bar_lines: Optional[tuple[QLineF, ...]] = None + + self._last_bar_lines: tuple[QLineF, ...] | None = None def x_uppx(self) -> int: # we expect the downsample curve report this. return 0 + def x_last(self) -> float: + ''' + Return the last most x value of the close line segment. + + ''' + return self._last_bar_lines[-1].x2() + # Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect def boundingRect(self): # profiler = Profiler( From 5ab4e5493e1f3911c3dea6d0be0b471c4665b782 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 7 Dec 2022 15:58:43 -0500 Subject: [PATCH 49/96] Add gap detection for `stop_t`, though only report atm --- piker/data/_pathops.py | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index a56b95da..72405cd8 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -29,7 +29,6 @@ from numba import ( # TODO: for ``numba`` typing.. 
# from ._source import numba_ohlc_dtype -from ._sharedmem import ShmArray from ._m4 import ds_m4 from .._profile import ( Profiler, @@ -328,7 +327,7 @@ def slice_from_time( # compute (presumed) uniform-time-step index offsets i_start_t = round(start_t) - read_i_start = (i_start_t - t_first) // step + read_i_start = ((i_start_t - t_first) // step) - 1 i_stop_t = round(stop_t) read_i_stop = (i_stop_t - t_first) // step @@ -339,20 +338,25 @@ def slice_from_time( # - never allow an end index > the read array len read_i_start = min( max(0, read_i_start), - read_i_max, + read_i_max - 1, ) read_i_stop = max( 0, min(read_i_stop, read_i_max), ) + samples = (i_stop_t - i_start_t) // step + index_diff = read_i_stop - read_i_start + 1 + if index_diff > (samples + 3): + breakpoint() + # check for larger-then-latest calculated index for given start # time, in which case we do a binary search for the correct index. # NOTE: this is usually the result of a time series with time gaps # where it is expected that each index step maps to a uniform step # in the time stamp series. 
- i_iv_start = index[read_i_start - 1] - t_iv_start = times[read_i_start - 1] + i_iv_start = index[read_i_start] + t_iv_start = times[read_i_start] if ( i_iv_start >= i_first and t_iv_start > i_start_t @@ -386,7 +390,20 @@ def slice_from_time( # f'diff: {t_diff}\n' # f'REMAPPED START i: {read_i_start} -> {new_read_i_start}\n' # ) - read_i_start = new_read_i_start + read_i_start = new_read_i_start - 1 + + t_iv_stop = times[read_i_stop - 1] + if ( + i_stop_t <= t_last + and t_iv_stop < i_stop_t + ): + t_diff = stop_t - t_iv_stop + print( + f"WE'RE CUTTING OUT TIME - STEP:{step}\n" + f'calced iv stop:{t_iv_stop} -> stop_t:{stop_t}\n' + f'diff: {t_diff}\n' + # f'SHOULD REMAP STOP: {read_i_start} -> {new_read_i_start}\n' + ) # read-relative indexes: gives a slice where `shm.array[read_slc]` # will be the data spanning the input time range `start_t` -> From 50209752c3577f6a17a7c39de5a182f1ff0f0787 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 7 Dec 2022 16:31:32 -0500 Subject: [PATCH 50/96] Re-implement `.default_view()` on `Viz` Since we don't really need it defined on the "chart widget" move it to a viz method and rework it to hell: - always discard the invalid view l > r case. - use the graphic's UPPX to determine UI-to-scene coordinate scaling for the L1-label collision detection, if there is no L1 just offset by a few (index step scaled) datums; this allows us to drop the 2x x-range calls as was hacked previous. - handle no-data-in-view cases explicitly and error if we get any ostensibly impossible cases. - expect caller to trigger a graphics cycle if needed. Further support this includes a rework a slew of other important details: - add `Viz.index_step`, an idempotent computed, index (presumably uniform) step value which is needed for variable sample rate graphics displayed on an epoch (second) time index. 
- rework `Viz.datums_range()` to pass view x-endpoints as first and last elements in return `tuple`; tighten up snap-to-data edge case logic using `max()`/`min()` calls and better internal var naming. - adjust all calls to `slice_from_time()` to not expect an "abs" slice. - drop all `.yrange` resetting since we can just have the `Renderer` do it when necessary. --- piker/ui/_render.py | 292 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 231 insertions(+), 61 deletions(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 56157dd9..852844f9 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -25,6 +25,7 @@ incremental update. from __future__ import annotations from typing import ( Optional, + TYPE_CHECKING, ) import msgspec @@ -49,7 +50,6 @@ from ..data._pathops import ( ) from ._ohlc import ( BarItems, - # bar_from_ohlc_row, ) from ._curve import ( Curve, @@ -63,6 +63,11 @@ from .._profile import ( ) +if TYPE_CHECKING: + from ._interaction import ChartView + from ._chart import ChartPlotWidget + + log = get_logger(__name__) @@ -231,11 +236,13 @@ class Viz(msgspec.Struct): # , frozen=True): is_ohlc: bool = False render: bool = True # toggle for display loop + # _index_field: str = 'index' _index_field: str = 'time' # downsampling state _last_uppx: float = 0 _in_ds: bool = False + _index_step: float | None = None # map from uppx -> (downsampled data, incremental graphics) _src_r: Optional[Renderer] = None @@ -244,12 +251,6 @@ class Viz(msgspec.Struct): # , frozen=True): tuple[Renderer, pg.GraphicsItem], ] = (None, None) - # TODO: hackery to be able to set a shm later - # but whilst also allowing this type to hashable, - # likely will require serializable token that is used to attach - # to the underlying shm ref after startup? - # _shm: Optional[ShmArray] = None # currently, may be filled in "later" - # cache of y-range values per x-range input. 
_mxmns: dict[tuple[int, int], tuple[float, float]] = {} @@ -261,6 +262,17 @@ class Viz(msgspec.Struct): # , frozen=True): def index_field(self) -> str: return self._index_field + def index_step( + self, + reset: bool = False, + + ) -> float: + if self._index_step is None: + index = self.shm.array[self.index_field] + self._index_step = index[-1] - index[-2] + + return self._index_step + def maxmin( self, lbar: int, @@ -275,23 +287,35 @@ class Viz(msgspec.Struct): # , frozen=True): ''' # TODO: hash the slice instead maybe? # https://stackoverflow.com/a/29980872 - rkey = (lbar, rbar) + rkey = (round(lbar), round(rbar)) cached_result = self._mxmns.get(rkey) + do_print = 'btc' in self.name if cached_result: + + # if do_print: + # print( + # f'{self.name} CACHED maxmin\n' + # f'{rkey} -> {cached_result}' + # ) return cached_result shm = self.shm if shm is None: + breakpoint() return None arr = shm.array + # times = arr['time'] + # step = round(times[-1] - times[-2]) + # if ( + # do_print + # and step == 60 + # ): + # breakpoint() # get relative slice indexes into array if self.index_field == 'time': - ( - abs_slc, - read_slc, - ) = slice_from_time( + read_slc = slice_from_time( arr, start_t=lbar, stop_t=rbar, @@ -306,11 +330,17 @@ class Viz(msgspec.Struct): # , frozen=True): ] if not slice_view.size: + log.warning(f'{self.name} no maxmin in view?') + # breakpoint() return None elif self.yrange: mxmn = self.yrange - # print(f'{self.name} M4 maxmin: {mxmn}') + if do_print: + print( + f'{self.name} M4 maxmin:\n' + f'{rkey} -> {mxmn}' + ) else: if self.is_ohlc: @@ -323,7 +353,11 @@ class Viz(msgspec.Struct): # , frozen=True): yhigh = np.max(view) mxmn = ylow, yhigh - # print(f'{self.name} MANUAL maxmin: {mxmn}') + if do_print: + print( + f'{self.name} MANUAL ohlc={self.is_ohlc} maxmin:\n' + f'{rkey} -> {mxmn}' + ) # cache result for input range assert mxmn @@ -333,8 +367,7 @@ class Viz(msgspec.Struct): # , frozen=True): def view_range(self) -> tuple[int, int]: ''' - Return 
the indexes in view for the associated - plot displaying this viz's data. + Return the start and stop x-indexes for the managed ``ViewBox``. ''' vr = self.plot.viewRect() @@ -345,15 +378,18 @@ class Viz(msgspec.Struct): # , frozen=True): def bars_range(self) -> tuple[int, int, int, int]: ''' - Return a range tuple for the bars present in view. + Return a range tuple for the left-view, left-datum, right-datum + and right-view x-indices. ''' - start, l, datum_start, datum_stop, r, stop = self.datums_range() + l, start, datum_start, datum_stop, stop, r = self.datums_range() return l, datum_start, datum_stop, r def datums_range( self, + view_range: None | tuple[float, float] = None, index_field: str | None = None, + array: None | np.ndarray = None, ) -> tuple[ int, int, int, int, int, int @@ -362,39 +398,54 @@ class Viz(msgspec.Struct): # , frozen=True): Return a range tuple for the datums present in view. ''' - l, r = self.view_range() + l, r = view_range or self.view_range() index_field: str = index_field or self.index_field if index_field == 'index': l, r = round(l), round(r) - array = self.shm.array + if array is None: + array = self.shm.array + index = array[index_field] - start = index[0] - stop = index[-1] + first = round(index[0]) + last = round(index[-1]) + # first and last datums in view determined by + # l / r view range. 
+ leftmost = round(l) + rightmost = round(r) + + # invalid view state if ( - l < 0 - or r < l - or l < start + r < l + or l < 0 + or r < 0 + or (l > last and r > last) ): - datum_start = start - datum_stop = stop + leftmost = first + rightmost = last else: - datum_start = max(l, start) - datum_stop = r - if l < stop: - datum_stop = min(r, stop) + rightmost = max( + min(last, rightmost), + first, + ) - assert datum_start < datum_stop + leftmost = min( + max(first, leftmost), + last, + rightmost - 1, + ) + + assert leftmost < rightmost return ( - start, l, # left x-in-view - datum_start, - datum_stop, + first, # first datum + leftmost, + rightmost, + last, # last_datum r, # right-x-in-view - stop, ) def read( @@ -415,6 +466,7 @@ class Viz(msgspec.Struct): # , frozen=True): ''' index_field: str = index_field or self.index_field + vr = l, r = self.view_range() # readable data array = self.shm.array @@ -423,13 +475,17 @@ class Viz(msgspec.Struct): # , frozen=True): profiler('self.shm.array READ') ( - ifirst, l, + ifirst, lbar, rbar, - r, ilast, - ) = self.datums_range(index_field=index_field) + r, + ) = self.datums_range( + view_range=vr, + index_field=index_field, + array=array, + ) # if rbar < lbar: # breakpoint() @@ -440,26 +496,22 @@ class Viz(msgspec.Struct): # , frozen=True): # TODO: support time slicing if index_field == 'time': - ( - abs_slc, - read_slc, - ) = slice_from_time( + read_slc = slice_from_time( array, start_t=lbar, stop_t=rbar, ) - in_view = array[read_slc] - # diff = rbar - lbar - # if ( - # 'btc' in self.name - # and 'hist' not in self.shm.token - # ): - # print( - # f'{self.name}: len(iv) = {len(in_view)}\n' - # f'start/stop: {lbar},{rbar}\n', - # f'diff: {diff}\n', - # ) + # TODO: maybe we should return this from the slicer call + # above? 
+ in_view = array[read_slc] + if in_view.size: + abs_indx = in_view['index'] + abs_slc = slice( + int(abs_indx[0]), + int(abs_indx[-1]), + ) + if profiler: profiler( '`slice_from_time(' @@ -635,8 +687,6 @@ class Viz(msgspec.Struct): # , frozen=True): should_redraw = True showing_src_data = True - # reset yrange to be computed from source data - self.yrange = None # MAIN RENDER LOGIC: # - determine in view data and redraw on range change @@ -662,10 +712,6 @@ class Viz(msgspec.Struct): # , frozen=True): **rkwargs, ) - if showing_src_data: - # print(f"{self.name} SHOWING SOURCE") - # reset yrange to be computed from source data - self.yrange = None if not out: log.warning(f'{self.name} failed to render!?') @@ -678,7 +724,6 @@ class Viz(msgspec.Struct): # , frozen=True): # XXX: SUPER UGGGHHH... without this we get stale cache # graphics that don't update until you downsampler again.. - # reset = False # if reset: # with graphics.reset_cache(): # # assign output paths to graphicis obj @@ -798,6 +843,129 @@ class Viz(msgspec.Struct): # , frozen=True): ) ).length() + def default_view( + self, + bars_from_y: int = int(616 * 3/8), + y_offset: int = 0, + do_ds: bool = True, + + ) -> None: + ''' + Set the plot's viewbox to a "default" startup setting where + we try to show the underlying data range sanely. + + ''' + shm: ShmArray = self.shm + array: np.ndarray = shm.array + view: ChartView = self.plot.vb + ( + vl, + first_datum, + datum_start, + datum_stop, + last_datum, + vr, + ) = self.datums_range(array=array) + + # invalid case: view is not ordered correctly + # return and expect caller to sort it out. 
+ if ( + vl > vr + ): + log.warning( + 'Skipping `.default_view()` viewbox not initialized..\n' + f'l -> r: {vl} -> {vr}\n' + f'datum_start -> datum_stop: {datum_start} -> {datum_stop}\n' + ) + return + + chartw: ChartPlotWidget = self.plot.getViewWidget() + index_field = self.index_field + step = self.index_step() + + if index_field == 'time': + # transform l -> r view range values into + # data index domain to determine how view + # should be reset to better match data. + read_slc = slice_from_time( + array, + start_t=vl, + stop_t=vr, + step=step, + ) + + index_iv = array[index_field][read_slc] + uppx: float = self.graphics.x_uppx() or 1 + + # l->r distance in scene units, no larger then data span + data_diff = last_datum - first_datum + rl_diff = min(vr - vl, data_diff) + + # orient by offset from the y-axis including + # space to compensate for the L1 labels. + if not y_offset: + + # we get the L1 spread label "length" in view coords and + # make sure it doesn't colide with the right-most datum in + # view. + _, l1_len = chartw.pre_l1_xs() + offset = l1_len/(uppx*step) + + # if no L1 label is present just offset by a few datums + # from the y-axis. + if chartw._max_l1_line_len == 0: + offset += 3*step + else: + offset = (y_offset * step) + uppx*step + + # align right side of view to the rightmost datum + the selected + # offset from above. + r_reset = last_datum + offset + + # no data is in view so check for the only 2 sane cases: + # - entire view is LEFT of data + # - entire view is RIGHT of data + if index_iv.size == 0: + log.warning(f'No data in view for {vl} -> {vr}') + + # 2 cases either the view is to the left or right of the + # data set. 
+ if ( + vl <= first_datum + and vr <= first_datum + ): + l_reset = first_datum + + elif ( + vl >= last_datum + and vr >= last_datum + ): + l_reset = r_reset - rl_diff + + else: + raise RuntimeError(f'Unknown view state {vl} -> {vr}') + + else: + # maintain the l->r view distance + l_reset = r_reset - rl_diff + + # remove any custom user yrange setttings + if chartw._static_yrange == 'axis': + chartw._static_yrange = None + + view.setXRange( + min=l_reset, + max=r_reset, + padding=0, + ) + + if do_ds: + view.maybe_downsample_graphics() + view._set_yrange() + + # caller should do this! + # self.linked.graphics_cycle() + class Renderer(msgspec.Struct): @@ -959,6 +1127,8 @@ class Renderer(msgspec.Struct): fast_path = self.fast_path reset = False + self.viz.yrange = None + # redraw the entire source data if we have either of: # - no prior path graphic rendered or, # - we always intend to re-render the data only in view From d2aad74dfca819c523c988a8a894034ca4e0a2c9 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 7 Dec 2022 17:04:15 -0500 Subject: [PATCH 51/96] Delegate to `Viz.default_view()` on chart Also add a rage print to not forget about the global index tracking/diffing in the display loop we still need to change. 
--- piker/ui/_chart.py | 133 ++++++++++----------------------------------- 1 file changed, 28 insertions(+), 105 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 492d420d..70435ec7 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -66,7 +66,6 @@ from ..data.feed import ( Feed, Flume, ) -from ..data._pathops import slice_from_time from ..data._source import Symbol from ..log import get_logger from ._interaction import ChartView @@ -964,11 +963,16 @@ class ChartPlotWidget(pg.PlotWidget): ''' line_end, marker_right, yaxis_x = self.marker_right_points() - view = self.view - line = view.mapToView( + line = self.view.mapToView( QLineF(line_end, 0, yaxis_x, 0) ) - return line.x1(), line.length() + linex, linelen = line.x1(), line.length() + # print( + # f'line: {line}\n' + # f'linex: {linex}\n' + # f'linelen: {linelen}\n' + # ) + return linex, linelen def marker_right_points( self, @@ -990,11 +994,16 @@ class ChartPlotWidget(pg.PlotWidget): ryaxis = self.getAxis('right') r_axis_x = ryaxis.pos().x() - up_to_l1_sc = r_axis_x - l1_len - 10 - + up_to_l1_sc = r_axis_x - l1_len marker_right = up_to_l1_sc - (1.375 * 2 * marker_size) line_end = marker_right - (6/16 * marker_size) + # print( + # f'r_axis_x: {r_axis_x}\n' + # f'up_to_l1_sc: {up_to_l1_sc}\n' + # f'marker_right: {marker_right}\n' + # f'line_end: {line_end}\n' + # ) return line_end, marker_right, r_axis_x def default_view( @@ -1009,108 +1018,18 @@ class ChartPlotWidget(pg.PlotWidget): ''' viz = self.get_viz(self.name) + if not viz: log.warning(f'`Viz` for {self.name} not loaded yet?') return - ( - _, - l, - datum_start, - datum_stop, - r, - _, - ) = viz.datums_range() - - array = viz.shm.array - index_field = viz.index_field - - if index_field == 'time': - ( - abs_slc, - read_slc, - ) = slice_from_time( - array, - start_t=l, - stop_t=r, - ) - iv_arr = array[read_slc] - index = iv_arr['time'] - - else: - index = array['index'] - - # these must be array-index-ints (hence the slice from 
time - # above). - x_stop = index[-1] - view: ChartView = viz.plot.vb - - times = viz.shm.array['time'] - step = times[-1] - times[-2] - - if ( - datum_stop < 0 - or r < datum_start - or l > datum_stop - or l < 0 - or (datum_stop - datum_start) < 6 - ): - begin = x_stop - (bars_from_y * step) - view.setXRange( - min=begin, - max=x_stop, - padding=0, - ) - # re-get range - l, datum_start, datum_stop, r = viz.bars_range() - - # print( - # f'l: {l}\n' - # f'datum_start: {datum_start}\n' - # f'datum_stop: {datum_stop}\n\n' - # f'r: {r}\n' - # ) - # we get the L1 spread label "length" in view coords - # terms now that we've scaled either by user control - # or to the default set of bars as per the immediate block - # above. - debug_msg = ( - f'x_stop: {x_stop}\n' - ) - - if not y_offset: - marker_pos, l1_len = self.pre_l1_xs() - end = x_stop + l1_len + 1 - - debug_msg += ( - f'marker pos: {marker_pos}\n' - f'l1 len: {l1_len}\n' - ) - - else: - end = x_stop + (y_offset * step) + 1 - - begin = end - (r - l) - - debug_msg += ( - f'begin: {begin}\n' - f'end: {end}\n' - ) - # print(debug_msg) - - # remove any custom user yrange setttings - if self._static_yrange == 'axis': - self._static_yrange = None - - view.setXRange( - min=begin, - max=end, - padding=0, + viz.default_view( + bars_from_y, + y_offset, + do_ds, ) if do_ds: - self.view.maybe_downsample_graphics() - view._set_yrange() self.linked.graphics_cycle() def increment_view( @@ -1126,6 +1045,11 @@ class ChartPlotWidget(pg.PlotWidget): """ l, r = self.view_range() view = vb or self.view + if steps >= 300: + print("FUCKING FIX THE GLOBAL STEP BULLSHIT") + # breakpoint() + return + view.setXRange( min=l + steps, max=r + steps, @@ -1410,17 +1334,16 @@ class ChartPlotWidget(pg.PlotWidget): else: ( - _, l, + _, lbar, rbar, - r, _, + r, ) = bars_range or viz.datums_range() profiler(f'{self.name} got bars range') - - key = round(lbar), round(rbar) + key = lbar, rbar res = viz.maxmin(*key) if ( From 
96b871c4d746004ae9893c7ff406808b21ef30ad Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 7 Dec 2022 17:08:07 -0500 Subject: [PATCH 52/96] Draw last datums on boot Ensures that a "last datum" graphics object exists so that zooming can read it using `.x_last()`. Also, disable the linked region stuff for now since it's totally borked after flipping to the time indexing. --- piker/ui/_display.py | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 86b33e1e..676b27a5 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -1204,6 +1204,13 @@ async def display_symbol_data( # sidepane=False, sidepane=godwidget.search, ) + + # ensure the last datum graphic is generated + # for zoom-interaction purposes. + hist_chart.get_viz(fqsn).draw_last( + array_key=fqsn, + only_last_uppx=True, + ) pis.setdefault(fqsn, [None, None])[1] = hist_chart.plotItem # don't show when not focussed @@ -1297,6 +1304,13 @@ async def display_symbol_data( last_bar_color=bg_last_bar_color, ) + # ensure the last datum graphic is generated + # for zoom-interaction purposes. 
+ viz.draw_last( + array_key=fqsn, + only_last_uppx=True, + ) + hist_pi.vb.maxmin = partial( hist_chart.maxmin, name=fqsn, @@ -1408,11 +1422,12 @@ async def display_symbol_data( ) godwidget.resize_all() - await link_views_with_region( - rt_chart, - hist_chart, - flume, - ) + # hist_chart.hide() + # await link_views_with_region( + # rt_chart, + # hist_chart, + # flume, + # ) # start graphics update loop after receiving first live quote ln.start_soon( From 44f50e3d0e31aee5a6a9653a41c5de0f719a1967 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 7 Dec 2022 19:58:55 -0500 Subject: [PATCH 53/96] Implement `stop_t` gap adjustments; the good lord said it is the problem --- piker/data/_pathops.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 72405cd8..cfb2e1cb 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -305,6 +305,7 @@ def slice_from_time( index = arr['index'] i_first = index[0] + i_last = index[-1] read_i_max = arr.shape[0] if ( @@ -392,18 +393,30 @@ def slice_from_time( # ) read_i_start = new_read_i_start - 1 - t_iv_stop = times[read_i_stop - 1] + i_iv_stop = index[read_i_stop - 1] + t_iv_stop = times[read_i_stop - 1] + if ( + i_iv_stop < i_last + # i_stop_t <= t_last + and t_iv_stop > i_stop_t + ): + # t_diff = stop_t - t_iv_stop + # print( + # f"WE'RE CUTTING OUT TIME - STEP:{step}\n" + # f'calced iv stop:{t_iv_stop} -> stop_t:{stop_t}\n' + # f'diff: {t_diff}\n' + # # f'SHOULD REMAP STOP: {read_i_start} -> {new_read_i_start}\n' + # ) + new_read_i_stop = np.searchsorted( + times[read_i_start:], + i_stop_t, + side='right', + ) + if ( - i_stop_t <= t_last - and t_iv_stop < i_stop_t + new_read_i_stop < read_i_stop ): - t_diff = stop_t - t_iv_stop - print( - f"WE'RE CUTTING OUT TIME - STEP:{step}\n" - f'calced iv stop:{t_iv_stop} -> stop_t:{stop_t}\n' - f'diff: {t_diff}\n' - # f'SHOULD REMAP STOP: {read_i_start} -> 
{new_read_i_start}\n' - ) + read_i_stop = read_i_start + new_read_i_stop # read-relative indexes: gives a slice where `shm.array[read_slc]` # will be the data spanning the input time range `start_t` -> From 35a16ded2d23ca34ac35a90f2e4ee0638ab886eb Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 7 Dec 2022 19:59:27 -0500 Subject: [PATCH 54/96] Block out `do_print` stuff inside `Viz.maxmin()` --- piker/ui/_render.py | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 852844f9..04a15890 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -289,29 +289,21 @@ class Viz(msgspec.Struct): # , frozen=True): # https://stackoverflow.com/a/29980872 rkey = (round(lbar), round(rbar)) cached_result = self._mxmns.get(rkey) - do_print = 'btc' in self.name + do_print = False if cached_result: - # if do_print: - # print( - # f'{self.name} CACHED maxmin\n' - # f'{rkey} -> {cached_result}' - # ) + if do_print: + print( + f'{self.name} CACHED maxmin\n' + f'{rkey} -> {cached_result}' + ) return cached_result shm = self.shm if shm is None: - breakpoint() return None arr = shm.array - # times = arr['time'] - # step = round(times[-1] - times[-2]) - # if ( - # do_print - # and step == 60 - # ): - # breakpoint() # get relative slice indexes into array if self.index_field == 'time': @@ -331,7 +323,6 @@ class Viz(msgspec.Struct): # , frozen=True): if not slice_view.size: log.warning(f'{self.name} no maxmin in view?') - # breakpoint() return None elif self.yrange: @@ -353,10 +344,18 @@ class Viz(msgspec.Struct): # , frozen=True): yhigh = np.max(view) mxmn = ylow, yhigh - if do_print: + if ( + do_print + # and self.index_step() > 1 + ): + s = 3 print( f'{self.name} MANUAL ohlc={self.is_ohlc} maxmin:\n' - f'{rkey} -> {mxmn}' + f'{rkey} -> {mxmn}\n' + f'read_slc: {read_slc}\n' + f'abs_slc: {slice_view["index"]}\n' + f'first {s}:\n{slice_view[:s]}\n' + f'last {s}:\n{slice_view[-s:]}\n' 
) # cache result for input range From 2a797d32dc390eaf15ef82bf8452ac30883fdf84 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 8 Dec 2022 09:46:52 -0500 Subject: [PATCH 55/96] Add back `.default_view()` slice logic for `int` indexing --- piker/ui/_render.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 04a15890..1e77ce03 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -892,6 +892,8 @@ class Viz(msgspec.Struct): # , frozen=True): stop_t=vr, step=step, ) + else: + read_slc = slice(0, datum_stop - datum_start + 1) index_iv = array[index_field][read_slc] uppx: float = self.graphics.x_uppx() or 1 From 5216a6b732523515f4ab2ef30ebead560d074ccd Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 8 Dec 2022 11:00:06 -0500 Subject: [PATCH 56/96] Drop passing `render_data` to `Curve.draw_last_datum()` --- piker/ui/_curve.py | 7 ++----- piker/ui/_ohlc.py | 1 - piker/ui/_render.py | 3 --- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index cb696257..065887af 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -360,7 +360,6 @@ class Curve(pg.GraphicsObject): self, path: QPainterPath, src_data: np.ndarray, - render_data: np.ndarray, reset: bool, array_key: str, index_field: str, @@ -368,8 +367,8 @@ class Curve(pg.GraphicsObject): ) -> None: # default line draw last call # with self.reset_cache(): - x = render_data[index_field] - y = render_data[array_key] + x = src_data[index_field] + y = src_data[array_key] # draw the "current" step graphic segment so it # lines up with the "middle" of the current @@ -392,7 +391,6 @@ class FlattenedOHLC(Curve): self, path: QPainterPath, src_data: np.ndarray, - render_data: np.ndarray, reset: bool, array_key: str, index_field: str, @@ -423,7 +421,6 @@ class StepCurve(Curve): self, path: QPainterPath, src_data: np.ndarray, - render_data: np.ndarray, reset: bool, array_key: str, index_field: str, diff --git 
a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index a7e36146..d935bd5c 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -221,7 +221,6 @@ class BarItems(pg.GraphicsObject): self, path: QPainterPath, src_data: np.ndarray, - render_data: np.ndarray, reset: bool, array_key: str, index_field: str, diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 1e77ce03..9f03c189 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -733,7 +733,6 @@ class Viz(msgspec.Struct): # , frozen=True): # # graphics.draw_last_datum( # # path, # # src_array, - # # data, # # reset, # # array_key, # # ) @@ -747,7 +746,6 @@ class Viz(msgspec.Struct): # , frozen=True): graphics.draw_last_datum( path, src_array, - data, reset, array_key, index_field=self.index_field, @@ -785,7 +783,6 @@ class Viz(msgspec.Struct): # , frozen=True): x, y = g.draw_last_datum( g.path, src_array, - src_array, False, # never reset path array_key, self.index_field, From 135627e1422df725657b24e7b7f4f11b3f19162c Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 8 Dec 2022 15:43:11 -0500 Subject: [PATCH 57/96] Slicec to an extra index around each timestamp input --- piker/data/_pathops.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index cfb2e1cb..da51e9c1 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -328,10 +328,10 @@ def slice_from_time( # compute (presumed) uniform-time-step index offsets i_start_t = round(start_t) - read_i_start = ((i_start_t - t_first) // step) - 1 + read_i_start = round(((i_start_t - t_first) // step)) - 1 i_stop_t = round(stop_t) - read_i_stop = (i_stop_t - t_first) // step + read_i_stop = round((i_stop_t - t_first) // step) + 1 # always clip outputs to array support # for read start: @@ -346,11 +346,6 @@ def slice_from_time( min(read_i_stop, read_i_max), ) - samples = (i_stop_t - i_start_t) // step - index_diff = read_i_stop - read_i_start + 1 - if index_diff > 
(samples + 3): - breakpoint() - # check for larger-then-latest calculated index for given start # time, in which case we do a binary search for the correct index. # NOTE: this is usually the result of a time series with time gaps @@ -418,6 +413,12 @@ def slice_from_time( ): read_i_stop = read_i_start + new_read_i_stop + # sanity checks for range size + # samples = (i_stop_t - i_start_t) // step + # index_diff = read_i_stop - read_i_start + 1 + # if index_diff > (samples + 3): + # breakpoint() + # read-relative indexes: gives a slice where `shm.array[read_slc]` # will be the data spanning the input time range `start_t` -> # `stop_t` From 7aef31701bfdef987da0fbbd4c449d2c73736b8a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 8 Dec 2022 15:45:54 -0500 Subject: [PATCH 58/96] Add some commented debug prints for default fmtr --- piker/data/_formatters.py | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 7ffe8872..0c402aa4 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -439,12 +439,37 @@ class IncrementalFormatter(msgspec.Struct): ) -> None: # write pushed data to flattened copy - new_y_nd = new_from_src[data_field] - self.y_nd[read_slc] = new_y_nd + y_nd_new = new_from_src[data_field] + self.y_nd[read_slc] = y_nd_new x_nd_new = self.x_nd[read_slc] x_nd_new[:] = new_from_src[self.index_field] + # x_nd = self.x_nd[self.xy_slice] + # y_nd = self.y_nd[self.xy_slice] + # name = self.viz.name + # if 'trade_rate' == name: + # s = 4 + # print( + # f'{name.upper()}:\n' + # 'NEW_FROM_SRC:\n' + # f'new_from_src: {new_from_src}\n\n' + + # f'PRE self.x_nd:' + # f'\n{list(x_nd[-s:])}\n' + + # f'PRE self.y_nd:\n' + # f'{list(y_nd[-s:])}\n\n' + + # f'TO WRITE:\n' + + # f'x_nd_new:\n' + # f'{x_nd_new[0]}\n' + + # f'y_nd_new:\n' + # f'{y_nd_new}\n' + # ) + # XXX: was ``.format_xy()`` def format_xy_nd_to_1d( self, @@ -749,9 +774,6 @@ class 
StepCurveFmtr(IncrementalFormatter): x_step = self.x_nd[start:stop] y_step = self.y_nd[start:stop] - # pack in duplicate final value to complete last step level - y_step[-1, 1] = last - # debugging # if y_step.any(): # s = 3 From 9fcc6f9c44f83c10799ca4cb266bd59bc00f980a Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 8 Dec 2022 15:58:11 -0500 Subject: [PATCH 59/96] Restore coord-cache resetting Turns out we can't seem to avoid the artefacts when click-drag-scrolling (results in weird repeated "smeared" curve segments) so just go back to the original code. --- piker/ui/_render.py | 39 ++++++++++++++++++--------------------- 1 file changed, 18 insertions(+), 21 deletions(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 9f03c189..18219be6 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -718,30 +718,27 @@ class Viz(msgspec.Struct): # , frozen=True): path, data, reset = out - # if self.yrange: - # print(f'viz {self.name} yrange from m4: {self.yrange}') - # XXX: SUPER UGGGHHH... without this we get stale cache # graphics that don't update until you downsampler again.. - # if reset: - # with graphics.reset_cache(): - # # assign output paths to graphicis obj - # graphics.path = r.path - # graphics.fast_path = r.fast_path + if reset: + with graphics.reset_cache(): + # assign output paths to graphicis obj + graphics.path = r.path + graphics.fast_path = r.fast_path - # # XXX: we don't need this right? - # # graphics.draw_last_datum( - # # path, - # # src_array, - # # reset, - # # array_key, - # # ) - # # graphics.update() - # # profiler('.update()') - # else: - # assign output paths to graphicis obj - graphics.path = r.path - graphics.fast_path = r.fast_path + # XXX: we don't need this right? 
+ # graphics.draw_last_datum( + # path, + # src_array, + # reset, + # array_key, + # ) + # graphics.update() + # profiler('.update()') + else: + # assign output paths to graphicis obj + graphics.path = r.path + graphics.fast_path = r.fast_path graphics.draw_last_datum( path, From 3bed142d15f44961c46b5b4289968750d791479c Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 9 Dec 2022 14:17:36 -0500 Subject: [PATCH 60/96] Handle time-indexing for fill arrows Call into a reworked `Flume.get_index()` for both the slow and fast chart and do time index clipping to last datum where necessary. --- piker/data/flows.py | 28 ++++++++++++++-------------- piker/ui/order_mode.py | 40 ++++++++++++++++++++++++---------------- 2 files changed, 38 insertions(+), 30 deletions(-) diff --git a/piker/data/flows.py b/piker/data/flows.py index f1b8eabf..01ed7851 100644 --- a/piker/data/flows.py +++ b/piker/data/flows.py @@ -48,13 +48,13 @@ from ._sharedmem import ( from ._sampling import ( open_sample_stream, ) -from .._profile import ( - Profiler, - pg_profile_enabled, -) +# from .._profile import ( +# Profiler, +# pg_profile_enabled, +# ) if TYPE_CHECKING: - from pyqtgraph import PlotItem + # from pyqtgraph import PlotItem from .feed import Feed @@ -222,18 +222,18 @@ class Flume(Struct): def get_index( self, time_s: float, + array: np.ndarray, - ) -> int: + ) -> int | float: ''' Return array shm-buffer index for for epoch time. 
''' - array = self.rt_shm.array times = array['time'] - mask = (times >= time_s) - - if any(mask): - return array['index'][mask][0] - - # just the latest index - return array['index'][-1] + first = np.searchsorted( + times, + time_s, + side='left', + ) + imx = times.shape[0] - 1 + return min(first, imx) diff --git a/piker/ui/order_mode.py b/piker/ui/order_mode.py index 1dd49872..ee0196f7 100644 --- a/piker/ui/order_mode.py +++ b/piker/ui/order_mode.py @@ -494,7 +494,7 @@ class OrderMode: uuid: str, price: float, - arrow_index: float, + time_s: float, pointing: Optional[str] = None, @@ -513,18 +513,26 @@ class OrderMode: ''' dialog = self.dialogs[uuid] lines = dialog.lines + chart = self.chart + # XXX: seems to fail on certain types of races? # assert len(lines) == 2 if lines: - flume: Flume = self.feed.flumes[self.chart.linked.symbol.fqsn] + flume: Flume = self.feed.flumes[chart.linked.symbol.fqsn] _, _, ratio = flume.get_ds_info() - for i, chart in [ - (arrow_index, self.chart), - (flume.izero_hist - + - round((arrow_index - flume.izero_rt)/ratio), - self.hist_chart) + + for chart, shm in [ + (self.chart, flume.rt_shm), + (self.hist_chart, flume.hist_shm), ]: + viz = chart.get_viz(chart.name) + index_field = viz.index_field + arr = shm.array + index = flume.get_index(time_s, arr) + + if index_field == 'time': + i = arr['time'][index] + self.arrows.add( chart.plotItem, uuid, @@ -933,6 +941,8 @@ async def process_trade_msg( fmsg = pformat(msg) log.debug(f'Received order msg:\n{fmsg}') name = msg['name'] + viz = mode.chart.get_viz(mode.chart.name) + index_field = viz.index_field if name in ( 'position', @@ -1037,11 +1047,11 @@ async def process_trade_msg( # should only be one "fill" for an alert # add a triangle and remove the level line req = Order(**req) - index = flume.get_index(time.time()) + tm = time.time() mode.on_fill( oid, price=req.price, - arrow_index=index, + time_s=tm, ) mode.lines.remove_line(uuid=oid) msg.req = req @@ -1070,6 +1080,8 @@ async def 
process_trade_msg( details = msg.brokerd_msg # TODO: put the actual exchange timestamp? + # TODO: some kinda progress system? + # NOTE: currently the ``kraken`` openOrders sub # doesn't deliver their engine timestamp as part of # it's schema, so this value is **not** from them @@ -1080,15 +1092,11 @@ async def process_trade_msg( # a true backend one? This will require finagling # with how each backend tracks/summarizes time # stamps for the downstream API. - index = flume.get_index( - details['broker_time'] - ) - - # TODO: some kinda progress system + tm = details['broker_time'] mode.on_fill( oid, price=details['price'], - arrow_index=index, + time_s=tm, pointing='up' if action == 'buy' else 'down', ) From 0663880a6db421f39dcd9b4b39b70976b8eed9f9 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 13 Dec 2022 13:05:56 -0500 Subject: [PATCH 61/96] Fix formatter xy ndarray first prepend case First allocation vs. first "prepend" of source data to an xy `ndarray` format **must be mutex** in order to avoid a double prepend. Previously when both blocks were executed we'd end up with a `.xy_nd_start` that was decremented (at least) twice as much as it should be on the first `.format_to_1d()` call which is obviously incorrect (and causes problems for m4 downsampling as discussed below). Further, since the underlying `ShmArray` buffer indexing is managed (i.e. write-updated) completely independently from the incremental formatter updates and internal xy indexing, we can't use `ShmArray._first.value` and instead need to use the particular `.diff()` output's prepend length value to decrement the `.xy_nd_start` on updates after initial alloc. Problems this resolves with m4: - m4 uses a x-domain diff to calculate the number of "frames" to downsample to, this is normally based on the ratio of pixel columns on screen vs. the size of the input xy data. 
- previously using an int-index (not epoch time) the max diff between first and last index would be the size of the input buffer and thus would never cause a large mem allocation issue (though it may have been inefficient in terms of needed size). - with an epoch time index this max diff could explode if you had some near-now epoch time stamp **minus** an x-allocation value: generally some value in `[0.5, -0.5]` which would result in a massive frames and thus internal `np.ndarray()` allocation causing either a crash in `numba` code or actual system mem over allocation. Further, put in some more x value checks that trigger breakpoints if we detect values that caused this issue - we'll remove em after this has been tested enough. --- piker/data/_formatters.py | 88 ++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 38 deletions(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 0c402aa4..9d8fc00d 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -278,9 +278,9 @@ class IncrementalFormatter(msgspec.Struct): post_slice, ) = self.diff(new_read) + # we first need to allocate xy data arrays + # from the source data. if self.y_nd is None: - # we first need to allocate xy data arrays - # from the source data. self.xy_nd_start = shm._first.value self.xy_nd_stop = shm._last.value self.x_nd, self.y_nd = self.allocate_xy_nd( @@ -289,45 +289,52 @@ class IncrementalFormatter(msgspec.Struct): ) profiler('allocated xy history') - if prepend_len: - self.incr_update_xy_nd( - shm, - array_key, + # once allocated we do incremental pre/append + # updates from the diff with the source buffer. + else: + if prepend_len: - # this is the pre-sliced, "normally expected" - # new data that an updater would normally be - # expected to process, however in some cases (like - # step curves) the updater routine may want to do - # the source history-data reading itself, so we pass - # both here. 
- shm._array[pre_slice], - pre_slice, - prepend_len, + self.incr_update_xy_nd( + shm, + array_key, - self.xy_nd_start, - self.xy_nd_stop, - is_append=False, - ) + # this is the pre-sliced, "normally expected" + # new data that an updater would normally be + # expected to process, however in some cases (like + # step curves) the updater routine may want to do + # the source history-data reading itself, so we pass + # both here. + shm._array[pre_slice], + pre_slice, + prepend_len, - # self.y_nd[y_nd_slc] = new_y_nd - self.xy_nd_start = shm._first.value - profiler('prepended xy history: {prepend_length}') + self.xy_nd_start, + self.xy_nd_stop, + is_append=False, + ) - if append_len: - self.incr_update_xy_nd( - shm, - array_key, + self.xy_nd_start -= prepend_len + profiler('prepended xy history: {prepend_length}') - shm._array[post_slice], - post_slice, - append_len, + xndall = self.x_nd[self.xy_slice] + if xndall.any() and (xndall == 0.5).any(): + breakpoint() - self.xy_nd_start, - self.xy_nd_stop, - is_append=True, - ) - self.xy_nd_stop = shm._last.value - profiler('appened xy history: {append_length}') + if append_len: + self.incr_update_xy_nd( + shm, + array_key, + + shm._array[post_slice], + post_slice, + append_len, + + self.xy_nd_start, + self.xy_nd_stop, + is_append=True, + ) + self.xy_nd_stop += append_len + profiler('appened xy history: {append_length}') view_changed: bool = False view_range: tuple[int, int] = (ivl, ivr) @@ -491,9 +498,14 @@ class IncrementalFormatter(msgspec.Struct): ''' # NOTE: we don't include the very last datum which is filled in # normally by another graphics object. 
+ x_1d = array[self.index_field][:-1] + if x_1d.any() and (x_1d[-1] == 0.5).any(): + breakpoint() + + y_1d = array[array_key][:-1] return ( - array[self.index_field][:-1], - array[array_key][:-1], + x_1d, + y_1d, # 1d connection array or style-key to # ``pg.functions.arrayToQPath()`` @@ -797,7 +809,7 @@ class StepCurveFmtr(IncrementalFormatter): if not x_1d.size == y_1d.size: breakpoint() - if x_1d.any() and (x_1d[-1] == 0.5).any(): + if x_1d.any() and (x_1d == 0.5).any(): breakpoint() # debugging From c5dd67e63c0c59c2833e20f10c461e4c32feae25 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 13 Dec 2022 16:02:34 -0500 Subject: [PATCH 62/96] Right, do index lookup for int-index as well.. --- piker/ui/order_mode.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/piker/ui/order_mode.py b/piker/ui/order_mode.py index ee0196f7..4ac2f457 100644 --- a/piker/ui/order_mode.py +++ b/piker/ui/order_mode.py @@ -528,15 +528,17 @@ class OrderMode: viz = chart.get_viz(chart.name) index_field = viz.index_field arr = shm.array + + # TODO: borked for int index based.. 
index = flume.get_index(time_s, arr) - if index_field == 'time': - i = arr['time'][index] + # get absolute index for arrow placement + arrow_index = arr[index_field][index] self.arrows.add( chart.plotItem, uuid, - i, + arrow_index, price, pointing=pointing, color=lines[0].color @@ -941,8 +943,6 @@ async def process_trade_msg( fmsg = pformat(msg) log.debug(f'Received order msg:\n{fmsg}') name = msg['name'] - viz = mode.chart.get_viz(mode.chart.name) - index_field = viz.index_field if name in ( 'position', @@ -976,7 +976,6 @@ async def process_trade_msg( if dialog: fqsn = dialog.symbol - flume = mode.feed.flumes[fqsn] match msg: case Status( From 3638ae8d3e6424e901f3c6a512c27cc93f5883b5 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 13 Dec 2022 16:33:49 -0500 Subject: [PATCH 63/96] Drop unused `read_src_from_key: bool` to `.format_to_1d()` --- piker/data/_formatters.py | 1 - 1 file changed, 1 deletion(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 9d8fc00d..88fed7bf 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -252,7 +252,6 @@ class IncrementalFormatter(msgspec.Struct): array_key: str, profiler: Profiler, - read_src_from_key: bool = True, slice_to_inview: bool = True, ) -> tuple[ From 4d74bc29b4572cc11d5accc1e35bcb412faf4ed5 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 14 Dec 2022 09:49:43 -0500 Subject: [PATCH 64/96] Fix line -> bars on 6x UPPX Read the `Viz.index_step()` directly to avoid always reading 1 on the slow chart; this was completely broken before and resulting in not rendering the bars graphic on the slow chart until at a true uppx of 1 which obviously doesn't work for 60 width bars XD Further cleanups to `._render` module: - drop `array` output from `Renderer.render()`, `read_from_key` input and fix type annot. 
- drop `should_line`, `changed_to_line` and `render_kwargs` from `render_baritems()` outputs and instead calc `should_redraw` logic inside the func body and return as output. --- piker/ui/_render.py | 84 +++++++++++++++++++++------------------------ 1 file changed, 39 insertions(+), 45 deletions(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index 18219be6..ae737875 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -95,13 +95,13 @@ def render_baritems( ''' bars = graphics - # if no source data renderer exists create one. - self = viz - show_bars: bool = False - + self = viz # TODO: make this a ``Viz`` method? r = self._src_r + first_render: bool = False + + # if no source data renderer exists create one. if not r: - show_bars = True + first_render = True # OHLC bars path renderer r = self._src_r = Renderer( @@ -143,10 +143,11 @@ def render_baritems( # - if we're **not** downsampling then we simply want to # render the bars graphics curve and update.. # - if instead we are in a downsamplig state then we to - x_gt = 6 * (r.fmtr.index_step_size or 1) + x_gt = 6 * (self.index_step() or 1) uppx = curve.x_uppx() # print(f'BARS UPPX: {uppx}') in_line = should_line = curve.isVisible() + if ( in_line and uppx < x_gt @@ -174,7 +175,7 @@ def render_baritems( else: graphics = bars - if show_bars: + if first_render: bars.show() changed_to_line = False @@ -191,20 +192,30 @@ def render_baritems( curve.update() changed_to_line = True - elif in_line and not should_line: + elif ( + in_line + and not should_line + ): # change to bars graphic - log.info(f'showing bars graphic {self.name}') + log.info( + f'showing bars graphic {self.name}\n' + f'first bars render?: {first_render}' + ) curve.hide() bars.show() bars.update() - # breakpoint() + # XXX: is this required? 
+ viz._in_ds = should_line + + should_redraw = ( + changed_to_line + or not should_line + ) return ( graphics, r, - {'read_from_key': False}, - should_line, - changed_to_line, + should_redraw, ) @@ -485,8 +496,6 @@ class Viz(msgspec.Struct): # , frozen=True): index_field=index_field, array=array, ) - # if rbar < lbar: - # breakpoint() if profiler: profiler('self.datums_range()') @@ -555,9 +564,9 @@ class Viz(msgspec.Struct): # , frozen=True): self, use_vr: bool = True, render: bool = True, - array_key: Optional[str] = None, + array_key: str | None = None, - profiler: Optional[Profiler] = None, + profiler: Profiler | None = None, do_append: bool = True, **kwargs, @@ -592,8 +601,6 @@ class Viz(msgspec.Struct): # , frozen=True): return graphics should_redraw: bool = False - should_line: bool = False - rkwargs = {} # TODO: probably specialize ``Renderer`` types instead of # these logic checks? @@ -606,9 +613,7 @@ class Viz(msgspec.Struct): # , frozen=True): ( graphics, r, - rkwargs, - should_line, - changed_to_line, + should_redraw, ) = render_baritems( self, graphics, @@ -616,8 +621,6 @@ class Viz(msgspec.Struct): # , frozen=True): profiler, **kwargs, ) - should_redraw = changed_to_line or not should_line - self._in_ds = should_line elif not r: if isinstance(graphics, StepCurve): @@ -630,10 +633,6 @@ class Viz(msgspec.Struct): # , frozen=True): ), ) - # TODO: append logic inside ``.render()`` isn't - # correct yet for step curves.. remove this to see it. - should_redraw = True - else: r = self._src_r if not r: @@ -684,7 +683,6 @@ class Viz(msgspec.Struct): # , frozen=True): new_sample_rate = True should_ds = False should_redraw = True - showing_src_data = True # MAIN RENDER LOGIC: @@ -708,15 +706,13 @@ class Viz(msgspec.Struct): # , frozen=True): showing_src_data=showing_src_data, do_append=do_append, - - **rkwargs, ) if not out: log.warning(f'{self.name} failed to render!?') return graphics - path, data, reset = out + path, reset = out # XXX: SUPER UGGGHHH... 
without this we get stale cache # graphics that don't update until you downsampler again.. @@ -732,6 +728,7 @@ class Viz(msgspec.Struct): # , frozen=True): # src_array, # reset, # array_key, + # index_field=self.index_field, # ) # graphics.update() # profiler('.update()') @@ -786,14 +783,17 @@ class Viz(msgspec.Struct): # , frozen=True): ) # the renderer is downsampling we choose - # to always try and updadte a single (interpolating) + # to always try and update a single (interpolating) # line segment that spans and tries to display - # the las uppx's worth of datums. + # the last uppx's worth of datums. # we only care about the last pixel's # worth of data since that's all the screen # can represent on the last column where # the most recent datum is being drawn. - if self._in_ds or only_last_uppx: + if ( + self._in_ds + or only_last_uppx + ): dsg = self.ds_graphics or self.graphics # XXX: pretty sure we don't need this? @@ -821,12 +821,10 @@ class Viz(msgspec.Struct): # , frozen=True): # print(f'updating NOT DS curve {self.name}') g.update() - def curve_width_pxs( - self, - ) -> float: + def curve_width_pxs(self) -> float: ''' - Return the width of the current datums in view in pixel units. 
+ ''' _, lbar, rbar, _ = self.bars_range() return self.view.mapViewToDevice( @@ -1006,8 +1004,6 @@ class Renderer(msgspec.Struct): if self.fast_path: self.fast_path.clear() - # profiler('cleared paths due to `should_redraw=True`') - path = pg.functions.arrayToQPath( x, y, @@ -1051,9 +1047,8 @@ class Renderer(msgspec.Struct): # only render datums "in view" of the ``ChartView`` use_vr: bool = True, - read_from_key: bool = True, - ) -> list[QPainterPath]: + ) -> tuple[QPainterPath, bool]: ''' Render the current graphics path(s) @@ -1086,7 +1081,6 @@ class Renderer(msgspec.Struct): array_key, profiler, - read_src_from_key=read_from_key, slice_to_inview=use_vr, ) @@ -1244,4 +1238,4 @@ class Renderer(msgspec.Struct): self.path = path self.fast_path = fast_path - return self.path, array, reset + return self.path, reset From 3019c35e306c7367e8e14e3ef11387aa149a9233 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 14 Dec 2022 12:05:35 -0500 Subject: [PATCH 65/96] Move `Viz` layer to new `.ui` mod --- piker/data/_formatters.py | 2 +- piker/ui/_chart.py | 2 +- piker/ui/_dataviz.py | 955 ++++++++++++++++++++++++++++++++++++++ piker/ui/_render.py | 915 +----------------------------------- 4 files changed, 960 insertions(+), 914 deletions(-) create mode 100644 piker/ui/_dataviz.py diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 88fed7bf..458810aa 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -36,7 +36,7 @@ from ._pathops import ( ) if TYPE_CHECKING: - from ._render import ( + from ._dataviz import ( Viz, ) from .._profile import Profiler diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index 70435ec7..aea1d927 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -72,7 +72,7 @@ from ._interaction import ChartView from ._forms import FieldsForm from .._profile import pg_profile_enabled, ms_slower_then from ._overlay import PlotItemOverlay -from ._render import Viz +from ._dataviz import Viz from ._search import 
SearchWidget from . import _pg_overrides as pgo from .._profile import Profiler diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py new file mode 100644 index 00000000..d2cb9c23 --- /dev/null +++ b/piker/ui/_dataviz.py @@ -0,0 +1,955 @@ +# piker: trading gear for hackers +# Copyright (C) Tyler Goodlet (in stewardship for pikers) + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +''' +Data vizualization APIs + +''' +from __future__ import annotations +from typing import ( + Optional, + TYPE_CHECKING, +) + +import msgspec +import numpy as np +import pyqtgraph as pg +from PyQt5.QtCore import QLineF + +from ..data._sharedmem import ( + ShmArray, +) +from ..data.feed import Flume +from ..data._formatters import ( + IncrementalFormatter, + OHLCBarsFmtr, # Plain OHLC renderer + OHLCBarsAsCurveFmtr, # OHLC converted to line + StepCurveFmtr, # "step" curve (like for vlm) +) +from ..data._pathops import ( + slice_from_time, +) +from ._ohlc import ( + BarItems, +) +from ._curve import ( + Curve, + StepCurve, + FlattenedOHLC, +) +from ._render import Renderer +from ..log import get_logger +from .._profile import ( + Profiler, + pg_profile_enabled, +) + + +if TYPE_CHECKING: + from ._interaction import ChartView + from ._chart import ChartPlotWidget + + +log = get_logger(__name__) + + +def render_baritems( + viz: Viz, + graphics: BarItems, + read: tuple[ + int, int, np.ndarray, + int, int, 
np.ndarray, + ], + profiler: Profiler, + **kwargs, + +) -> None: + ''' + Graphics management logic for a ``BarItems`` object. + + Mostly just logic to determine when and how to downsample an OHLC + lines curve into a flattened line graphic and when to display one + graphic or the other. + + TODO: this should likely be moved into some kind of better abstraction + layer, if not a `Renderer` then something just above it? + + ''' + bars = graphics + + self = viz # TODO: make this a ``Viz`` method? + r = self._src_r + first_render: bool = False + + # if no source data renderer exists create one. + if not r: + first_render = True + + # OHLC bars path renderer + r = self._src_r = Renderer( + viz=self, + fmtr=OHLCBarsFmtr( + shm=viz.shm, + viz=viz, + ), + ) + + ds_curve_r = Renderer( + viz=self, + fmtr=OHLCBarsAsCurveFmtr( + shm=viz.shm, + viz=viz, + ), + ) + + curve = FlattenedOHLC( + name=f'{viz.name}_ds_ohlc', + color=bars._color, + ) + viz.ds_graphics = curve + curve.hide() + self.plot.addItem(curve) + + # baseline "line" downsampled OHLC curve that should + # kick on only when we reach a certain uppx threshold. + self._render_table = (ds_curve_r, curve) + + ds_r, curve = self._render_table + + # print( + # f'r: {r.fmtr.xy_slice}\n' + # f'ds_r: {ds_r.fmtr.xy_slice}\n' + # ) + + # do checks for whether or not we require downsampling: + # - if we're **not** downsampling then we simply want to + # render the bars graphics curve and update.. 
+ # - if instead we are in a downsamplig state then we to + x_gt = 6 * (self.index_step() or 1) + uppx = curve.x_uppx() + # print(f'BARS UPPX: {uppx}') + in_line = should_line = curve.isVisible() + + if ( + in_line + and uppx < x_gt + ): + # print('FLIPPING TO BARS') + should_line = False + viz._in_ds = False + + elif ( + not in_line + and uppx >= x_gt + ): + # print('FLIPPING TO LINE') + should_line = True + viz._in_ds = True + + profiler(f'ds logic complete line={should_line}') + + # do graphics updates + if should_line: + r = ds_r + graphics = curve + profiler('updated ds curve') + + else: + graphics = bars + + if first_render: + bars.show() + + changed_to_line = False + if ( + not in_line + and should_line + ): + # change to line graphic + log.info( + f'downsampling to line graphic {self.name}' + ) + bars.hide() + curve.show() + curve.update() + changed_to_line = True + + elif ( + in_line + and not should_line + ): + # change to bars graphic + log.info( + f'showing bars graphic {self.name}\n' + f'first bars render?: {first_render}' + ) + curve.hide() + bars.show() + bars.update() + + # XXX: is this required? + viz._in_ds = should_line + + should_redraw = ( + changed_to_line + or not should_line + ) + return ( + graphics, + r, + should_redraw, + ) + + +class Viz(msgspec.Struct): # , frozen=True): + ''' + (Data) "Visualization" compound type which wraps a real-time + shm array stream with displayed graphics (curves, charts) + for high level access and control as well as efficient incremental + update. + + The intention is for this type to eventually be capable of shm-passing + of incrementally updated graphics stream data between actors. 
+ + ''' + name: str + plot: pg.PlotItem + _shm: ShmArray + flume: Flume + graphics: Curve | BarItems + + # for tracking y-mn/mx for y-axis auto-ranging + yrange: tuple[float, float] = None + + # in some cases a viz may want to change its + # graphical "type" or, "form" when downsampling, to + # start this is only ever an interpolation line. + ds_graphics: Optional[Curve] = None + + is_ohlc: bool = False + render: bool = True # toggle for display loop + + # _index_field: str = 'index' + _index_field: str = 'time' + + # downsampling state + _last_uppx: float = 0 + _in_ds: bool = False + _index_step: float | None = None + + # map from uppx -> (downsampled data, incremental graphics) + _src_r: Optional[Renderer] = None + _render_table: dict[ + Optional[int], + tuple[Renderer, pg.GraphicsItem], + ] = (None, None) + + # cache of y-range values per x-range input. + _mxmns: dict[tuple[int, int], tuple[float, float]] = {} + + @property + def shm(self) -> ShmArray: + return self._shm + + @property + def index_field(self) -> str: + return self._index_field + + def index_step( + self, + reset: bool = False, + + ) -> float: + if self._index_step is None: + index = self.shm.array[self.index_field] + self._index_step = index[-1] - index[-2] + + return self._index_step + + def maxmin( + self, + lbar: int, + rbar: int, + + ) -> Optional[tuple[float, float]]: + ''' + Compute the cached max and min y-range values for a given + x-range determined by ``lbar`` and ``rbar`` or ``None`` + if no range can be determined (yet). + + ''' + # TODO: hash the slice instead maybe? 
+ # https://stackoverflow.com/a/29980872 + rkey = (round(lbar), round(rbar)) + cached_result = self._mxmns.get(rkey) + do_print = False + if cached_result: + + if do_print: + print( + f'{self.name} CACHED maxmin\n' + f'{rkey} -> {cached_result}' + ) + return cached_result + + shm = self.shm + if shm is None: + return None + + arr = shm.array + + # get relative slice indexes into array + if self.index_field == 'time': + read_slc = slice_from_time( + arr, + start_t=lbar, + stop_t=rbar, + ) + slice_view = arr[read_slc] + + else: + ifirst = arr[0]['index'] + slice_view = arr[ + lbar - ifirst: + (rbar - ifirst) + 1 + ] + + if not slice_view.size: + log.warning(f'{self.name} no maxmin in view?') + return None + + elif self.yrange: + mxmn = self.yrange + if do_print: + print( + f'{self.name} M4 maxmin:\n' + f'{rkey} -> {mxmn}' + ) + + else: + if self.is_ohlc: + ylow = np.min(slice_view['low']) + yhigh = np.max(slice_view['high']) + + else: + view = slice_view[self.name] + ylow = np.min(view) + yhigh = np.max(view) + + mxmn = ylow, yhigh + if ( + do_print + # and self.index_step() > 1 + ): + s = 3 + print( + f'{self.name} MANUAL ohlc={self.is_ohlc} maxmin:\n' + f'{rkey} -> {mxmn}\n' + f'read_slc: {read_slc}\n' + f'abs_slc: {slice_view["index"]}\n' + f'first {s}:\n{slice_view[:s]}\n' + f'last {s}:\n{slice_view[-s:]}\n' + ) + + # cache result for input range + assert mxmn + self._mxmns[rkey] = mxmn + + return mxmn + + def view_range(self) -> tuple[int, int]: + ''' + Return the start and stop x-indexes for the managed ``ViewBox``. + + ''' + vr = self.plot.viewRect() + return ( + vr.left(), + vr.right(), + ) + + def bars_range(self) -> tuple[int, int, int, int]: + ''' + Return a range tuple for the left-view, left-datum, right-datum + and right-view x-indices. 
+ + ''' + l, start, datum_start, datum_stop, stop, r = self.datums_range() + return l, datum_start, datum_stop, r + + def datums_range( + self, + view_range: None | tuple[float, float] = None, + index_field: str | None = None, + array: None | np.ndarray = None, + + ) -> tuple[ + int, int, int, int, int, int + ]: + ''' + Return a range tuple for the datums present in view. + + ''' + l, r = view_range or self.view_range() + + index_field: str = index_field or self.index_field + if index_field == 'index': + l, r = round(l), round(r) + + if array is None: + array = self.shm.array + + index = array[index_field] + first = round(index[0]) + last = round(index[-1]) + + # first and last datums in view determined by + # l / r view range. + leftmost = round(l) + rightmost = round(r) + + # invalid view state + if ( + r < l + or l < 0 + or r < 0 + or (l > last and r > last) + ): + leftmost = first + rightmost = last + else: + rightmost = max( + min(last, rightmost), + first, + ) + + leftmost = min( + max(first, leftmost), + last, + rightmost - 1, + ) + + assert leftmost < rightmost + + return ( + l, # left x-in-view + first, # first datum + leftmost, + rightmost, + last, # last_datum + r, # right-x-in-view + ) + + def read( + self, + array_field: Optional[str] = None, + index_field: str | None = None, + profiler: None | Profiler = None, + + ) -> tuple[ + int, int, np.ndarray, + int, int, np.ndarray, + ]: + ''' + Read the underlying shm array buffer and + return the data plus indexes for the first + and last + which has been written to. 
+ + ''' + index_field: str = index_field or self.index_field + vr = l, r = self.view_range() + + # readable data + array = self.shm.array + + if profiler: + profiler('self.shm.array READ') + + ( + l, + ifirst, + lbar, + rbar, + ilast, + r, + ) = self.datums_range( + view_range=vr, + index_field=index_field, + array=array, + ) + + if profiler: + profiler('self.datums_range()') + + abs_slc = slice(ifirst, ilast) + + # TODO: support time slicing + if index_field == 'time': + read_slc = slice_from_time( + array, + start_t=lbar, + stop_t=rbar, + ) + + # TODO: maybe we should return this from the slicer call + # above? + in_view = array[read_slc] + if in_view.size: + abs_indx = in_view['index'] + abs_slc = slice( + int(abs_indx[0]), + int(abs_indx[-1]), + ) + + if profiler: + profiler( + '`slice_from_time(' + f'start_t={lbar}' + f'stop_t={rbar})' + ) + + # array-index slicing + # TODO: can we do time based indexing using arithmetic presuming + # a uniform time stamp step size? + else: + # get read-relative indices adjusting for master shm index. + lbar_i = max(l, ifirst) - ifirst + rbar_i = min(r, ilast) - ifirst + + # NOTE: the slice here does NOT include the extra ``+ 1`` + # BUT the ``in_view`` slice DOES.. + read_slc = slice(lbar_i, rbar_i) + in_view = array[lbar_i: rbar_i + 1] + + # XXX: same as ^ + # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1] + if profiler: + profiler('index arithmetic for slicing') + + if array_field: + array = array[array_field] + + return ( + # abs indices + full data set + abs_slc.start, + abs_slc.stop, + array, + + # relative (read) indices + in view data + read_slc.start, + read_slc.stop, + in_view, + ) + + def update_graphics( + self, + use_vr: bool = True, + render: bool = True, + array_key: str | None = None, + + profiler: Profiler | None = None, + do_append: bool = True, + + **kwargs, + + ) -> pg.GraphicsObject: + ''' + Read latest datums from shm and render to (incrementally) + render to graphics. 
+ + ''' + profiler = Profiler( + msg=f'Viz.update_graphics() for {self.name}', + disabled=not pg_profile_enabled(), + ms_threshold=4, + # ms_threshold=ms_slower_then, + ) + # shm read and slice to view + read = ( + xfirst, xlast, src_array, + ivl, ivr, in_view, + ) = self.read(profiler=profiler) + + profiler('read src shm data') + + graphics = self.graphics + + if ( + not in_view.size + or not render + ): + # print('exiting early') + return graphics + + should_redraw: bool = False + + # TODO: probably specialize ``Renderer`` types instead of + # these logic checks? + # - put these blocks into a `.load_renderer()` meth? + # - consider a OHLCRenderer, StepCurveRenderer, Renderer? + r = self._src_r + if isinstance(graphics, BarItems): + # XXX: special case where we change out graphics + # to a line after a certain uppx threshold. + ( + graphics, + r, + should_redraw, + ) = render_baritems( + self, + graphics, + read, + profiler, + **kwargs, + ) + + elif not r: + if isinstance(graphics, StepCurve): + + r = self._src_r = Renderer( + viz=self, + fmtr=StepCurveFmtr( + shm=self.shm, + viz=self, + ), + ) + + else: + r = self._src_r + if not r: + # just using for ``.diff()`` atm.. 
+ r = self._src_r = Renderer( + viz=self, + fmtr=IncrementalFormatter( + shm=self.shm, + viz=self, + ), + ) + + # ``Curve`` derivative case(s): + array_key = array_key or self.name + # print(array_key) + + # ds update config + new_sample_rate: bool = False + should_ds: bool = r._in_ds + showing_src_data: bool = not r._in_ds + + # downsampling incremental state checking + # check for and set std m4 downsample conditions + uppx = graphics.x_uppx() + uppx_diff = (uppx - self._last_uppx) + profiler(f'diffed uppx {uppx}') + if ( + uppx > 1 + and abs(uppx_diff) >= 1 + ): + log.debug( + f'{array_key} sampler change: {self._last_uppx} -> {uppx}' + ) + self._last_uppx = uppx + + new_sample_rate = True + showing_src_data = False + should_ds = True + should_redraw = True + + elif ( + uppx <= 2 + and self._in_ds + ): + # we should de-downsample back to our original + # source data so we clear our path data in prep + # to generate a new one from original source data. + new_sample_rate = True + should_ds = False + should_redraw = True + showing_src_data = True + + # MAIN RENDER LOGIC: + # - determine in view data and redraw on range change + # - determine downsampling ops if needed + # - (incrementally) update ``QPainterPath`` + + out = r.render( + read, + array_key, + profiler, + uppx=uppx, + # use_vr=True, + + # TODO: better way to detect and pass this? + # if we want to eventually cache renderers for a given uppx + # we should probably use this as a key + state? + should_redraw=should_redraw, + new_sample_rate=new_sample_rate, + should_ds=should_ds, + showing_src_data=showing_src_data, + + do_append=do_append, + ) + + if not out: + log.warning(f'{self.name} failed to render!?') + return graphics + + path, reset = out + + # XXX: SUPER UGGGHHH... without this we get stale cache + # graphics that don't update until you downsampler again.. 
+ if reset: + with graphics.reset_cache(): + # assign output paths to graphicis obj + graphics.path = r.path + graphics.fast_path = r.fast_path + + # XXX: we don't need this right? + # graphics.draw_last_datum( + # path, + # src_array, + # reset, + # array_key, + # index_field=self.index_field, + # ) + # graphics.update() + # profiler('.update()') + else: + # assign output paths to graphicis obj + graphics.path = r.path + graphics.fast_path = r.fast_path + + graphics.draw_last_datum( + path, + src_array, + reset, + array_key, + index_field=self.index_field, + ) + graphics.update() + profiler('.update()') + + # TODO: does this actuallly help us in any way (prolly should + # look at the source / ask ogi). I think it avoid artifacts on + # wheel-scroll downsampling curve updates? + # TODO: is this ever better? + # graphics.prepareGeometryChange() + # profiler('.prepareGeometryChange()') + + # track downsampled state + self._in_ds = r._in_ds + + return graphics + + def draw_last( + self, + array_key: Optional[str] = None, + only_last_uppx: bool = False, + + ) -> None: + + # shm read and slice to view + ( + xfirst, xlast, src_array, + ivl, ivr, in_view, + ) = self.read() + + g = self.graphics + array_key = array_key or self.name + x, y = g.draw_last_datum( + g.path, + src_array, + False, # never reset path + array_key, + self.index_field, + ) + + # the renderer is downsampling we choose + # to always try and update a single (interpolating) + # line segment that spans and tries to display + # the last uppx's worth of datums. + # we only care about the last pixel's + # worth of data since that's all the screen + # can represent on the last column where + # the most recent datum is being drawn. + if ( + self._in_ds + or only_last_uppx + ): + dsg = self.ds_graphics or self.graphics + + # XXX: pretty sure we don't need this? 
+ # if isinstance(g, Curve): + # with dsg.reset_cache(): + uppx = self._last_uppx + y = y[-uppx:] + ymn, ymx = y.min(), y.max() + # print(f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}') + try: + iuppx = x[-uppx] + except IndexError: + # we're less then an x-px wide so just grab the start + # datum index. + iuppx = x[0] + + dsg._last_line = QLineF( + iuppx, ymn, + x[-1], ymx, + ) + # print(f'updating DS curve {self.name}') + dsg.update() + + else: + # print(f'updating NOT DS curve {self.name}') + g.update() + + def curve_width_pxs(self) -> float: + ''' + Return the width of the current datums in view in pixel units. + + ''' + _, lbar, rbar, _ = self.bars_range() + return self.view.mapViewToDevice( + QLineF( + lbar, 0, + rbar, 0 + ) + ).length() + + def default_view( + self, + bars_from_y: int = int(616 * 3/8), + y_offset: int = 0, + do_ds: bool = True, + + ) -> None: + ''' + Set the plot's viewbox to a "default" startup setting where + we try to show the underlying data range sanely. + + ''' + shm: ShmArray = self.shm + array: np.ndarray = shm.array + view: ChartView = self.plot.vb + ( + vl, + first_datum, + datum_start, + datum_stop, + last_datum, + vr, + ) = self.datums_range(array=array) + + # invalid case: view is not ordered correctly + # return and expect caller to sort it out. + if ( + vl > vr + ): + log.warning( + 'Skipping `.default_view()` viewbox not initialized..\n' + f'l -> r: {vl} -> {vr}\n' + f'datum_start -> datum_stop: {datum_start} -> {datum_stop}\n' + ) + return + + chartw: ChartPlotWidget = self.plot.getViewWidget() + index_field = self.index_field + step = self.index_step() + + if index_field == 'time': + # transform l -> r view range values into + # data index domain to determine how view + # should be reset to better match data. 
+ read_slc = slice_from_time( + array, + start_t=vl, + stop_t=vr, + step=step, + ) + else: + read_slc = slice(0, datum_stop - datum_start + 1) + + index_iv = array[index_field][read_slc] + uppx: float = self.graphics.x_uppx() or 1 + + # l->r distance in scene units, no larger then data span + data_diff = last_datum - first_datum + rl_diff = min(vr - vl, data_diff) + + # orient by offset from the y-axis including + # space to compensate for the L1 labels. + if not y_offset: + + # we get the L1 spread label "length" in view coords and + # make sure it doesn't colide with the right-most datum in + # view. + _, l1_len = chartw.pre_l1_xs() + offset = l1_len/(uppx*step) + + # if no L1 label is present just offset by a few datums + # from the y-axis. + if chartw._max_l1_line_len == 0: + offset += 3*step + else: + offset = (y_offset * step) + uppx*step + + # align right side of view to the rightmost datum + the selected + # offset from above. + r_reset = last_datum + offset + + # no data is in view so check for the only 2 sane cases: + # - entire view is LEFT of data + # - entire view is RIGHT of data + if index_iv.size == 0: + log.warning(f'No data in view for {vl} -> {vr}') + + # 2 cases either the view is to the left or right of the + # data set. + if ( + vl <= first_datum + and vr <= first_datum + ): + l_reset = first_datum + + elif ( + vl >= last_datum + and vr >= last_datum + ): + l_reset = r_reset - rl_diff + + else: + raise RuntimeError(f'Unknown view state {vl} -> {vr}') + + else: + # maintain the l->r view distance + l_reset = r_reset - rl_diff + + # remove any custom user yrange setttings + if chartw._static_yrange == 'axis': + chartw._static_yrange = None + + view.setXRange( + min=l_reset, + max=r_reset, + padding=0, + ) + + if do_ds: + view.maybe_downsample_graphics() + view._set_yrange() + + # caller should do this! 
+ # self.linked.graphics_cycle() diff --git a/piker/ui/_render.py b/piker/ui/_render.py index ae737875..e037122d 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -18,8 +18,8 @@ High level streaming graphics primitives. This is an intermediate layer which associates real-time low latency -graphics primitives with underlying FSP related data structures for fast -incremental update. +graphics primitives with underlying stream/flow related data structures +for fast incremental update. ''' from __future__ import annotations @@ -32,934 +32,25 @@ import msgspec import numpy as np import pyqtgraph as pg from PyQt5.QtGui import QPainterPath -from PyQt5.QtCore import QLineF -from ..data._sharedmem import ( - ShmArray, -) -from ..data.feed import Flume from ..data._formatters import ( IncrementalFormatter, - OHLCBarsFmtr, # Plain OHLC renderer - OHLCBarsAsCurveFmtr, # OHLC converted to line - StepCurveFmtr, # "step" curve (like for vlm) ) from ..data._pathops import ( xy_downsample, - slice_from_time, -) -from ._ohlc import ( - BarItems, -) -from ._curve import ( - Curve, - StepCurve, - FlattenedOHLC, ) from ..log import get_logger from .._profile import ( Profiler, - pg_profile_enabled, ) - if TYPE_CHECKING: - from ._interaction import ChartView - from ._chart import ChartPlotWidget + from ._dataviz import Viz log = get_logger(__name__) -def render_baritems( - viz: Viz, - graphics: BarItems, - read: tuple[ - int, int, np.ndarray, - int, int, np.ndarray, - ], - profiler: Profiler, - **kwargs, - -) -> None: - ''' - Graphics management logic for a ``BarItems`` object. - - Mostly just logic to determine when and how to downsample an OHLC - lines curve into a flattened line graphic and when to display one - graphic or the other. - - TODO: this should likely be moved into some kind of better abstraction - layer, if not a `Renderer` then something just above it? - - ''' - bars = graphics - - self = viz # TODO: make this a ``Viz`` method? 
- r = self._src_r - first_render: bool = False - - # if no source data renderer exists create one. - if not r: - first_render = True - - # OHLC bars path renderer - r = self._src_r = Renderer( - viz=self, - fmtr=OHLCBarsFmtr( - shm=viz.shm, - viz=viz, - ), - ) - - ds_curve_r = Renderer( - viz=self, - fmtr=OHLCBarsAsCurveFmtr( - shm=viz.shm, - viz=viz, - ), - ) - - curve = FlattenedOHLC( - name=f'{viz.name}_ds_ohlc', - color=bars._color, - ) - viz.ds_graphics = curve - curve.hide() - self.plot.addItem(curve) - - # baseline "line" downsampled OHLC curve that should - # kick on only when we reach a certain uppx threshold. - self._render_table = (ds_curve_r, curve) - - ds_r, curve = self._render_table - - # print( - # f'r: {r.fmtr.xy_slice}\n' - # f'ds_r: {ds_r.fmtr.xy_slice}\n' - # ) - - # do checks for whether or not we require downsampling: - # - if we're **not** downsampling then we simply want to - # render the bars graphics curve and update.. - # - if instead we are in a downsamplig state then we to - x_gt = 6 * (self.index_step() or 1) - uppx = curve.x_uppx() - # print(f'BARS UPPX: {uppx}') - in_line = should_line = curve.isVisible() - - if ( - in_line - and uppx < x_gt - ): - # print('FLIPPING TO BARS') - should_line = False - viz._in_ds = False - - elif ( - not in_line - and uppx >= x_gt - ): - # print('FLIPPING TO LINE') - should_line = True - viz._in_ds = True - - profiler(f'ds logic complete line={should_line}') - - # do graphics updates - if should_line: - r = ds_r - graphics = curve - profiler('updated ds curve') - - else: - graphics = bars - - if first_render: - bars.show() - - changed_to_line = False - if ( - not in_line - and should_line - ): - # change to line graphic - log.info( - f'downsampling to line graphic {self.name}' - ) - bars.hide() - curve.show() - curve.update() - changed_to_line = True - - elif ( - in_line - and not should_line - ): - # change to bars graphic - log.info( - f'showing bars graphic {self.name}\n' - f'first bars render?: 
{first_render}' - ) - curve.hide() - bars.show() - bars.update() - - # XXX: is this required? - viz._in_ds = should_line - - should_redraw = ( - changed_to_line - or not should_line - ) - return ( - graphics, - r, - should_redraw, - ) - - -class Viz(msgspec.Struct): # , frozen=True): - ''' - (Data) "Visualization" compound type which wraps a real-time - shm array stream with displayed graphics (curves, charts) - for high level access and control as well as efficient incremental - update. - - The intention is for this type to eventually be capable of shm-passing - of incrementally updated graphics stream data between actors. - - ''' - name: str - plot: pg.PlotItem - _shm: ShmArray - flume: Flume - graphics: Curve | BarItems - - # for tracking y-mn/mx for y-axis auto-ranging - yrange: tuple[float, float] = None - - # in some cases a viz may want to change its - # graphical "type" or, "form" when downsampling, to - # start this is only ever an interpolation line. - ds_graphics: Optional[Curve] = None - - is_ohlc: bool = False - render: bool = True # toggle for display loop - - # _index_field: str = 'index' - _index_field: str = 'time' - - # downsampling state - _last_uppx: float = 0 - _in_ds: bool = False - _index_step: float | None = None - - # map from uppx -> (downsampled data, incremental graphics) - _src_r: Optional[Renderer] = None - _render_table: dict[ - Optional[int], - tuple[Renderer, pg.GraphicsItem], - ] = (None, None) - - # cache of y-range values per x-range input. 
- _mxmns: dict[tuple[int, int], tuple[float, float]] = {} - - @property - def shm(self) -> ShmArray: - return self._shm - - @property - def index_field(self) -> str: - return self._index_field - - def index_step( - self, - reset: bool = False, - - ) -> float: - if self._index_step is None: - index = self.shm.array[self.index_field] - self._index_step = index[-1] - index[-2] - - return self._index_step - - def maxmin( - self, - lbar: int, - rbar: int, - - ) -> Optional[tuple[float, float]]: - ''' - Compute the cached max and min y-range values for a given - x-range determined by ``lbar`` and ``rbar`` or ``None`` - if no range can be determined (yet). - - ''' - # TODO: hash the slice instead maybe? - # https://stackoverflow.com/a/29980872 - rkey = (round(lbar), round(rbar)) - cached_result = self._mxmns.get(rkey) - do_print = False - if cached_result: - - if do_print: - print( - f'{self.name} CACHED maxmin\n' - f'{rkey} -> {cached_result}' - ) - return cached_result - - shm = self.shm - if shm is None: - return None - - arr = shm.array - - # get relative slice indexes into array - if self.index_field == 'time': - read_slc = slice_from_time( - arr, - start_t=lbar, - stop_t=rbar, - ) - slice_view = arr[read_slc] - - else: - ifirst = arr[0]['index'] - slice_view = arr[ - lbar - ifirst: - (rbar - ifirst) + 1 - ] - - if not slice_view.size: - log.warning(f'{self.name} no maxmin in view?') - return None - - elif self.yrange: - mxmn = self.yrange - if do_print: - print( - f'{self.name} M4 maxmin:\n' - f'{rkey} -> {mxmn}' - ) - - else: - if self.is_ohlc: - ylow = np.min(slice_view['low']) - yhigh = np.max(slice_view['high']) - - else: - view = slice_view[self.name] - ylow = np.min(view) - yhigh = np.max(view) - - mxmn = ylow, yhigh - if ( - do_print - # and self.index_step() > 1 - ): - s = 3 - print( - f'{self.name} MANUAL ohlc={self.is_ohlc} maxmin:\n' - f'{rkey} -> {mxmn}\n' - f'read_slc: {read_slc}\n' - f'abs_slc: {slice_view["index"]}\n' - f'first 
{s}:\n{slice_view[:s]}\n' - f'last {s}:\n{slice_view[-s:]}\n' - ) - - # cache result for input range - assert mxmn - self._mxmns[rkey] = mxmn - - return mxmn - - def view_range(self) -> tuple[int, int]: - ''' - Return the start and stop x-indexes for the managed ``ViewBox``. - - ''' - vr = self.plot.viewRect() - return ( - vr.left(), - vr.right(), - ) - - def bars_range(self) -> tuple[int, int, int, int]: - ''' - Return a range tuple for the left-view, left-datum, right-datum - and right-view x-indices. - - ''' - l, start, datum_start, datum_stop, stop, r = self.datums_range() - return l, datum_start, datum_stop, r - - def datums_range( - self, - view_range: None | tuple[float, float] = None, - index_field: str | None = None, - array: None | np.ndarray = None, - - ) -> tuple[ - int, int, int, int, int, int - ]: - ''' - Return a range tuple for the datums present in view. - - ''' - l, r = view_range or self.view_range() - - index_field: str = index_field or self.index_field - if index_field == 'index': - l, r = round(l), round(r) - - if array is None: - array = self.shm.array - - index = array[index_field] - first = round(index[0]) - last = round(index[-1]) - - # first and last datums in view determined by - # l / r view range. 
- leftmost = round(l) - rightmost = round(r) - - # invalid view state - if ( - r < l - or l < 0 - or r < 0 - or (l > last and r > last) - ): - leftmost = first - rightmost = last - else: - rightmost = max( - min(last, rightmost), - first, - ) - - leftmost = min( - max(first, leftmost), - last, - rightmost - 1, - ) - - assert leftmost < rightmost - - return ( - l, # left x-in-view - first, # first datum - leftmost, - rightmost, - last, # last_datum - r, # right-x-in-view - ) - - def read( - self, - array_field: Optional[str] = None, - index_field: str | None = None, - profiler: None | Profiler = None, - - ) -> tuple[ - int, int, np.ndarray, - int, int, np.ndarray, - ]: - ''' - Read the underlying shm array buffer and - return the data plus indexes for the first - and last - which has been written to. - - ''' - index_field: str = index_field or self.index_field - vr = l, r = self.view_range() - - # readable data - array = self.shm.array - - if profiler: - profiler('self.shm.array READ') - - ( - l, - ifirst, - lbar, - rbar, - ilast, - r, - ) = self.datums_range( - view_range=vr, - index_field=index_field, - array=array, - ) - - if profiler: - profiler('self.datums_range()') - - abs_slc = slice(ifirst, ilast) - - # TODO: support time slicing - if index_field == 'time': - read_slc = slice_from_time( - array, - start_t=lbar, - stop_t=rbar, - ) - - # TODO: maybe we should return this from the slicer call - # above? - in_view = array[read_slc] - if in_view.size: - abs_indx = in_view['index'] - abs_slc = slice( - int(abs_indx[0]), - int(abs_indx[-1]), - ) - - if profiler: - profiler( - '`slice_from_time(' - f'start_t={lbar}' - f'stop_t={rbar})' - ) - - # array-index slicing - # TODO: can we do time based indexing using arithmetic presuming - # a uniform time stamp step size? - else: - # get read-relative indices adjusting for master shm index. 
- lbar_i = max(l, ifirst) - ifirst - rbar_i = min(r, ilast) - ifirst - - # NOTE: the slice here does NOT include the extra ``+ 1`` - # BUT the ``in_view`` slice DOES.. - read_slc = slice(lbar_i, rbar_i) - in_view = array[lbar_i: rbar_i + 1] - - # XXX: same as ^ - # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1] - if profiler: - profiler('index arithmetic for slicing') - - if array_field: - array = array[array_field] - - return ( - # abs indices + full data set - abs_slc.start, - abs_slc.stop, - array, - - # relative (read) indices + in view data - read_slc.start, - read_slc.stop, - in_view, - ) - - def update_graphics( - self, - use_vr: bool = True, - render: bool = True, - array_key: str | None = None, - - profiler: Profiler | None = None, - do_append: bool = True, - - **kwargs, - - ) -> pg.GraphicsObject: - ''' - Read latest datums from shm and render to (incrementally) - render to graphics. - - ''' - profiler = Profiler( - msg=f'Viz.update_graphics() for {self.name}', - disabled=not pg_profile_enabled(), - ms_threshold=4, - # ms_threshold=ms_slower_then, - ) - # shm read and slice to view - read = ( - xfirst, xlast, src_array, - ivl, ivr, in_view, - ) = self.read(profiler=profiler) - - profiler('read src shm data') - - graphics = self.graphics - - if ( - not in_view.size - or not render - ): - # print('exiting early') - return graphics - - should_redraw: bool = False - - # TODO: probably specialize ``Renderer`` types instead of - # these logic checks? - # - put these blocks into a `.load_renderer()` meth? - # - consider a OHLCRenderer, StepCurveRenderer, Renderer? - r = self._src_r - if isinstance(graphics, BarItems): - # XXX: special case where we change out graphics - # to a line after a certain uppx threshold. 
- ( - graphics, - r, - should_redraw, - ) = render_baritems( - self, - graphics, - read, - profiler, - **kwargs, - ) - - elif not r: - if isinstance(graphics, StepCurve): - - r = self._src_r = Renderer( - viz=self, - fmtr=StepCurveFmtr( - shm=self.shm, - viz=self, - ), - ) - - else: - r = self._src_r - if not r: - # just using for ``.diff()`` atm.. - r = self._src_r = Renderer( - viz=self, - fmtr=IncrementalFormatter( - shm=self.shm, - viz=self, - ), - ) - - # ``Curve`` derivative case(s): - array_key = array_key or self.name - # print(array_key) - - # ds update config - new_sample_rate: bool = False - should_ds: bool = r._in_ds - showing_src_data: bool = not r._in_ds - - # downsampling incremental state checking - # check for and set std m4 downsample conditions - uppx = graphics.x_uppx() - uppx_diff = (uppx - self._last_uppx) - profiler(f'diffed uppx {uppx}') - if ( - uppx > 1 - and abs(uppx_diff) >= 1 - ): - log.debug( - f'{array_key} sampler change: {self._last_uppx} -> {uppx}' - ) - self._last_uppx = uppx - - new_sample_rate = True - showing_src_data = False - should_ds = True - should_redraw = True - - elif ( - uppx <= 2 - and self._in_ds - ): - # we should de-downsample back to our original - # source data so we clear our path data in prep - # to generate a new one from original source data. - new_sample_rate = True - should_ds = False - should_redraw = True - showing_src_data = True - - # MAIN RENDER LOGIC: - # - determine in view data and redraw on range change - # - determine downsampling ops if needed - # - (incrementally) update ``QPainterPath`` - - out = r.render( - read, - array_key, - profiler, - uppx=uppx, - # use_vr=True, - - # TODO: better way to detect and pass this? - # if we want to eventually cache renderers for a given uppx - # we should probably use this as a key + state? 
- should_redraw=should_redraw, - new_sample_rate=new_sample_rate, - should_ds=should_ds, - showing_src_data=showing_src_data, - - do_append=do_append, - ) - - if not out: - log.warning(f'{self.name} failed to render!?') - return graphics - - path, reset = out - - # XXX: SUPER UGGGHHH... without this we get stale cache - # graphics that don't update until you downsampler again.. - if reset: - with graphics.reset_cache(): - # assign output paths to graphicis obj - graphics.path = r.path - graphics.fast_path = r.fast_path - - # XXX: we don't need this right? - # graphics.draw_last_datum( - # path, - # src_array, - # reset, - # array_key, - # index_field=self.index_field, - # ) - # graphics.update() - # profiler('.update()') - else: - # assign output paths to graphicis obj - graphics.path = r.path - graphics.fast_path = r.fast_path - - graphics.draw_last_datum( - path, - src_array, - reset, - array_key, - index_field=self.index_field, - ) - graphics.update() - profiler('.update()') - - # TODO: does this actuallly help us in any way (prolly should - # look at the source / ask ogi). I think it avoid artifacts on - # wheel-scroll downsampling curve updates? - # TODO: is this ever better? - # graphics.prepareGeometryChange() - # profiler('.prepareGeometryChange()') - - # track downsampled state - self._in_ds = r._in_ds - - return graphics - - def draw_last( - self, - array_key: Optional[str] = None, - only_last_uppx: bool = False, - - ) -> None: - - # shm read and slice to view - ( - xfirst, xlast, src_array, - ivl, ivr, in_view, - ) = self.read() - - g = self.graphics - array_key = array_key or self.name - x, y = g.draw_last_datum( - g.path, - src_array, - False, # never reset path - array_key, - self.index_field, - ) - - # the renderer is downsampling we choose - # to always try and update a single (interpolating) - # line segment that spans and tries to display - # the last uppx's worth of datums. 
- # we only care about the last pixel's - # worth of data since that's all the screen - # can represent on the last column where - # the most recent datum is being drawn. - if ( - self._in_ds - or only_last_uppx - ): - dsg = self.ds_graphics or self.graphics - - # XXX: pretty sure we don't need this? - # if isinstance(g, Curve): - # with dsg.reset_cache(): - uppx = self._last_uppx - y = y[-uppx:] - ymn, ymx = y.min(), y.max() - # print(f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}') - try: - iuppx = x[-uppx] - except IndexError: - # we're less then an x-px wide so just grab the start - # datum index. - iuppx = x[0] - - dsg._last_line = QLineF( - iuppx, ymn, - x[-1], ymx, - ) - # print(f'updating DS curve {self.name}') - dsg.update() - - else: - # print(f'updating NOT DS curve {self.name}') - g.update() - - def curve_width_pxs(self) -> float: - ''' - Return the width of the current datums in view in pixel units. - - ''' - _, lbar, rbar, _ = self.bars_range() - return self.view.mapViewToDevice( - QLineF( - lbar, 0, - rbar, 0 - ) - ).length() - - def default_view( - self, - bars_from_y: int = int(616 * 3/8), - y_offset: int = 0, - do_ds: bool = True, - - ) -> None: - ''' - Set the plot's viewbox to a "default" startup setting where - we try to show the underlying data range sanely. - - ''' - shm: ShmArray = self.shm - array: np.ndarray = shm.array - view: ChartView = self.plot.vb - ( - vl, - first_datum, - datum_start, - datum_stop, - last_datum, - vr, - ) = self.datums_range(array=array) - - # invalid case: view is not ordered correctly - # return and expect caller to sort it out. 
- if ( - vl > vr - ): - log.warning( - 'Skipping `.default_view()` viewbox not initialized..\n' - f'l -> r: {vl} -> {vr}\n' - f'datum_start -> datum_stop: {datum_start} -> {datum_stop}\n' - ) - return - - chartw: ChartPlotWidget = self.plot.getViewWidget() - index_field = self.index_field - step = self.index_step() - - if index_field == 'time': - # transform l -> r view range values into - # data index domain to determine how view - # should be reset to better match data. - read_slc = slice_from_time( - array, - start_t=vl, - stop_t=vr, - step=step, - ) - else: - read_slc = slice(0, datum_stop - datum_start + 1) - - index_iv = array[index_field][read_slc] - uppx: float = self.graphics.x_uppx() or 1 - - # l->r distance in scene units, no larger then data span - data_diff = last_datum - first_datum - rl_diff = min(vr - vl, data_diff) - - # orient by offset from the y-axis including - # space to compensate for the L1 labels. - if not y_offset: - - # we get the L1 spread label "length" in view coords and - # make sure it doesn't colide with the right-most datum in - # view. - _, l1_len = chartw.pre_l1_xs() - offset = l1_len/(uppx*step) - - # if no L1 label is present just offset by a few datums - # from the y-axis. - if chartw._max_l1_line_len == 0: - offset += 3*step - else: - offset = (y_offset * step) + uppx*step - - # align right side of view to the rightmost datum + the selected - # offset from above. - r_reset = last_datum + offset - - # no data is in view so check for the only 2 sane cases: - # - entire view is LEFT of data - # - entire view is RIGHT of data - if index_iv.size == 0: - log.warning(f'No data in view for {vl} -> {vr}') - - # 2 cases either the view is to the left or right of the - # data set. 
- if ( - vl <= first_datum - and vr <= first_datum - ): - l_reset = first_datum - - elif ( - vl >= last_datum - and vr >= last_datum - ): - l_reset = r_reset - rl_diff - - else: - raise RuntimeError(f'Unknown view state {vl} -> {vr}') - - else: - # maintain the l->r view distance - l_reset = r_reset - rl_diff - - # remove any custom user yrange setttings - if chartw._static_yrange == 'axis': - chartw._static_yrange = None - - view.setXRange( - min=l_reset, - max=r_reset, - padding=0, - ) - - if do_ds: - view.maybe_downsample_graphics() - view._set_yrange() - - # caller should do this! - # self.linked.graphics_cycle() - - class Renderer(msgspec.Struct): viz: Viz From 14104185d2ac993317d46cbf98f321ed353759df Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 14 Dec 2022 13:50:57 -0500 Subject: [PATCH 66/96] Move `DisplayState.incr_info()` -> `Viz` --- piker/ui/_dataviz.py | 102 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index d2cb9c23..bec5481f 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -61,6 +61,7 @@ from .._profile import ( if TYPE_CHECKING: from ._interaction import ChartView from ._chart import ChartPlotWidget + from ._display import DisplayState log = get_logger(__name__) @@ -953,3 +954,104 @@ class Viz(msgspec.Struct): # , frozen=True): # caller should do this! # self.linked.graphics_cycle() + + def incr_info( + self, + + # NOTE: pass in a copy if you don't want your orignal mutated. + state: DisplayState, + + update_state: bool = True, + update_uppx: float = 16, + is_1m: bool = False, + + ) -> tuple: + + # shm = shm or self.ohlcv + # chart = chart or self.chart + globalz = state.globalz + if is_1m: + state = state.hist_vars + else: + state = state.vars + + _, _, _, r = self.bars_range() + + i_step = self.shm.array[-1][self.index_field] + + # last-in-view: is a real-time update necessary? 
+ liv = r >= i_step + + # TODO: make this not loop through all vizs each time? + # compute the first available graphic's x-units-per-pixel + uppx = self.plot.vb.x_uppx() + + # NOTE: this used to be implemented in a dedicated + # "increment task": ``check_for_new_bars()`` but it doesn't + # make sense to do a whole task switch when we can just do + # this simple index-diff and all the fsp sub-curve graphics + # are diffed on each draw cycle anyway; so updates to the + # "curve" length is already automatic. + glast = globalz['i_last'] + i_diff = i_step - glast + + # print(f'{chart.name} TIME STEP: {i_step}') + # i_diff = i_step - state['i_last'] + + should_global_increment: bool = False + if i_step > glast: + globalz['i_last'] = i_step + should_global_increment = True + + # update global state for this chart + if ( + # state is None + not is_1m + and i_diff > 0 + ): + state['i_last'] = i_step + + append_diff = i_step - state['i_last_append'] + # append_diff = i_step - _i_last_append + + # update the "last datum" (aka extending the vizs graphic with + # new data) only if the number of unit steps is >= the number of + # such unit steps per pixel (aka uppx). Iow, if the zoom level + # is such that a datum(s) update to graphics wouldn't span + # to a new pixel, we don't update yet. 
+ do_append = ( + append_diff >= uppx + and i_diff + ) + if ( + do_append + and not is_1m + ): + # _i_last_append = i_step + state['i_last_append'] = i_step + + # fqsn = self.flume.symbol.fqsn + # print( + # f'DOING APPEND => {fqsn}\n' + # f'i_step:{i_step}\n' + # f'i_diff:{i_diff}\n' + # f'last:{_i_last}\n' + # f'last_append:{_i_last_append}\n' + # f'append_diff:{append_diff}\n' + # f'r: {r}\n' + # f'liv: {liv}\n' + # f'uppx: {uppx}\n' + # ) + + do_rt_update = uppx < update_uppx + + # TODO: pack this into a struct + return ( + uppx, + liv, + do_append, + i_diff, + append_diff, + do_rt_update, + should_global_increment, + ) From 530b2731ba232daeb99f995e0096294ce9e590fd Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 14 Dec 2022 17:36:01 -0500 Subject: [PATCH 67/96] Add global `i_step` per overlay to `DisplayState` Using a global "last index step" (via module var) obviously has problems when working with multiple feed sets in a single global app instance: any separate feed-set will be incremented according to an app-global index-step and thus won't correctly calc per-feed-set-step update info. Impl deatz: - drop `DisplayState.incr_info()` (since previously moved to `Viz`) and call that method on each appropriate `Viz` instance where necessary; further ensure the appropriate `DisplayState` instance is passed in to each call and make sure to pass a `state: DisplayState`. - add `DisplayState.hist_vars: dict` for history chart (sets) to determine the per-feed (not set) current slow chart (time) step. - add `DisplayState.globalz: dict` to house a common per-feed-set state and use it inside the new `Viz.incr_info()` such that a `should_increment: bool` can be returned and used by the display loop to determine whether to x-shift the current chart. 
--- piker/ui/_display.py | 243 ++++++++++++++----------------------------- 1 file changed, 77 insertions(+), 166 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 676b27a5..39f6cf1b 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -128,10 +128,6 @@ def chart_maxmin( ) -_i_last: int = 0 -_i_last_append: int = 0 - - class DisplayState(Struct): ''' Chart-local real-time graphics state container. @@ -154,113 +150,33 @@ class DisplayState(Struct): hist_last_price_sticky: YAxisLabel # misc state tracking - vars: dict[str, Any] = field(default_factory=lambda: { - 'tick_margin': 0, - 'i_last': 0, - 'i_last_append': 0, - 'last_mx_vlm': 0, - 'last_mx': 0, - 'last_mn': 0, - }) + vars: dict[str, Any] = field( + default_factory=lambda: { + 'tick_margin': 0, + 'i_last': 0, + 'i_last_append': 0, + 'last_mx_vlm': 0, + 'last_mx': 0, + 'last_mn': 0, + } + ) + hist_vars: dict[str, Any] = field( + default_factory=lambda: { + 'tick_margin': 0, + 'i_last': 0, + 'i_last_append': 0, + 'last_mx_vlm': 0, + 'last_mx': 0, + 'last_mn': 0, + } + ) + + globalz: None | dict[str, Any] = None vlm_chart: Optional[ChartPlotWidget] = None vlm_sticky: Optional[YAxisLabel] = None wap_in_history: bool = False - def incr_info( - self, - chart: Optional[ChartPlotWidget] = None, - shm: Optional[ShmArray] = None, - state: Optional[dict] = None, # pass in a copy if you don't - - update_state: bool = True, - update_uppx: float = 16, - is_1m: bool = False, - - ) -> tuple: - - shm = shm or self.ohlcv - chart = chart or self.chart - # state = state or self.vars - - if ( - not update_state - and state - ): - state = state.copy() - - # compute the first available graphic's x-units-per-pixel - uppx = chart.view.x_uppx() - - # NOTE: this used to be implemented in a dedicated - # "increment task": ``check_for_new_bars()`` but it doesn't - # make sense to do a whole task switch when we can just do - # this simple index-diff and all the fsp sub-curve graphics - # are diffed on 
each draw cycle anyway; so updates to the - # "curve" length is already automatic. - - # increment the view position by the sample offset. - # i_step = shm.index - i_step = shm.array[-1]['time'] - # i_diff = i_step - state['i_last'] - # state['i_last'] = i_step - global _i_last, _i_last_append - i_diff = i_step - _i_last - # update global state - if ( - # state is None - not is_1m - and i_diff > 0 - ): - _i_last = i_step - - # append_diff = i_step - state['i_last_append'] - append_diff = i_step - _i_last_append - - # real-time update necessary? - main_viz = chart.get_viz(chart.name) - _, _, _, r = main_viz.bars_range() - liv = r >= shm.index - - # update the "last datum" (aka extending the vizs graphic with - # new data) only if the number of unit steps is >= the number of - # such unit steps per pixel (aka uppx). Iow, if the zoom level - # is such that a datum(s) update to graphics wouldn't span - # to a new pixel, we don't update yet. - do_append = ( - append_diff >= uppx - and i_diff - ) - if ( - do_append - and not is_1m - ): - _i_last_append = i_step - # fqsn = self.flume.symbol.fqsn - # print( - # f'DOING APPEND => {fqsn}\n' - # f'i_step:{i_step}\n' - # f'i_diff:{i_diff}\n' - # f'last:{_i_last}\n' - # f'last_append:{_i_last_append}\n' - # f'append_diff:{append_diff}\n' - # f'r: {r}\n' - # f'liv: {liv}\n' - # f'uppx: {uppx}\n' - # ) - - do_rt_update = uppx < update_uppx - - # TODO: pack this into a struct - return ( - uppx, - liv, - do_append, - i_diff, - append_diff, - do_rt_update, - ) - async def graphics_update_loop( @@ -295,7 +211,15 @@ async def graphics_update_loop( hist_chart = godwidget.hist_linked.chart assert hist_chart + # per-viz-set global last index tracking for global chart + # view UX incrementing. 
+ globalz = { + 'i_last': 0, + 'i_last_append': 0, + } + dss: dict[str, DisplayState] = {} + for fqsn, flume in feed.flumes.items(): ohlcv = flume.rt_shm hist_ohlcv = flume.hist_shm @@ -315,7 +239,8 @@ async def graphics_update_loop( ) last_price_sticky.show() - slow_pi = hist_chart._vizs[fqsn].plot + hist_viz = hist_chart._vizs[fqsn] + slow_pi = hist_viz.plot hist_last_price_sticky = slow_pi.getAxis('right')._stickies[fqsn] hist_last_price_sticky.update_from_data( *hist_ohlcv.array[-1][[ @@ -389,7 +314,8 @@ async def graphics_update_loop( 'last_mx_vlm': last_mx_vlm, 'last_mx': last_mx, 'last_mn': last_mn, - } + }, + 'globalz': globalz, }) if vlm_chart: @@ -400,15 +326,15 @@ async def graphics_update_loop( fast_chart.default_view() + ds.hist_vars.update({ + 'i_last_append': i_last, + 'i_last': i_last, + }) + # TODO: probably factor this into some kinda `DisplayState` # API that can be reused at least in terms of pulling view # params (eg ``.bars_range()``). async def increment_history_view(): - i_last = hist_ohlcv.index - state = ds.vars.copy() | { - 'i_last_append': i_last, - 'i_last': i_last, - } _, hist_step_size_s, _ = flume.get_ds_info() async with flume.index_stream( @@ -433,12 +359,11 @@ async def graphics_update_loop( i_diff, append_diff, do_rt_update, - ) = ds.incr_info( - chart=hist_chart, - shm=ds.hist_ohlcv, - state=state, + should_incr, + + ) = hist_viz.incr_info( + state=ds, is_1m=True, - # update_state=False, ) # print( # f'liv: {liv}\n' @@ -559,40 +484,8 @@ def graphics_update_cycle( i_diff, append_diff, do_rt_update, - ) = ds.incr_info() - - # don't real-time "shift" the curve to the - # left unless we get one of the following: - if ( - ( - do_append - and liv - ) - or trigger_all - ): - # print(f'INCREMENTING {fqsn}') - chart.increment_view(steps=i_diff) - main_viz.plot.vb._set_yrange( - # yrange=(mn, mx), - ) - - # NOTE: since vlm and ohlc charts are axis linked now we don't - # need the double increment request? 
- # if vlm_chart: - # vlm_chart.increment_view(steps=i_diff) - - profiler('view incremented') - - # frames_by_type: dict[str, dict] = {} - # lasts = {} - - # build tick-type "frames" of tick sequences since - # likely the tick arrival rate is higher then our - # (throttled) quote stream rate. - - # iterate in FIFO order per tick-frame - # if sym != fqsn: - # continue + should_incr, + ) = main_viz.incr_info(state=ds) # TODO: we should only run mxmn when we know # an update is due via ``do_append`` above. @@ -623,11 +516,6 @@ def graphics_update_cycle( # graphic. clear_types = _tick_groups['clears'] - # XXX: if we wanted to iterate in "latest" (i.e. most - # current) tick first order as an optimization where we only - # update from the last tick from each type class. - # last_clear_updated: bool = False - # update ohlc sampled price bars if ( do_rt_update @@ -647,6 +535,29 @@ def graphics_update_cycle( # do_append=do_append, ) + # don't real-time "shift" the curve to the + # left unless we get one of the following: + if ( + ( + should_incr + and do_append + and liv + ) + or trigger_all + ): + # print(f'INCREMENTING {fqsn}') + chart.increment_view(steps=i_diff) + main_viz.plot.vb._set_yrange( + # yrange=(mn, mx), + ) + + # NOTE: since vlm and ohlc charts are axis linked now we don't + # need the double increment request? 
+ # if vlm_chart: + # vlm_chart.increment_view(steps=i_diff) + + profiler('view incremented') + # from pprint import pformat # frame_counts = { # typ: len(frame) for typ, frame in frames_by_type.items() @@ -767,6 +678,8 @@ def graphics_update_cycle( ) # check if slow chart needs a resize + + hist_viz = hist_chart._vizs[fqsn] ( _, hist_liv, @@ -774,15 +687,13 @@ def graphics_update_cycle( _, _, _, - ) = ds.incr_info( - chart=hist_chart, - shm=ds.hist_ohlcv, - update_state=False, + _, + ) = hist_viz.incr_info( + state=ds, is_1m=True, ) if hist_liv: - viz = hist_chart._vizs[fqsn] - viz.plot.vb._set_yrange( + hist_viz.plot.vb._set_yrange( # yrange=hist_chart.maxmin(name=fqsn), ) @@ -816,7 +727,7 @@ def graphics_update_cycle( ): viz.draw_last( array_key=curve_name, - only_last_uppx=True, + # only_last_uppx=True, ) # volume chart logic.. @@ -1209,7 +1120,7 @@ async def display_symbol_data( # for zoom-interaction purposes. hist_chart.get_viz(fqsn).draw_last( array_key=fqsn, - only_last_uppx=True, + # only_last_uppx=True, ) pis.setdefault(fqsn, [None, None])[1] = hist_chart.plotItem @@ -1308,7 +1219,7 @@ async def display_symbol_data( # for zoom-interaction purposes. viz.draw_last( array_key=fqsn, - only_last_uppx=True, + # only_last_uppx=True, ) hist_pi.vb.maxmin = partial( From edf721f755de69349c7fa2005018256b09410fe5 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 15 Dec 2022 14:26:50 -0500 Subject: [PATCH 68/96] Make `LinearRegion` link using epoch-time index Turned out to be super simple to get the first draft to work since the fast and slow chart now use the same domain, however, it seems like maybe there's an offset issue still where the fast may be a couple minutes ahead of the slow? Need to dig in a bit.. 
--- piker/ui/_display.py | 180 ++++++++++++++++++++++++------------------- 1 file changed, 100 insertions(+), 80 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 39f6cf1b..c44fd210 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -853,92 +853,113 @@ async def link_views_with_region( hist_pi.addItem(region, ignoreBounds=True) region.setOpacity(6/16) - viz = rt_chart._vizs[flume.symbol.fqsn] + viz = rt_chart.get_viz(flume.symbol.fqsn) assert viz + index_field = viz.index_field # XXX: no idea why this doesn't work but it's causing # a weird placement of the region on the way-far-left.. # region.setClipItem(viz.graphics) - # poll for datums load and timestep detection - for _ in range(100): - try: - _, _, ratio = flume.get_ds_info() - break - except IndexError: - await trio.sleep(0.01) - continue + if index_field == 'time': + + # in the (epoch) index case we can map directly + # from the fast chart's x-domain values since they are + # on the same index as the slow chart. + + def update_region_from_pi( + window, + viewRange: tuple[tuple, tuple], + is_manual: bool = True, + ) -> None: + # put linear region "in front" in layer terms + region.setZValue(10) + + # set the region on the history chart + # to the range currently viewed in the + # HFT/real-time chart. 
+ region.setRegion(viewRange[0]) + else: - raise RuntimeError( - 'Failed to detect sampling periods from shm!?') + # poll for datums load and timestep detection + for _ in range(100): + try: + _, _, ratio = flume.get_ds_info() + break + except IndexError: + await trio.sleep(0.01) + continue + else: + raise RuntimeError( + 'Failed to detect sampling periods from shm!?') - # sampling rate transform math: - # ----------------------------- - # define the fast chart to slow chart as a linear mapping - # over the fast index domain `i` to the slow index domain - # `j` as: - # - # j = i - i_offset - # ------------ + j_offset - # j/i - # - # conversely the inverse function is: - # - # i = j/i * (j - j_offset) + i_offset - # - # Where `j_offset` is our ``izero_hist`` and `i_offset` is our - # `izero_rt`, the ``ShmArray`` offsets which correspond to the - # indexes in each array where the "current" time is indexed at init. - # AKA the index where new data is "appended to" and historical data - # if "prepended from". - # - # more practically (and by default) `i` is normally an index - # into 1s samples and `j` is an index into 60s samples (aka 1m). - # in the below handlers ``ratio`` is the `j/i` and ``mn``/``mx`` - # are the low and high index input from the source index domain. + # sampling rate transform math: + # ----------------------------- + # define the fast chart to slow chart as a linear mapping + # over the fast index domain `i` to the slow index domain + # `j` as: + # + # j = i - i_offset + # ------------ + j_offset + # j/i + # + # conversely the inverse function is: + # + # i = j/i * (j - j_offset) + i_offset + # + # Where `j_offset` is our ``izero_hist`` and `i_offset` is our + # `izero_rt`, the ``ShmArray`` offsets which correspond to the + # indexes in each array where the "current" time is indexed at init. + # AKA the index where new data is "appended to" and historical data + # if "prepended from". 
+ # + # more practically (and by default) `i` is normally an index + # into 1s samples and `j` is an index into 60s samples (aka 1m). + # in the below handlers ``ratio`` is the `j/i` and ``mn``/``mx`` + # are the low and high index input from the source index domain. - def update_region_from_pi( - window, - viewRange: tuple[tuple, tuple], - is_manual: bool = True, + def update_region_from_pi( + window, + viewRange: tuple[tuple, tuple], + is_manual: bool = True, - ) -> None: - # put linear region "in front" in layer terms - region.setZValue(10) + ) -> None: + # put linear region "in front" in layer terms + region.setZValue(10) - # set the region on the history chart - # to the range currently viewed in the - # HFT/real-time chart. - mn, mx = viewRange[0] - ds_mn = (mn - izero_rt)/ratio - ds_mx = (mx - izero_rt)/ratio - lhmn = ds_mn + izero_hist - lhmx = ds_mx + izero_hist - # print( - # f'rt_view_range: {(mn, mx)}\n' - # f'ds_mn, ds_mx: {(ds_mn, ds_mx)}\n' - # f'lhmn, lhmx: {(lhmn, lhmx)}\n' - # ) - region.setRegion(( - lhmn, - lhmx, - )) + # set the region on the history chart + # to the range currently viewed in the + # HFT/real-time chart. + mn, mx = viewRange[0] + ds_mn = (mn - izero_rt)/ratio + ds_mx = (mx - izero_rt)/ratio + lhmn = ds_mn + izero_hist + lhmx = ds_mx + izero_hist + # print( + # f'rt_view_range: {(mn, mx)}\n' + # f'ds_mn, ds_mx: {(ds_mn, ds_mx)}\n' + # f'lhmn, lhmx: {(lhmn, lhmx)}\n' + # ) + region.setRegion(( + lhmn, + lhmx, + )) - # TODO: if we want to have the slow chart adjust range to - # match the fast chart's selection -> results in the - # linear region expansion never can go "outside of view". 
- # hmn, hmx = hvr = hist_chart.view.state['viewRange'][0] - # print((hmn, hmx)) - # if ( - # hvr - # and (lhmn < hmn or lhmx > hmx) - # ): - # hist_pi.setXRange( - # lhmn, - # lhmx, - # padding=0, - # ) - # hist_linked.graphics_cycle() + # TODO: if we want to have the slow chart adjust range to + # match the fast chart's selection -> results in the + # linear region expansion never can go "outside of view". + # hmn, hmx = hvr = hist_chart.view.state['viewRange'][0] + # print((hmn, hmx)) + # if ( + # hvr + # and (lhmn < hmn or lhmx > hmx) + # ): + # hist_pi.setXRange( + # lhmn, + # lhmx, + # padding=0, + # ) + # hist_linked.graphics_cycle() # connect region to be updated on plotitem interaction. rt_pi.sigRangeChanged.connect(update_region_from_pi) @@ -1333,12 +1354,11 @@ async def display_symbol_data( ) godwidget.resize_all() - # hist_chart.hide() - # await link_views_with_region( - # rt_chart, - # hist_chart, - # flume, - # ) + await link_views_with_region( + rt_chart, + hist_chart, + flume, + ) # start graphics update loop after receiving first live quote ln.start_soon( From 93330954c2e74d6447ef140c00b3f90a0fee6c39 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 15 Dec 2022 17:01:21 -0500 Subject: [PATCH 69/96] Ugh, use `bool` flag to determine index field.. 
--- piker/data/_pathops.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index da51e9c1..3dee38b2 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -94,6 +94,7 @@ def path_arrays_from_ohlc( data: np.ndarray, start: int64, bar_gap: float64 = 0.43, + use_time_index: bool = True, # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622 # index_field: str, @@ -126,8 +127,11 @@ def path_arrays_from_ohlc( high = q['high'] low = q['low'] close = q['close'] - # index = float64(q['index']) - index = float64(q['time']) + + if use_time_index: + index = float64(q['time']) + else: + index = float64(q['index']) # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622 # index = float64(q[index_field]) From 24b384f3ef699bcef3d8893ec44c8d648ff4e853 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 15 Dec 2022 23:48:07 -0500 Subject: [PATCH 70/96] Set `path_arrays_from_ohlc(use_time_index=True)` on epoch indexing Allows easily switching between normal array `int` indexing and time indexing by just flipping the `Viz._index_field: str`. Also, guard all the x-data audit breakpoints with a time indexing condition. 
--- piker/data/_formatters.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 458810aa..760c4218 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -315,10 +315,6 @@ class IncrementalFormatter(msgspec.Struct): self.xy_nd_start -= prepend_len profiler('prepended xy history: {prepend_length}') - xndall = self.x_nd[self.xy_slice] - if xndall.any() and (xndall == 0.5).any(): - breakpoint() - if append_len: self.incr_update_xy_nd( shm, @@ -384,7 +380,10 @@ class IncrementalFormatter(msgspec.Struct): # update the last "in view data range" if len(x_1d): self._last_ivdr = x_1d[0], x_1d[-1] - if (x_1d[-1] == 0.5).any(): + if ( + self.index_field == 'time' + and (x_1d[-1] == 0.5).any() + ): breakpoint() profiler('.format_to_1d()') @@ -498,7 +497,11 @@ class IncrementalFormatter(msgspec.Struct): # NOTE: we don't include the very last datum which is filled in # normally by another graphics object. x_1d = array[self.index_field][:-1] - if x_1d.any() and (x_1d[-1] == 0.5).any(): + if ( + self.index_field == 'time' + and x_1d.any() + and (x_1d[-1] == 0.5).any() + ): breakpoint() y_1d = array[array_key][:-1] @@ -613,6 +616,9 @@ class OHLCBarsFmtr(IncrementalFormatter): array, start, bar_gap=w * self.index_step_size, + + # XXX: don't ask, due to a ``numba`` bug.. + use_time_index=(self.index_field == 'time'), ) return x, y, c @@ -677,7 +683,6 @@ class StepCurveFmtr(IncrementalFormatter): # fill out Nx2 array to hold each step's left + right vertices. 
y_out = np.empty( - # (len(out), 2), x_out.shape, dtype=out.dtype, ) @@ -785,14 +790,6 @@ class StepCurveFmtr(IncrementalFormatter): x_step = self.x_nd[start:stop] y_step = self.y_nd[start:stop] - # debugging - # if y_step.any(): - # s = 3 - # print( - # f'x_step:\n{x_step[-s:]}\n' - # f'y_step:\n{y_step[-s:]}\n\n' - # ) - # slice out in-view data ivl, ivr = vr @@ -805,10 +802,11 @@ class StepCurveFmtr(IncrementalFormatter): x_1d = x_step_iv.reshape(x_step_iv.size) y_1d = y_step_iv.reshape(y_step_iv.size) - if not x_1d.size == y_1d.size: - breakpoint() - - if x_1d.any() and (x_1d == 0.5).any(): + if ( + self.index_field == 'time' + and x_1d.any() + and (x_1d == 0.5).any() + ): breakpoint() # debugging From 0d0675ac7e4263a6190a943aecddc89db2f94b32 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 16 Dec 2022 00:00:46 -0500 Subject: [PATCH 71/96] `Viz._index_field` a `typing.Literal[str]` --- piker/ui/_dataviz.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index bec5481f..82cc2e37 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -21,6 +21,7 @@ Data vizualization APIs from __future__ import annotations from typing import ( Optional, + Literal, TYPE_CHECKING, ) @@ -243,8 +244,16 @@ class Viz(msgspec.Struct): # , frozen=True): is_ohlc: bool = False render: bool = True # toggle for display loop - # _index_field: str = 'index' - _index_field: str = 'time' + _index_field: Literal[ + 'index', + 'time', + + # TODO: idea is to re-index all time series to a common + # longest-len-int-index where we avoid gaps and instead + # graph on the 0 -> N domain of the array index super set. 
+ # 'gapless', + + ] = 'time' # downsampling state _last_uppx: float = 0 From 444768d30f72c2893f107a85aaab0c1b970d5374 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 16 Dec 2022 12:40:32 -0500 Subject: [PATCH 72/96] Adjust OHLC bar x-offsets to be time span matched Previously we were drawing with the middle of the bar on each index with arms to either side: +/- some arm length. Instead this changes so that each bar is drawn *after* each index/timestamp such that in graphics coords the bar span more correctly matches the time span in the x-domain. This makes the linked region between slow and fast chart directly match (without any transform) for epoch-time indexing such that the last x-coord in view on the fast chart is no more than the next time step in (downsampled) slow view. Deats: - adjust in `._pathops.path_arrays_from_ohlc()` and take a `bar_w` bar width input (normally taken from the data step size). - change `.ui._ohlc.bar_from_ohlc_row()` and `BarItems.draw_last_datum()` to match. 
--- piker/data/_pathops.py | 16 ++++++++++------ piker/ui/_ohlc.py | 29 +++++++++++++++++++++-------- 2 files changed, 31 insertions(+), 14 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 3dee38b2..aa6d628d 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -93,7 +93,8 @@ def xy_downsample( def path_arrays_from_ohlc( data: np.ndarray, start: int64, - bar_gap: float64 = 0.43, + bar_w: float64, + bar_gap: float64 = 0.16, use_time_index: bool = True, # XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622 @@ -119,6 +120,8 @@ def path_arrays_from_ohlc( ) y, c = x.copy(), x.copy() + half_w: float = bar_w/2 + # TODO: report bug for assert @ # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991 for i, q in enumerate(data[start:], start): @@ -143,13 +146,14 @@ def path_arrays_from_ohlc( istop = istart + 6 # x,y detail the 6 points which connect all vertexes of a ohlc bar + mid: float = index + half_w x[istart:istop] = ( - index - bar_gap, - index, - index, - index, - index, index + bar_gap, + mid, + mid, + mid, + mid, + index + bar_w - bar_gap, ) y[istart:istop] = ( open, diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index d935bd5c..f717b1a5 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -51,7 +51,8 @@ log = get_logger(__name__) def bar_from_ohlc_row( row: np.ndarray, # 0.5 is no overlap between arms, 1.0 is full overlap - bar_gap: float = 0.43 + bar_w: float, + bar_gap: float = 0.16 ) -> tuple[QLineF]: ''' @@ -67,9 +68,11 @@ def bar_from_ohlc_row( # history path faster since it's done in C++: # https://doc.qt.io/qt-5/qgraphicslineitem.html + mid: float = (bar_w / 2) + index + # high -> low vertical (body) line if low != high: - hl = QLineF(index, low, index, high) + hl = QLineF(mid, low, mid, high) else: # XXX: if we don't do it renders a weird rectangle? # see below for filtering this later... 
@@ -80,10 +83,13 @@ # the index's range according to the view mapping coordinates. # open line - o = QLineF(index - bar_gap, open, index, open) + o = QLineF(index + bar_gap, open, mid, open) # close line - c = QLineF(index, close, index + bar_gap, close) + c = QLineF( + mid, close, + index + bar_w - bar_gap, close, + ) return [hl, o, c] @@ -249,10 +255,11 @@ class BarItems(pg.GraphicsObject): step_size = index[-1] - index[-2] # generate new lines objects for updatable "current bar" + bg: float = 0.16 * step_size self._last_bar_lines = bar_from_ohlc_row( last_row, - bar_gap=step_size * 0.43 - # fields, + bar_w=step_size, + bar_gap=bg, ) # assert i == graphics.start_index - 1 @@ -268,10 +275,16 @@ if l != h: # noqa if body is None: - body = self._last_bar_lines[0] = QLineF(i, l, i, h) + body = self._last_bar_lines[0] = QLineF( + i + bg, l, + i + step_size - bg, h, + ) else: # update body - body.setLine(i, l, i, h) + body.setLine( + body.x1(), l, + body.x2(), h, + ) # XXX: pretty sure this is causing an issue where the # bar has a large upward move right before the next From 51f2461e8b8cfb74756a20518d0275d17790cc64 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 16 Dec 2022 13:05:21 -0500 Subject: [PATCH 73/96] Add `IncrementalFormatter.x_offset: np.ndarray` Define the x-domain coords "offset" (determining the curve graphics per-datum placement) for each formatter such that there's only one place to change it when needed. Obviously each graphics type has its own dimensionality and this is reflected by the array shapes on each subtype. 
--- piker/data/_formatters.py | 48 +++++++++++++++++++++++++++++++-------- 1 file changed, 38 insertions(+), 10 deletions(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 760c4218..8194c5fb 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -402,6 +402,8 @@ class IncrementalFormatter(msgspec.Struct): # Sub-type override interface # ############################### + x_offset: np.ndarray = np.array([0]) + # optional pre-graphics xy formatted data which # is incrementally updated in sync with the source data. # XXX: was ``.allocate_xy()`` @@ -422,7 +424,11 @@ class IncrementalFormatter(msgspec.Struct): ''' y_nd = src_shm._array[data_field].copy() - x_nd = src_shm._array[self.index_field].copy() + x_nd = ( + src_shm._array[self.index_field].copy() + + + self.x_offset + ) return x_nd, y_nd # XXX: was ``.update_xy()`` @@ -448,7 +454,11 @@ class IncrementalFormatter(msgspec.Struct): self.y_nd[read_slc] = y_nd_new x_nd_new = self.x_nd[read_slc] - x_nd_new[:] = new_from_src[self.index_field] + x_nd_new[:] = ( + new_from_src[self.index_field] + + + self.x_offset + ) # x_nd = self.x_nd[self.xy_slice] # y_nd = self.y_nd[self.xy_slice] @@ -516,6 +526,12 @@ class IncrementalFormatter(msgspec.Struct): class OHLCBarsFmtr(IncrementalFormatter): + x_offset: np.ndarray = np.array([ + -0.5, + 0, + 0, + 0.5, + ]) fields: list[str] = field( default_factory=lambda: ['open', 'high', 'low', 'close'] @@ -548,7 +564,9 @@ class OHLCBarsFmtr(IncrementalFormatter): # 4, # only ohlc y_nd.shape[1], ), - ) + np.array([-0.5, 0, 0, 0.5]) + ) + + + self.x_offset ) assert y_nd.any() @@ -587,7 +605,7 @@ class OHLCBarsFmtr(IncrementalFormatter): x_nd_new[:] = np.broadcast_to( new_from_src[self.index_field][:, None], new_y_nd.shape, - ) + np.array([-0.5, 0, 0, 0.5]) + ) + self.x_offset # TODO: can we drop this frame and just use the above? 
def format_xy_nd_to_1d( @@ -599,7 +617,7 @@ class OHLCBarsFmtr(IncrementalFormatter): start: int = 0, # XXX: do we need this? # 0.5 is no overlap between arms, 1.0 is full overlap - w: float = 0.43, + w: float = 0.16, ) -> tuple[ np.ndarray, @@ -615,6 +633,7 @@ class OHLCBarsFmtr(IncrementalFormatter): x, y, c = path_arrays_from_ohlc( array, start, + bar_w=self.index_step_size, bar_gap=w * self.index_step_size, # XXX: don't ask, due to a ``numba`` bug.. @@ -658,6 +677,11 @@ class OHLCBarsAsCurveFmtr(OHLCBarsFmtr): class StepCurveFmtr(IncrementalFormatter): + x_offset: np.ndarray = np.array([ + -0.5, + 0.5, + ]) + def allocate_xy_nd( self, @@ -676,10 +700,14 @@ class StepCurveFmtr(IncrementalFormatter): i = shm._array[self.index_field].copy() out = shm._array[data_field].copy() - x_out = np.broadcast_to( - i[:, None], - (i.size, 2), - ) + np.array([-0.5, 0.5]) + x_out = ( + np.broadcast_to( + i[:, None], + (i.size, 2), + ) + + + self.x_offset + ) # fill out Nx2 array to hold each step's left + right vertices. 
y_out = np.empty( @@ -744,7 +772,7 @@ class StepCurveFmtr(IncrementalFormatter): x_nd_new[:] = ( new_from_src[self.index_field][:, None] + - np.array([-0.5, 0.5]) + self.x_offset ) # XXX: uncomment for debugging From 50ef4efccb7e8d9ec4c0e962bb3e4b16d832afe2 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 16 Dec 2022 17:46:44 -0500 Subject: [PATCH 74/96] Align step curves the same as OHLC bars --- piker/data/_formatters.py | 4 ++-- piker/ui/_curve.py | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index 8194c5fb..ebeb83fb 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -678,8 +678,8 @@ class OHLCBarsAsCurveFmtr(OHLCBarsFmtr): class StepCurveFmtr(IncrementalFormatter): x_offset: np.ndarray = np.array([ - -0.5, - 0.5, + 0, + 1, ]) def allocate_xy_nd( diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index 065887af..b5d128d6 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -374,6 +374,11 @@ class Curve(pg.GraphicsObject): # lines up with the "middle" of the current # (OHLC) sample. self._last_line = QLineF( + + # NOTE: currently we draw in x-domain + # from last datum to current such that + # the end of line touches the "beginning" + # of the current datum step span. x[-2], y[-2], x[-1], y[-1], ) @@ -439,7 +444,6 @@ class StepCurve(Curve): x_2last = x[-2] y_last = y[-1] step_size = x_last - x_2last - half_step = step_size / 2 # lol, commenting this makes step curves # all "black" for me :eyeroll:.. 
@@ -448,7 +452,7 @@ class StepCurve(Curve): x_last, 0, ) self._last_step_rect = QRectF( - x_last - half_step, 0, + x_last, 0, step_size, y_last, ) return x, y From 3300a240c609e5582b38c7231508bd791c9a99f8 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 16 Dec 2022 18:19:39 -0500 Subject: [PATCH 75/96] Use array-`int`-indexing on single feed Might as well since it makes the chart look less gappy and we can easily flip the index switch now B) Also adds a new `'i_slow_last'` key to `DisplayState` for a singleton across all slow charts and thus no more need for special case logic in `viz.incr_info()`. --- piker/ui/_display.py | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index c44fd210..4d13427d 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -212,10 +212,12 @@ async def graphics_update_loop( assert hist_chart # per-viz-set global last index tracking for global chart - # view UX incrementing. + # view UX incrementing; these values are singleton + # per-multichart-set such that automatic x-domain shifts are only + # done once per time step update. 
globalz = { - 'i_last': 0, - 'i_last_append': 0, + 'i_last': 0, # multiview-global fast (1s) step index + 'i_last_slow': 0, # multiview-global slow (1m) step index } dss: dict[str, DisplayState] = {} @@ -292,8 +294,7 @@ async def graphics_update_loop( fast_chart.show() last_quote = time.time() - # global _i_last - i_last = ohlcv.index + i_last: float = 0 dss[fqsn] = ds = linked.display_state = DisplayState(**{ 'godwidget': godwidget, @@ -375,13 +376,15 @@ async def graphics_update_loop( do_append and liv ): - # hist_chart.increment_view(steps=i_diff) viz = hist_chart._vizs[fqsn] viz.plot.vb._set_yrange( # yrange=hist_chart.maxmin(name=fqsn) ) # hist_chart.view._set_yrange(yrange=hist_chart.maxmin()) + if should_incr: + hist_chart.increment_view(steps=i_diff) + nurse.start_soon(increment_history_view) # main real-time quotes update loop @@ -878,7 +881,18 @@ async def link_views_with_region( # set the region on the history chart # to the range currently viewed in the # HFT/real-time chart. - region.setRegion(viewRange[0]) + rng = mn, mx = viewRange[0] + + # hist_viz = hist_chart.get_viz(flume.symbol.fqsn) + # hist = hist_viz.shm.array[-3:] + # print( + # f'mn: {mn}\n' + # f'mx: {mx}\n' + # f'slow last 3 epochs: {list(hist["time"])}\n' + # f'slow last 3: {hist}\n' + # ) + + region.setRegion(rng) else: # poll for datums load and timestep detection @@ -1113,6 +1127,12 @@ async def display_symbol_data( tuple[str, Flume] ] = list(feed.flumes.items()) + # use array int-indexing when no aggregate feed overlays are + # loaded. 
+ if len(fitems) == 1: + from ._dataviz import Viz + Viz._index_field = 'index' + # for the "first"/selected symbol we create new chart widgets # and sub-charts for FSPs fqsn, flume = fitems[0] From dea1c1c2d6a58a131138c0caacfedc2aef401668 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 16 Dec 2022 20:09:33 -0500 Subject: [PATCH 76/96] Make `Viz.incr_info()` sample rate agnostic Mainly it was the global (should we )increment logic that needs to be independent for the fast vs. slow chart such that the slow isn't update-shifted by the fast and vice versa. We do this using a new `'i_last_slow'` key in the `DisplayState.globalz: dict` which is singleton for each sample-rate-specific chart and works for both time and array indexing. Also, we drop some old commented `graphics.draw_last_datum()` code that never ended up being needed again inside the coordinate cache reset bloc. --- piker/ui/_dataviz.py | 90 +++++++++++++++++++------------------------- 1 file changed, 38 insertions(+), 52 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 82cc2e37..ca266d2a 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -720,23 +720,14 @@ class Viz(msgspec.Struct): # , frozen=True): path, reset = out # XXX: SUPER UGGGHHH... without this we get stale cache - # graphics that don't update until you downsampler again.. + # graphics that "smear" across the view horizontally + # when panning and the first datum is out of view.. if reset: + # assign output paths to graphicis obj but + # after a coords-cache reset. with graphics.reset_cache(): - # assign output paths to graphicis obj graphics.path = r.path graphics.fast_path = r.fast_path - - # XXX: we don't need this right? 
- # graphics.draw_last_datum( - # path, - # src_array, - # reset, - # array_key, - # index_field=self.index_field, - # ) - # graphics.update() - # profiler('.update()') else: # assign output paths to graphicis obj graphics.path = r.path @@ -976,23 +967,14 @@ class Viz(msgspec.Struct): # , frozen=True): ) -> tuple: - # shm = shm or self.ohlcv - # chart = chart or self.chart - globalz = state.globalz - if is_1m: - state = state.hist_vars - else: - state = state.vars + _, _, _, r = self.bars_range() # most recent right datum index in-view + i_step = self.shm.array[-1][self.index_field] # last source index. - _, _, _, r = self.bars_range() - - i_step = self.shm.array[-1][self.index_field] - - # last-in-view: is a real-time update necessary? + # check if "last (is) in view" -> is a real-time update necessary? liv = r >= i_step - # TODO: make this not loop through all vizs each time? - # compute the first available graphic's x-units-per-pixel + # compute the first available graphic obj's x-units-per-pixel + # TODO: make this not loop through all vizs each time! uppx = self.plot.vb.x_uppx() # NOTE: this used to be implemented in a dedicated @@ -1001,60 +983,64 @@ class Viz(msgspec.Struct): # , frozen=True): # this simple index-diff and all the fsp sub-curve graphics # are diffed on each draw cycle anyway; so updates to the # "curve" length is already automatic. - glast = globalz['i_last'] - i_diff = i_step - glast - - # print(f'{chart.name} TIME STEP: {i_step}') - # i_diff = i_step - state['i_last'] + last_key = 'i_last_slow' if is_1m else 'i_last' + globalz = state.globalz + varz = state.hist_vars if is_1m else state.vars + glast = globalz[last_key] + # when the current step is now greater then the last we + # have read from the display state globals, we presume that the + # underlying source shm buffer has added a new sample and thus + # we should increment the global view a step iff the last datum + # is in view. 
should_global_increment: bool = False if i_step > glast: - globalz['i_last'] = i_step + globalz[last_key] = i_step should_global_increment = True # update global state for this chart - if ( - # state is None - not is_1m - and i_diff > 0 - ): - state['i_last'] = i_step - - append_diff = i_step - state['i_last_append'] - # append_diff = i_step - _i_last_append + i_diff: float = i_step - glast + if i_diff > 0: + varz['i_last'] = i_step # update the "last datum" (aka extending the vizs graphic with # new data) only if the number of unit steps is >= the number of # such unit steps per pixel (aka uppx). Iow, if the zoom level # is such that a datum(s) update to graphics wouldn't span # to a new pixel, we don't update yet. + i_last_append = varz['i_last_append'] + append_diff = i_step - i_last_append do_append = ( append_diff >= uppx and i_diff ) + + do_rt_update = uppx < update_uppx + if ( do_append and not is_1m ): - # _i_last_append = i_step - state['i_last_append'] = i_step + varz['i_last_append'] = i_step # fqsn = self.flume.symbol.fqsn # print( # f'DOING APPEND => {fqsn}\n' # f'i_step:{i_step}\n' - # f'i_diff:{i_diff}\n' - # f'last:{_i_last}\n' - # f'last_append:{_i_last_append}\n' - # f'append_diff:{append_diff}\n' + # f'glast:{glast}\n' + # f'last_append:{i_last_append}\n' # f'r: {r}\n' - # f'liv: {liv}\n' + # f'uppx: {uppx}\n' + # f'liv: {liv}\n' + # f'do_append:{do_append}\n' + # f'i_diff:{i_diff}\n' + # f'do_rt_update: {do_rt_update}\n' + # f'append_diff:{append_diff}\n' + # f'should_global_increment: {should_global_increment}\n' # ) - do_rt_update = uppx < update_uppx - - # TODO: pack this into a struct + # TODO: pack this into a struct? 
return ( uppx, liv, From 1ece704d6eef7a8d79855d0ddb9f7ef22937ad6d Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 16 Dec 2022 20:53:55 -0500 Subject: [PATCH 77/96] Add `ChartPlotWidget.main_viz: Viz` convenience `@property` --- piker/ui/_app.py | 3 +-- piker/ui/_chart.py | 8 ++++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/piker/ui/_app.py b/piker/ui/_app.py index 2743103e..3be073e7 100644 --- a/piker/ui/_app.py +++ b/piker/ui/_app.py @@ -178,8 +178,7 @@ def _main( tractor_kwargs, ) -> None: ''' - Sync entry point to start a chart: a ``tractor`` + Qt runtime - entry point + Sync entry point to start a chart: a ``tractor`` + Qt runtime. ''' run_qtractor( diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index aea1d927..a643c498 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -1365,4 +1365,12 @@ class ChartPlotWidget(pg.PlotWidget): self, key: str, ) -> Viz: + ''' + Try to get an underlying ``Viz`` by key. + + ''' return self._vizs.get(key) + + @property + def main_viz(self) -> Viz: + return self.get_viz(self.name) From bc17308de71be6d89713d9d33ef7fd04e6be8b63 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 18 Dec 2022 19:50:07 -0500 Subject: [PATCH 78/96] Drop coordinate cacheing from `BarItems`, causes weird jitter on pan --- piker/ui/_ohlc.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index f717b1a5..288af70d 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -118,9 +118,11 @@ class BarItems(pg.GraphicsObject): self.last_bar_pen = pg.mkPen(hcolor(last_bar_color), width=2) self._name = name - self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache) - self.path = QPainterPath() + # XXX: causes this weird jitter bug when click-drag panning + # where the path curve will awkwardly flicker back and forth? 
+ # self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache) + self.path = QPainterPath() self._last_bar_lines: tuple[QLineF, ...] | None = None def x_uppx(self) -> int: From bf8ea336979f045331c9b24d67257a568df41d25 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 18 Dec 2022 19:50:41 -0500 Subject: [PATCH 79/96] Add type annots to vars inside `Render.render()` --- piker/ui/_render.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/piker/ui/_render.py b/piker/ui/_render.py index e037122d..dff46dab 100644 --- a/piker/ui/_render.py +++ b/piker/ui/_render.py @@ -203,9 +203,9 @@ class Renderer(msgspec.Struct): ): should_redraw = True - path = self.path - fast_path = self.fast_path - reset = False + path: QPainterPath = self.path + fast_path: QPainterPath = self.fast_path + reset: bool = False self.viz.yrange = None From ed1f64cf43da901d317a5364aee774b90ab712e6 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sun, 18 Dec 2022 19:58:43 -0500 Subject: [PATCH 80/96] Fix gap detection on RHS; always bin-search on overshot time range --- piker/data/_pathops.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index aa6d628d..cea9ebf7 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -313,7 +313,7 @@ def slice_from_time( index = arr['index'] i_first = index[0] - i_last = index[-1] + # i_last = index[-1] read_i_max = arr.shape[0] if ( @@ -396,12 +396,10 @@ def slice_from_time( # ) read_i_start = new_read_i_start - 1 - i_iv_stop = index[read_i_stop - 1] + # i_iv_stop = index[read_i_stop - 1] t_iv_stop = times[read_i_stop - 1] if ( - i_iv_stop < i_last - # i_stop_t <= t_last - and t_iv_stop > i_stop_t + t_iv_stop > i_stop_t ): # t_diff = stop_t - t_iv_stop # print( From ffb57f0256e8333ab32cbc6ddb5820a538ada908 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 19 Dec 2022 09:24:14 -0500 Subject: [PATCH 81/96] Rename `reset` -> `reset_cache` --- 
piker/ui/_dataviz.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index ca266d2a..6d4852a9 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -305,9 +305,8 @@ class Viz(msgspec.Struct): # , frozen=True): # https://stackoverflow.com/a/29980872 rkey = (round(lbar), round(rbar)) cached_result = self._mxmns.get(rkey) - do_print = False + do_print = False # (self.index_step() == 60) if cached_result: - if do_print: print( f'{self.name} CACHED maxmin\n' @@ -327,6 +326,7 @@ class Viz(msgspec.Struct): # , frozen=True): arr, start_t=lbar, stop_t=rbar, + step=self.index_step(), ) slice_view = arr[read_slc] @@ -717,12 +717,14 @@ class Viz(msgspec.Struct): # , frozen=True): log.warning(f'{self.name} failed to render!?') return graphics - path, reset = out + path, reset_cache = out # XXX: SUPER UGGGHHH... without this we get stale cache # graphics that "smear" across the view horizontally # when panning and the first datum is out of view.. - if reset: + if ( + reset_cache + ): # assign output paths to graphicis obj but # after a coords-cache reset. 
with graphics.reset_cache(): @@ -736,7 +738,7 @@ class Viz(msgspec.Struct): # , frozen=True): graphics.draw_last_datum( path, src_array, - reset, + reset_cache, array_key, index_field=self.index_field, ) From 06f1b94147a8cc25bfaf4fe38ee920bf8e79ef58 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 19 Dec 2022 15:09:13 -0500 Subject: [PATCH 82/96] Make `Viz.incr_info()` do treading with time-index, and appending with array-index --- piker/ui/_dataviz.py | 87 +++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 42 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 6d4852a9..ba34616e 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -959,21 +959,26 @@ class Viz(msgspec.Struct): # , frozen=True): def incr_info( self, - - # NOTE: pass in a copy if you don't want your orignal mutated. - state: DisplayState, - - update_state: bool = True, + ds: DisplayState, update_uppx: float = 16, is_1m: bool = False, ) -> tuple: _, _, _, r = self.bars_range() # most recent right datum index in-view - i_step = self.shm.array[-1][self.index_field] # last source index. + lasts = self.shm.array[-1] + i_step = lasts['index'] # last index-specific step. + i_step_t = lasts['time'] # last time step. + + fqsn = self.flume.symbol.fqsn + if is_1m: + print(f'{fqsn} 1Min index: {i_step}, t: {i_step_t}') # check if "last (is) in view" -> is a real-time update necessary? - liv = r >= i_step + if self.index_field == 'index': + liv = (r >= i_step) + else: + liv = (r >= i_step_t) # compute the first available graphic obj's x-units-per-pixel # TODO: make this not loop through all vizs each time! @@ -985,25 +990,25 @@ class Viz(msgspec.Struct): # , frozen=True): # this simple index-diff and all the fsp sub-curve graphics # are diffed on each draw cycle anyway; so updates to the # "curve" length is already automatic. 
- last_key = 'i_last_slow' if is_1m else 'i_last' - globalz = state.globalz - varz = state.hist_vars if is_1m else state.vars + globalz = ds.globalz + varz = ds.hist_vars if is_1m else ds.vars + + last_key = 'i_last_slow_t' if is_1m else 'i_last_t' glast = globalz[last_key] - # when the current step is now greater then the last we - # have read from the display state globals, we presume that the - # underlying source shm buffer has added a new sample and thus - # we should increment the global view a step iff the last datum - # is in view. - should_global_increment: bool = False - if i_step > glast: - globalz[last_key] = i_step - should_global_increment = True + # calc datums diff since last global increment + i_diff_t: float = i_step_t - glast - # update global state for this chart - i_diff: float = i_step - glast - if i_diff > 0: - varz['i_last'] = i_step + # when the current step is now greater then the last we have + # read from the display state globals, we presume that the + # underlying source shm buffer has added a new sample and thus + # we should increment the global view a step (i.e. tread the + # view in place to keep the current datum at the same spot on + # screen). + should_tread: bool = False + if i_diff_t > 0: + globalz[last_key] = i_step_t + should_tread = True # update the "last datum" (aka extending the vizs graphic with # new data) only if the number of unit steps is >= the number of @@ -1012,43 +1017,41 @@ class Viz(msgspec.Struct): # , frozen=True): # to a new pixel, we don't update yet. 
i_last_append = varz['i_last_append'] append_diff = i_step - i_last_append - do_append = ( - append_diff >= uppx - and i_diff - ) - do_rt_update = uppx < update_uppx + do_px_step = append_diff >= uppx + do_rt_update = (uppx < update_uppx) if ( - do_append - and not is_1m + do_px_step ): varz['i_last_append'] = i_step - # fqsn = self.flume.symbol.fqsn # print( # f'DOING APPEND => {fqsn}\n' - # f'i_step:{i_step}\n' - # f'glast:{glast}\n' - # f'last_append:{i_last_append}\n' + # f'i_step: {i_step}\n' + # f'i_step_t: {i_step_t}\n' + # f'glast: {glast}\n' + # f'last_append: {i_last_append}\n' # f'r: {r}\n' - + # '-----------------------------\n' # f'uppx: {uppx}\n' # f'liv: {liv}\n' - # f'do_append:{do_append}\n' - # f'i_diff:{i_diff}\n' + # f'do_px_step: {do_px_step}\n' + # f'i_diff_t: {i_diff_t}\n' # f'do_rt_update: {do_rt_update}\n' - # f'append_diff:{append_diff}\n' - # f'should_global_increment: {should_global_increment}\n' + # f'append_diff: {append_diff}\n' + # f'should_tread: {should_tread}\n' # ) + varz['i_last'] = i_step + # TODO: pack this into a struct? 
return ( uppx, liv, - do_append, - i_diff, + do_px_step, + i_diff_t, append_diff, do_rt_update, - should_global_increment, + should_tread, ) From b6521498f4f260f3bd14a6a453a17c1205e4ae11 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 19 Dec 2022 15:10:34 -0500 Subject: [PATCH 83/96] Make `.increment_view()` take in a `datums: int` and always scale it by sample step size --- piker/ui/_chart.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py index a643c498..14921b3d 100644 --- a/piker/ui/_chart.py +++ b/piker/ui/_chart.py @@ -951,10 +951,6 @@ class ChartPlotWidget(pg.PlotWidget): def focus(self) -> None: self.view.setFocus() - def view_range(self) -> tuple[int, int]: - vr = self.viewRect() - return int(vr.left()), int(vr.right()) - def pre_l1_xs(self) -> tuple[float, float]: ''' Return the view x-coord for the value just before @@ -1034,25 +1030,28 @@ class ChartPlotWidget(pg.PlotWidget): def increment_view( self, - steps: int = 1, + datums: int = 1, vb: Optional[ChartView] = None, ) -> None: - """ - Increment the data view one step to the right thus "following" - the current time slot/step/bar. + ''' + Increment the data view ``datums``` steps toward y-axis thus + "following" the current time slot/step/bar. - """ - l, r = self.view_range() + ''' view = vb or self.view - if steps >= 300: + viz = self.main_viz + l, r = viz.view_range() + x_shift = viz.index_step() * datums + + if datums >= 300: print("FUCKING FIX THE GLOBAL STEP BULLSHIT") # breakpoint() return view.setXRange( - min=l + steps, - max=r + steps, + min=l + x_shift, + max=r + x_shift, # TODO: holy shit, wtf dude... why tf would this not be 0 by # default... speechless. 
From efbb8e86d47de3465a0848b378c72e0279f7c194 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 19 Dec 2022 17:11:34 -0500 Subject: [PATCH 84/96] Fix overlayed slow chart "treading" Turns out we were updating the wrong ``Viz``/``DisplayState`` inside the closure style `increment_history_view()`` (probably due to looping through the flumes and dynamically closing in that task-func).. Instead define the history incrementer at module level and pass in the `DisplayState` explicitly. Further rework the `DisplayState` attrs to be more focused around the `Viz` associated with the fast and slow chart and be sure to adjust output from each `Viz.incr_info()` call to latest update. Oh, and just tweaked the line palette for the moment. FYI "treading" here is referring to the x-shifting of the curve when the last datum is in view such that on new sampled appends the "last" datum is kept in the same x-location in UI terms. --- piker/ui/_display.py | 210 +++++++++++++++++++++---------------------- 1 file changed, 101 insertions(+), 109 deletions(-) diff --git a/piker/ui/_display.py b/piker/ui/_display.py index 4d13427d..e544b64d 100644 --- a/piker/ui/_display.py +++ b/piker/ui/_display.py @@ -43,12 +43,14 @@ from ..data.types import Struct from ..data._sharedmem import ( ShmArray, ) +from ..data._sampling import _tick_groups from ._axes import YAxisLabel from ._chart import ( ChartPlotWidget, LinkedSplits, GodWidget, ) +from ._dataviz import Viz from ._l1 import L1Labels from ._style import hcolor from ._fsp import ( @@ -63,7 +65,6 @@ from ._forms import ( ) from . 
import _pg_overrides as pgo # from ..data._source import tf_in_1s -from ..data._sampling import _tick_groups from .order_mode import ( open_order_mode, OrderMode, @@ -138,11 +139,12 @@ class DisplayState(Struct): maxmin: Callable flume: Flume - ohlcv: ShmArray - hist_ohlcv: ShmArray - # high level chart handles + # high level chart handles and underlying ``Viz`` chart: ChartPlotWidget + viz: Viz + hist_chart: ChartPlotWidget + hist_viz: Viz # axis labels l1: L1Labels @@ -178,6 +180,56 @@ class DisplayState(Struct): wap_in_history: bool = False +async def increment_history_view( + ds: DisplayState, +): + hist_chart = ds.hist_chart + hist_viz = ds.hist_viz + assert 'hist' in hist_viz.shm.token['shm_name'] + + # TODO: seems this is more reliable at keeping the slow + # chart incremented in view more correctly? + # - It might make sense to just inline this logic with the + # main display task? => it's a tradeoff of slower task + # wakeups/ctx switches verus logic checks (as normal) + # - we need increment logic that only does the view shift + # call when the uppx permits/needs it + async with hist_viz.flume.index_stream(int(1)) as istream: + async for msg in istream: + + # l3 = ds.viz.shm.array[-3:] + # print( + # f'fast step for {ds.flume.symbol.fqsn}:\n' + # f'{list(l3["time"])}\n' + # f'{l3}\n' + # ) + # check if slow chart needs an x-domain shift and/or + # y-range resize. + ( + uppx, + liv, + do_append, + i_diff_t, + append_diff, + do_rt_update, + should_tread, + + ) = hist_viz.incr_info( + ds=ds, + is_1m=True, + ) + + if ( + do_append + and liv + ): + hist_viz.plot.vb._set_yrange() + + # check if tread-in-place x-shift is needed + if should_tread: + hist_chart.increment_view(datums=append_diff) + + async def graphics_update_loop( nurse: trio.Nursery, @@ -216,8 +268,8 @@ async def graphics_update_loop( # per-multichart-set such that automatic x-domain shifts are only # done once per time step update. 
globalz = { - 'i_last': 0, # multiview-global fast (1s) step index - 'i_last_slow': 0, # multiview-global slow (1m) step index + 'i_last_t': 0, # multiview-global fast (1s) step index + 'i_last_slow_t': 0, # multiview-global slow (1m) step index } dss: dict[str, DisplayState] = {} @@ -293,25 +345,29 @@ async def graphics_update_loop( tick_margin = 3 * tick_size fast_chart.show() - last_quote = time.time() - i_last: float = 0 + last_quote_s = time.time() dss[fqsn] = ds = linked.display_state = DisplayState(**{ 'godwidget': godwidget, 'quotes': {}, 'maxmin': maxmin, + 'flume': flume, - 'ohlcv': ohlcv, - 'hist_ohlcv': hist_ohlcv, + 'chart': fast_chart, + 'viz': fast_viz, 'last_price_sticky': last_price_sticky, + + 'hist_chart': hist_chart, + 'hist_viz': hist_viz, 'hist_last_price_sticky': hist_last_price_sticky, + 'l1': l1, 'vars': { 'tick_margin': tick_margin, - 'i_last': i_last, - 'i_last_append': i_last, + 'i_last': 0, + 'i_last_append': 0, 'last_mx_vlm': last_mx_vlm, 'last_mx': last_mx, 'last_mn': last_mn, @@ -327,72 +383,25 @@ async def graphics_update_loop( fast_chart.default_view() - ds.hist_vars.update({ - 'i_last_append': i_last, - 'i_last': i_last, - }) + # ds.hist_vars.update({ + # 'i_last_append': 0, + # 'i_last': 0, + # }) - # TODO: probably factor this into some kinda `DisplayState` - # API that can be reused at least in terms of pulling view - # params (eg ``.bars_range()``). - async def increment_history_view(): - _, hist_step_size_s, _ = flume.get_ds_info() + nurse.start_soon( + increment_history_view, + ds, + ) - async with flume.index_stream( - # int(hist_step_size_s) - # TODO: seems this is more reliable at keeping the slow - # chart incremented in view more correctly? - # - It might make sense to just inline this logic with the - # main display task? 
=> it's a tradeoff of slower task - # wakeups/ctx switches verus logic checks (as normal) - # - we need increment logic that only does the view shift - # call when the uppx permits/needs it - int(1), - ) as istream: - async for msg in istream: - - # check if slow chart needs an x-domain shift and/or - # y-range resize. - ( - uppx, - liv, - do_append, - i_diff, - append_diff, - do_rt_update, - should_incr, - - ) = hist_viz.incr_info( - state=ds, - is_1m=True, - ) - # print( - # f'liv: {liv}\n' - # f'do_append: {do_append}\n' - # f'append_diff: {append_diff}\n' - # ) - - if ( - do_append - and liv - ): - viz = hist_chart._vizs[fqsn] - viz.plot.vb._set_yrange( - # yrange=hist_chart.maxmin(name=fqsn) - ) - # hist_chart.view._set_yrange(yrange=hist_chart.maxmin()) - - if should_incr: - hist_chart.increment_view(steps=i_diff) - - nurse.start_soon(increment_history_view) + if ds.hist_vars['i_last'] < ds.hist_vars['i_last_append']: + breakpoint() # main real-time quotes update loop stream: tractor.MsgStream async with feed.open_multi_stream() as stream: assert stream async for quotes in stream: - quote_period = time.time() - last_quote + quote_period = time.time() - last_quote_s quote_rate = round( 1/quote_period, 1) if quote_period > 0 else float('inf') if ( @@ -405,7 +414,7 @@ async def graphics_update_loop( ): log.warning(f'High quote rate {symbol.key}: {quote_rate}') - last_quote = time.time() + last_quote_s = time.time() for sym, quote in quotes.items(): ds = dss[sym] @@ -473,22 +482,21 @@ def graphics_update_cycle( # rt "HFT" chart l1 = ds.l1 - # ohlcv = ds.ohlcv ohlcv = flume.rt_shm array = ohlcv.array - vars = ds.vars - tick_margin = vars['tick_margin'] + varz = ds.vars + tick_margin = varz['tick_margin'] ( uppx, liv, do_append, - i_diff, + i_diff_t, append_diff, do_rt_update, - should_incr, - ) = main_viz.incr_info(state=ds) + should_tread, + ) = main_viz.incr_info(ds=ds) # TODO: we should only run mxmn when we know # an update is due via ``do_append`` above. 
@@ -503,20 +511,8 @@ def graphics_update_cycle( mn = mn_in_view - tick_margin profiler('`ds.maxmin()` call') - if ( - prepend_update_index is not None - and lbar > prepend_update_index - ): - # on a history update (usually from the FSP subsys) - # if the segment of history that is being prepended - # isn't in view there is no reason to do a graphics - # update. - log.info('Skipping prepend graphics cycle: frame not in view') - return - - # TODO: eventually we want to separate out the utrade (aka - # dark vlm prices) here and show them as an additional - # graphic. + # TODO: eventually we want to separate out the dark vlm and show + # them as an additional graphic. clear_types = _tick_groups['clears'] # update ohlc sampled price bars @@ -542,22 +538,19 @@ def graphics_update_cycle( # left unless we get one of the following: if ( ( - should_incr + should_tread and do_append and liv ) or trigger_all ): - # print(f'INCREMENTING {fqsn}') - chart.increment_view(steps=i_diff) - main_viz.plot.vb._set_yrange( - # yrange=(mn, mx), - ) + chart.increment_view(datums=append_diff) + main_viz.plot.vb._set_yrange() # NOTE: since vlm and ohlc charts are axis linked now we don't # need the double increment request? # if vlm_chart: - # vlm_chart.increment_view(steps=i_diff) + # vlm_chart.increment_view(datums=append_diff) profiler('view incremented') @@ -656,7 +649,7 @@ def graphics_update_cycle( l1.bid_label.update_fields({'level': price, 'size': size}) # check for y-range re-size - if (mx > vars['last_mx']) or (mn < vars['last_mn']): + if (mx > varz['last_mx']) or (mn < varz['last_mn']): # fast chart resize case if ( @@ -692,16 +685,14 @@ def graphics_update_cycle( _, _, ) = hist_viz.incr_info( - state=ds, + ds=ds, is_1m=True, ) if hist_liv: - hist_viz.plot.vb._set_yrange( - # yrange=hist_chart.maxmin(name=fqsn), - ) + hist_viz.plot.vb._set_yrange() # XXX: update this every draw cycle to make L1-always-in-view work. 
- vars['last_mx'], vars['last_mn'] = mx, mn + varz['last_mx'], varz['last_mn'] = mx, mn # run synchronous update on all linked viz # TODO: should the "main" (aka source) viz be special? @@ -730,7 +721,7 @@ def graphics_update_cycle( ): viz.draw_last( array_key=curve_name, - # only_last_uppx=True, + only_last_uppx=True, ) # volume chart logic.. @@ -774,7 +765,7 @@ def graphics_update_cycle( profiler('`vlm_chart.update_graphics_from_flow()`') if ( - mx_vlm_in_view != vars['last_mx_vlm'] + mx_vlm_in_view != varz['last_mx_vlm'] ): yrange = (0, mx_vlm_in_view * 1.375) vlm_chart.view._set_yrange( @@ -782,7 +773,7 @@ def graphics_update_cycle( ) profiler('`vlm_chart.view._set_yrange()`') # print(f'mx vlm: {last_mx_vlm} -> {mx_vlm_in_view}') - vars['last_mx_vlm'] = mx_vlm_in_view + varz['last_mx_vlm'] = mx_vlm_in_view # update all downstream FSPs for curve_name, viz in vlm_vizs.items(): @@ -1114,10 +1105,11 @@ async def display_symbol_data( # - gradient in "lightness" based on liquidity, or lifetime in derivs? 
palette = itertools.cycle([ # curve color, last bar curve color - ['i3', 'gray'], - ['grayer', 'bracket'], ['grayest', 'i3'], ['default_dark', 'default'], + + ['grayer', 'bracket'], + ['i3', 'gray'], ]) pis: dict[str, list[pgo.PlotItem, pgo.PlotItem]] = {} From 98de22a740a77719a6fbf2a93112656b8ad5cf1f Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 19 Dec 2022 17:26:15 -0500 Subject: [PATCH 85/96] Enable the experimental `QPrivatePath` functionality from latest `pyqtgraph` --- piker/ui/_pg_overrides.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/piker/ui/_pg_overrides.py b/piker/ui/_pg_overrides.py index 397954fd..b7c0b9aa 100644 --- a/piker/ui/_pg_overrides.py +++ b/piker/ui/_pg_overrides.py @@ -54,6 +54,10 @@ def _do_overrides() -> None: pg.functions.invertQTransform = invertQTransform pg.PlotItem = PlotItem + # enable "QPainterPathPrivate for faster arrayToQPath" from + # https://github.com/pyqtgraph/pyqtgraph/pull/2324 + pg.setConfigOption('enableExperimental', True) + # NOTE: the below customized type contains all our changes on a method # by method basis as per the diff: From f30a48b82cafa27a4345299616776df8b255e598 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 21 Dec 2022 13:21:49 -0500 Subject: [PATCH 86/96] Use `np.diff()` on last 16 samples instead of only last datum pair --- piker/ui/_dataviz.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index ba34616e..33047678 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -285,7 +285,10 @@ class Viz(msgspec.Struct): # , frozen=True): ) -> float: if self._index_step is None: index = self.shm.array[self.index_field] - self._index_step = index[-1] - index[-2] + self._index_step = max( + np.diff(index[-16:]).max(), + 1, + ) return self._index_step From cdec4782f04eb95d0e30d1678b6c31c5d12955c6 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Thu, 22 Dec 2022 13:11:38 -0500 Subject: [PATCH 87/96] Add 
commented append slice-len sanity check --- piker/data/_formatters.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index ebeb83fb..ccb8d315 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -330,6 +330,9 @@ class IncrementalFormatter(msgspec.Struct): ) self.xy_nd_stop += append_len profiler('appened xy history: {append_length}') + # sanity + # slice_ln = post_slice.stop - post_slice.start + # assert append_len == slice_ln view_changed: bool = False view_range: tuple[int, int] = (ivl, ivr) From a5eed8fc1efcdd63345ba1f266a0c7f299fe4f89 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 23 Dec 2022 14:21:55 -0500 Subject: [PATCH 88/96] Fix x-axis labelling when using an epoch domain Previously with array-int indexing we had to map the input x-domain "indexes" passed to `DynamicDateAxis._indexes_to_timestr()`. In the epoch-time indexing case we obviously don't need to lookup time stamps from the underlying shm array and can instead just cast to `int` and relay the values verbatim. Further, this patch includes some style adjustments to `AxisLabel` to better enable multi-feed chart overlays by avoiding L1 label clutter when multiple y-axes are stacked adjacent: - adjust the `Axis` typical max string to include a couple spaces suffix providing for a bit more margin between side-by-side y-axes. - make the default label (fill) color the "default" from the global color scheme and drop it's opacity to .9 - add some new label placement options and use them in the `.boundingRect()` method: * `._x/y_br_offset` for relatively shifting the overall label relative to it's parent axis. * `._y_txt_h_scaling` for increasing the bounding rect's height without including more whitespace in the label's text content. 
- ensure labels have a high z-value such that by default they are always placed "on top" such that when we adjust the l1 labels they can be set to a lower value and thus never obscure the last-price label. --- piker/ui/_axes.py | 134 +++++++++++++++++++++++++++++----------------- 1 file changed, 86 insertions(+), 48 deletions(-) diff --git a/piker/ui/_axes.py b/piker/ui/_axes.py index 52278819..4d197f5a 100644 --- a/piker/ui/_axes.py +++ b/piker/ui/_axes.py @@ -49,7 +49,7 @@ class Axis(pg.AxisItem): def __init__( self, plotitem: pgo.PlotItem, - typical_max_str: str = '100 000.000', + typical_max_str: str = '100 000.000 ', text_color: str = 'bracket', lru_cache_tick_strings: bool = True, **kwargs @@ -95,9 +95,10 @@ class Axis(pg.AxisItem): self.setPen(_axis_pen) # this is the text color - # self.setTextPen(pg.mkPen(hcolor(text_color))) self.text_color = text_color + # generate a bounding rect based on sizing to a "typical" + # maximum length-ed string defined as init default. self.typical_br = _font._qfm.boundingRect(typical_max_str) # size the pertinent axis dimension to a "typical value" @@ -154,8 +155,8 @@ class Axis(pg.AxisItem): pi: pgo.PlotItem, name: None | str = None, digits: None | int = 2, - # axis_name: str = 'right', - bg_color='bracket', + bg_color='default', + fg_color='black', ) -> YAxisLabel: @@ -165,22 +166,20 @@ class Axis(pg.AxisItem): digits = digits or 2 # TODO: ``._ysticks`` should really be an attr on each - # ``PlotItem`` no instead of the (containing because of - # overlays) widget? + # ``PlotItem`` now instead of the containing widget (because of + # overlays) ? 
# add y-axis "last" value label sticky = self._stickies[name] = YAxisLabel( pi=pi, parent=self, - # TODO: pass this from symbol data - digits=digits, - opacity=1, + digits=digits, # TODO: pass this from symbol data + opacity=0.9, # slight see-through bg_color=bg_color, + fg_color=fg_color, ) pi.sigRangeChanged.connect(sticky.update_on_resize) - # pi.addItem(sticky) - # pi.addItem(last) return sticky @@ -244,7 +243,6 @@ class PriceAxis(Axis): self._min_tick = size def size_to_values(self) -> None: - # self.typical_br = _font._qfm.boundingRect(typical_max_str) self.setWidth(self.typical_br.width()) # XXX: drop for now since it just eats up h space @@ -302,27 +300,44 @@ class DynamicDateAxis(Axis): # XX: ARGGGGG AG:LKSKDJF:LKJSDFD chart = self.pi.chart_widget - flow = chart._vizs[chart.name] - shm = flow.shm - bars = shm.array - first = shm._first.value + viz = chart._vizs[chart.name] + shm = viz.shm + array = shm.array + times = array['time'] + i_0, i_l = times[0], times[-1] - bars_len = len(bars) - times = bars['time'] + if ( + (indexes[0] < i_0 + and indexes[-1] < i_l) + or + (indexes[0] > i_0 + and indexes[-1] > i_l) + ): + return [] - epochs = times[list( - map( - int, - filter( - lambda i: i > 0 and i < bars_len, - (i-first for i in indexes) + if viz.index_field == 'index': + arr_len = times.shape[0] + first = shm._first.value + epochs = times[ + list( + map( + int, + filter( + lambda i: i > 0 and i < arr_len, + (i - first for i in indexes) + ) + ) ) - ) - )] + ] + else: + epochs = list(map(int, indexes)) # TODO: **don't** have this hard coded shift to EST # delay = times[-1] - times[-2] - dts = np.array(epochs, dtype='datetime64[s]') + dts = np.array( + epochs, + dtype='datetime64[s]', + ) # see units listing: # https://numpy.org/devdocs/reference/arrays.datetime.html#datetime-units @@ -340,24 +355,39 @@ class DynamicDateAxis(Axis): spacing: float, ) -> list[str]: + + return self._indexes_to_timestrs(values) + + # NOTE: handy for debugging the lru cache # info 
= self.tickStrings.cache_info() # print(info) - return self._indexes_to_timestrs(values) class AxisLabel(pg.GraphicsObject): - _x_margin = 0 - _y_margin = 0 + # relative offsets *OF* the bounding rect relative + # to parent graphics object. + # eg. | => <_x_br_offset> => | | + _x_br_offset: float = 0 + _y_br_offset: float = 0 + + # relative offsets of text *within* bounding rect + # eg. | <_x_margin> => | + _x_margin: float = 0 + _y_margin: float = 0 + + # multiplier of the text content's height in order + # to force a larger (y-dimension) bounding rect. + _y_txt_h_scaling: float = 1 def __init__( self, parent: pg.GraphicsItem, digits: int = 2, - bg_color: str = 'bracket', + bg_color: str = 'default', fg_color: str = 'black', - opacity: int = 1, # XXX: seriously don't set this to 0 + opacity: int = .8, # XXX: seriously don't set this to 0 font_size: str = 'default', use_arrow: bool = True, @@ -368,6 +398,7 @@ class AxisLabel(pg.GraphicsObject): self.setParentItem(parent) self.setFlag(self.ItemIgnoresTransformations) + self.setZValue(100) # XXX: pretty sure this is faster self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache) @@ -399,14 +430,14 @@ class AxisLabel(pg.GraphicsObject): p: QtGui.QPainter, opt: QtWidgets.QStyleOptionGraphicsItem, w: QtWidgets.QWidget + ) -> None: - """Draw a filled rectangle based on the size of ``.label_str`` text. + ''' + Draw a filled rectangle based on the size of ``.label_str`` text. Subtypes can customize further by overloading ``.draw()``. 
- """ - # p.setCompositionMode(QtWidgets.QPainter.CompositionMode_SourceOver) - + ''' if self.label_str: # if not self.rect: @@ -417,7 +448,11 @@ class AxisLabel(pg.GraphicsObject): p.setFont(self._dpifont.font) p.setPen(self.fg_color) - p.drawText(self.rect, self.text_flags, self.label_str) + p.drawText( + self.rect, + self.text_flags, + self.label_str, + ) def draw( self, @@ -425,6 +460,8 @@ class AxisLabel(pg.GraphicsObject): rect: QtCore.QRectF ) -> None: + p.setOpacity(self.opacity) + if self._use_arrow: if not self.path: self._draw_arrow_path() @@ -432,15 +469,13 @@ class AxisLabel(pg.GraphicsObject): p.drawPath(self.path) p.fillPath(self.path, pg.mkBrush(self.bg_color)) - # this adds a nice black outline around the label for some odd - # reason; ok by us - p.setOpacity(self.opacity) - # this cause the L1 labels to glitch out if used in the subtype # and it will leave a small black strip with the arrow path if # done before the above - p.fillRect(self.rect, self.bg_color) - + p.fillRect( + self.rect, + self.bg_color, + ) def boundingRect(self): # noqa ''' @@ -484,15 +519,18 @@ class AxisLabel(pg.GraphicsObject): txt_h, txt_w = txt_br.height(), txt_br.width() # print(f'wsw: {self._dpifont.boundingRect(" ")}') - # allow subtypes to specify a static width and height + # allow subtypes to override width and height h, w = self.size_hint() - # print(f'axis size: {self._parent.size()}') - # print(f'axis geo: {self._parent.geometry()}') self.rect = QtCore.QRectF( - 0, 0, + + # relative bounds offsets + self._x_br_offset, + self._y_br_offset, + (w or txt_w) + self._x_margin / 2, - (h or txt_h) + self._y_margin / 2, + + (h or txt_h) * self._y_txt_h_scaling + (self._y_margin / 2), ) # print(self.rect) # hb = self.path.controlPointRect() From bfc6014ad32ad2a18ba274891413ce2dac14ac12 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Fri, 13 Jan 2023 14:12:23 -0500 Subject: [PATCH 89/96] Fix history array name --- piker/fsp/_engine.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/piker/fsp/_engine.py b/piker/fsp/_engine.py index f5af2ac2..37852cfc 100644 --- a/piker/fsp/_engine.py +++ b/piker/fsp/_engine.py @@ -188,7 +188,7 @@ async def fsp_compute( history_by_field['time'] = src_time[-len(history_by_field):] - history['time'] = src.array['time'] + history_output['time'] = src.array['time'] # TODO: XXX: # THERE'S A BIG BUG HERE WITH THE `index` field since we're From 7ce3f10e73b121a7ca59ac558fe26e79ea4f6377 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Sat, 24 Dec 2022 18:18:06 -0500 Subject: [PATCH 90/96] Just-offset-from-arrow-marker on slow chart We want the fast and slow chart to behave the same on calls to `Viz.default_view()` so adjust the offset calc to make both work: - just offset by the line len regardless of step / uppx - add back the `should_line: bool` output from `render_bar_items()` (and use it to set a new `ds_allowed: bool` guard variable) so that we can bypass calling the m4 downsampler unless the bars have been switched to the interpolation line graphic (which we normally required before any downsampling of OHLC graphics data). Further, this drops use of the `use_vr: bool` flag from all rendering since we pretty much always use it by default. --- piker/ui/_dataviz.py | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 33047678..88d4eada 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -213,6 +213,7 @@ def render_baritems( graphics, r, should_redraw, + should_line, ) @@ -547,6 +548,7 @@ class Viz(msgspec.Struct): # , frozen=True): # BUT the ``in_view`` slice DOES.. 
read_slc = slice(lbar_i, rbar_i) in_view = array[lbar_i: rbar_i + 1] + # in_view = array[lbar_i-1: rbar_i+1] # XXX: same as ^ # to_draw = array[lbar - ifirst:(rbar - ifirst) + 1] @@ -570,7 +572,6 @@ class Viz(msgspec.Struct): # , frozen=True): def update_graphics( self, - use_vr: bool = True, render: bool = True, array_key: str | None = None, @@ -609,6 +610,7 @@ class Viz(msgspec.Struct): # , frozen=True): return graphics should_redraw: bool = False + ds_allowed: bool = True # TODO: probably specialize ``Renderer`` types instead of # these logic checks? @@ -622,6 +624,7 @@ class Viz(msgspec.Struct): # , frozen=True): graphics, r, should_redraw, + in_line, ) = render_baritems( self, graphics, @@ -629,6 +632,7 @@ class Viz(msgspec.Struct): # , frozen=True): profiler, **kwargs, ) + ds_allowed = in_line elif not r: if isinstance(graphics, StepCurve): @@ -655,7 +659,6 @@ class Viz(msgspec.Struct): # , frozen=True): # ``Curve`` derivative case(s): array_key = array_key or self.name - # print(array_key) # ds update config new_sample_rate: bool = False @@ -670,6 +673,7 @@ class Viz(msgspec.Struct): # , frozen=True): if ( uppx > 1 and abs(uppx_diff) >= 1 + and ds_allowed ): log.debug( f'{array_key} sampler change: {self._last_uppx} -> {uppx}' @@ -681,6 +685,10 @@ class Viz(msgspec.Struct): # , frozen=True): should_ds = True should_redraw = True + # "back to source" case: + # this more or less skips use of the m4 downsampler + # inside ``Renderer.render()`` which results in a path + # drawn verbatim to match the xy source data. elif ( uppx <= 2 and self._in_ds @@ -703,7 +711,6 @@ class Viz(msgspec.Struct): # , frozen=True): array_key, profiler, uppx=uppx, - # use_vr=True, # TODO: better way to detect and pass this? 
# if we want to eventually cache renderers for a given uppx @@ -838,7 +845,7 @@ class Viz(msgspec.Struct): # , frozen=True): def default_view( self, bars_from_y: int = int(616 * 3/8), - y_offset: int = 0, + y_offset: int = 0, # in datums do_ds: bool = True, ) -> None: @@ -898,17 +905,8 @@ class Viz(msgspec.Struct): # , frozen=True): # orient by offset from the y-axis including # space to compensate for the L1 labels. if not y_offset: + _, offset = chartw.pre_l1_xs() - # we get the L1 spread label "length" in view coords and - # make sure it doesn't colide with the right-most datum in - # view. - _, l1_len = chartw.pre_l1_xs() - offset = l1_len/(uppx*step) - - # if no L1 label is present just offset by a few datums - # from the y-axis. - if chartw._max_l1_line_len == 0: - offset += 3*step else: offset = (y_offset * step) + uppx*step @@ -973,9 +971,7 @@ class Viz(msgspec.Struct): # , frozen=True): i_step = lasts['index'] # last index-specific step. i_step_t = lasts['time'] # last time step. - fqsn = self.flume.symbol.fqsn - if is_1m: - print(f'{fqsn} 1Min index: {i_step}, t: {i_step_t}') + # fqsn = self.flume.symbol.fqsn # check if "last (is) in view" -> is a real-time update necessary? 
if self.index_field == 'index': From a7d78a3f405cfe8312f8bd9bc28d5acf5cf31f93 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Tue, 27 Dec 2022 13:13:21 -0500 Subject: [PATCH 91/96] Use left-style index search on RHS scan as well --- piker/data/_pathops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index cea9ebf7..90ea4d5e 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -411,7 +411,7 @@ def slice_from_time( new_read_i_stop = np.searchsorted( times[read_i_start:], i_stop_t, - side='right', + side='left', ) if ( From fc17187ff43da6e012f980e7a80cb8e0cce7e100 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 28 Dec 2022 00:55:16 -0500 Subject: [PATCH 92/96] Drop edge case from `slice_from_time()` Doesn't seem like we really need to handle the situation where the start or stop input time stamps are outside the index range of the data since the new binary search handling via `numpy.searchsorted()` covers this case at minimal runtime cost and with an equally correct output. Allows us to drop some other indexing endpoint internal variables as well. 
--- piker/data/_pathops.py | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/piker/data/_pathops.py b/piker/data/_pathops.py index 90ea4d5e..25e2c451 100644 --- a/piker/data/_pathops.py +++ b/piker/data/_pathops.py @@ -309,25 +309,9 @@ def slice_from_time( times = arr['time'] t_first = round(times[0]) - t_last = round(times[-1]) - index = arr['index'] - i_first = index[0] - # i_last = index[-1] read_i_max = arr.shape[0] - if ( - start_t < t_first - and stop_t > t_last - ): - read_i_start = 0 - read_i_stop = read_i_max - read_slc = slice( - 0, - read_i_max, - ) - return read_slc - if step is None: step = round(times[-1] - times[-2]) if step == 0: @@ -359,11 +343,9 @@ def slice_from_time( # NOTE: this is usually the result of a time series with time gaps # where it is expected that each index step maps to a uniform step # in the time stamp series. - i_iv_start = index[read_i_start] t_iv_start = times[read_i_start] if ( - i_iv_start >= i_first - and t_iv_start > i_start_t + t_iv_start > i_start_t ): # do a binary search for the best index mapping to ``start_t`` # given we measured an overshoot using the uniform-time-step @@ -396,7 +378,6 @@ def slice_from_time( # ) read_i_start = new_read_i_start - 1 - # i_iv_stop = index[read_i_stop - 1] t_iv_stop = times[read_i_stop - 1] if ( t_iv_stop > i_stop_t @@ -441,6 +422,7 @@ def slice_from_time( # NOTE: if caller needs absolute buffer indices they can # slice the buffer abs index like so: + # index = arr['index'] # abs_indx = index[read_slc] # abs_slc = slice( # int(abs_indx[0]), From 459cbfdbad911045c69071c6e380d95bff90a128 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 28 Dec 2022 01:30:34 -0500 Subject: [PATCH 93/96] Further fixes `Viz.default_view()` and `.index_step()` Use proper uppx scaling when either of scaling the data to the x-domain index-range or when the uppx is < 1 (now that we support it) such that both the fast and slow chart always appropriately scale and 
offset to the y-axis with the last datum graphic just adjacent to the order line arrow markers. Further this fixes the `.index_step()` calc to use the "earliest" 16 values to compute the expected sample step diff since the last set often contained gaps due to start up race conditions and generated unexpected/incorrect output. Further this drops the `.curve_width_pxs()` method and replaces it with `.px_width()`, taken from the graphics object API and instead returns the pixel account for the whole view width instead of the x-domain-data-range within the view. --- piker/ui/_dataviz.py | 105 +++++++++++++++++++++++++++---------------- 1 file changed, 66 insertions(+), 39 deletions(-) diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py index 88d4eada..07ead769 100644 --- a/piker/ui/_dataviz.py +++ b/piker/ui/_dataviz.py @@ -286,10 +286,14 @@ class Viz(msgspec.Struct): # , frozen=True): ) -> float: if self._index_step is None: index = self.shm.array[self.index_field] - self._index_step = max( - np.diff(index[-16:]).max(), - 1, - ) + isample = index[:16] + mxdiff = np.diff(isample).max() + self._index_step = max(mxdiff, 1) + if ( + mxdiff < 1 + or 1 < mxdiff < 60 + ): + breakpoint() return self._index_step @@ -298,6 +302,8 @@ class Viz(msgspec.Struct): # , frozen=True): lbar: int, rbar: int, + use_caching: bool = True, + ) -> Optional[tuple[float, float]]: ''' Compute the cached max and min y-range values for a given @@ -308,15 +314,17 @@ class Viz(msgspec.Struct): # , frozen=True): # TODO: hash the slice instead maybe? 
# https://stackoverflow.com/a/29980872 rkey = (round(lbar), round(rbar)) - cached_result = self._mxmns.get(rkey) - do_print = False # (self.index_step() == 60) - if cached_result: - if do_print: - print( - f'{self.name} CACHED maxmin\n' - f'{rkey} -> {cached_result}' - ) - return cached_result + + do_print: bool = False + if use_caching: + cached_result = self._mxmns.get(rkey) + if cached_result: + if do_print: + print( + f'{self.name} CACHED maxmin\n' + f'{rkey} -> {cached_result}' + ) + return cached_result shm = self.shm if shm is None: @@ -332,14 +340,15 @@ class Viz(msgspec.Struct): # , frozen=True): stop_t=rbar, step=self.index_step(), ) - slice_view = arr[read_slc] else: ifirst = arr[0]['index'] - slice_view = arr[ - lbar - ifirst: + read_slc = slice( + lbar - ifirst, (rbar - ifirst) + 1 - ] + ) + + slice_view = arr[read_slc] if not slice_view.size: log.warning(f'{self.name} no maxmin in view?') @@ -366,14 +375,13 @@ class Viz(msgspec.Struct): # , frozen=True): mxmn = ylow, yhigh if ( do_print - # and self.index_step() > 1 ): s = 3 print( f'{self.name} MANUAL ohlc={self.is_ohlc} maxmin:\n' f'{rkey} -> {mxmn}\n' f'read_slc: {read_slc}\n' - f'abs_slc: {slice_view["index"]}\n' + # f'abs_slc: {slice_view["index"]}\n' f'first {s}:\n{slice_view[:s]}\n' f'last {s}:\n{slice_view[-s:]}\n' ) @@ -610,7 +618,7 @@ class Viz(msgspec.Struct): # , frozen=True): return graphics should_redraw: bool = False - ds_allowed: bool = True + ds_allowed: bool = True # guard for m4 activation # TODO: probably specialize ``Renderer`` types instead of # these logic checks? @@ -624,7 +632,7 @@ class Viz(msgspec.Struct): # , frozen=True): graphics, r, should_redraw, - in_line, + ds_allowed, # in line mode? 
) = render_baritems( self, graphics, @@ -632,7 +640,6 @@ class Viz(msgspec.Struct): # , frozen=True): profiler, **kwargs, ) - ds_allowed = in_line elif not r: if isinstance(graphics, StepCurve): @@ -807,7 +814,7 @@ class Viz(msgspec.Struct): # , frozen=True): # XXX: pretty sure we don't need this? # if isinstance(g, Curve): # with dsg.reset_cache(): - uppx = self._last_uppx + uppx = round(self._last_uppx) y = y[-uppx:] ymn, ymx = y.min(), y.max() # print(f'drawing uppx={uppx} mxmn line: {ymn}, {ymx}') @@ -829,19 +836,6 @@ class Viz(msgspec.Struct): # , frozen=True): # print(f'updating NOT DS curve {self.name}') g.update() - def curve_width_pxs(self) -> float: - ''' - Return the width of the current datums in view in pixel units. - - ''' - _, lbar, rbar, _ = self.bars_range() - return self.view.mapViewToDevice( - QLineF( - lbar, 0, - rbar, 0 - ) - ).length() - def default_view( self, bars_from_y: int = int(616 * 3/8), @@ -900,19 +894,33 @@ class Viz(msgspec.Struct): # , frozen=True): # l->r distance in scene units, no larger then data span data_diff = last_datum - first_datum - rl_diff = min(vr - vl, data_diff) + rl_diff = vr - vl + rescale_to_data: bool = False + # new_uppx: float = 1 + + if rl_diff > data_diff: + rescale_to_data = True + rl_diff = data_diff + new_uppx: float = data_diff / self.px_width() # orient by offset from the y-axis including # space to compensate for the L1 labels. if not y_offset: - _, offset = chartw.pre_l1_xs() + _, l1_offset = chartw.pre_l1_xs() + + offset = l1_offset + + if ( + rescale_to_data + ): + offset = (offset / uppx) * new_uppx else: offset = (y_offset * step) + uppx*step # align right side of view to the rightmost datum + the selected # offset from above. 
- r_reset = last_datum + offset + r_reset = (self.graphics.x_last() or last_datum) + offset # no data is in view so check for the only 2 sane cases: # - entire view is LEFT of data @@ -1054,3 +1062,22 @@ class Viz(msgspec.Struct): # , frozen=True): do_rt_update, should_tread, ) + + def px_width(self) -> float: + ''' + Return the width of the view box containing + this graphic in pixel units. + + ''' + vb = self.plot.vb + if not vb: + return 0 + + vl, vr = self.view_range() + + return vb.mapViewToDevice( + QLineF( + vl, 0, + vr, 0, + ) + ).length() From 6a0c36922e799891747c0148d6c5b388edad2522 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 28 Dec 2022 02:02:19 -0500 Subject: [PATCH 94/96] Drop `._index_step` from formatters and instead defer to `Viz.index_step()` --- piker/data/_formatters.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/piker/data/_formatters.py b/piker/data/_formatters.py index ccb8d315..3e440fe8 100644 --- a/piker/data/_formatters.py +++ b/piker/data/_formatters.py @@ -99,15 +99,13 @@ class IncrementalFormatter(msgspec.Struct): _last_vr: tuple[float, float] | None = None _last_ivdr: tuple[float, float] | None = None - _index_step_size: float = None - @property def index_step_size(self) -> float: ''' Readonly value computed on first ``.diff()`` call. 
''' - return self._index_step_size + return self.viz.index_step() def __repr__(self) -> str: msg = ( @@ -183,11 +181,6 @@ class IncrementalFormatter(msgspec.Struct): nd_stop = self.xy_nd_stop = src_stop align_index = array[self.index_field] - self._index_step_size = ( - align_index[-1] - - - align_index[-2] - ) # compute the length diffs between the first/last index entry in # the input data and the last indexes we have on record from the From c1988c4d8deed7d37af8639b0b67d6a7c25e66f3 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Mon, 26 Dec 2022 14:46:46 -0500 Subject: [PATCH 95/96] Add a parent-type for graphics: `FlowGraphic` Factor some common methods into the parent type: - `.x_uppx()` for reading the horizontal units-per-pixel. - `.x_last()` for reading the "closest to y-axis" last datum coordinate for zooming "around" during mouse interaction. - `.px_width()` for computing the max width of any curve in view in pixels. Adjust all previous derived `pg.GraphicsObject` child types to now inherit from this new parent and in particular enable proper `.x_uppx()` support to `BarItems`. --- piker/ui/_curve.py | 126 ++++++++++++++++++++++++--------------------- piker/ui/_ohlc.py | 9 ++-- 2 files changed, 73 insertions(+), 62 deletions(-) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index b5d128d6..f22dcd14 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -51,7 +51,59 @@ _line_styles: dict[str, int] = { } -class Curve(pg.GraphicsObject): +class FlowGraphic(pg.GraphicsObject): + ''' + Base class with minimal interface for `QPainterPath` implemented, + real-time updated "data flow" graphics. + + See subtypes below. + + ''' + # sub-type customization methods + declare_paintables: Optional[Callable] = None + sub_paint: Optional[Callable] = None + + # TODO: can we remove this? 
+ # sub_br: Optional[Callable] = None + + def x_uppx(self) -> int: + + px_vecs = self.pixelVectors()[0] + if px_vecs: + xs_in_px = px_vecs.x() + return round(xs_in_px) + else: + return 0 + + def x_last(self) -> float: + ''' + Return the last most x value of the last line segment. + + ''' + return self._last_line.x1() + + def px_width(self) -> float: + ''' + Return the width of the view box containing + this graphic in pixel units. + + ''' + vb = self.getViewBox() + if not vb: + return 0 + + vr = self.viewRect() + vl, vr = int(vr.left()), int(vr.right()) + + return vb.mapViewToDevice( + QLineF( + vl, 0, + vr, 0, + ) + ).length() + + +class Curve(FlowGraphic): ''' A faster, simpler, append friendly version of ``pyqtgraph.PlotCurveItem`` built for highly customizable real-time @@ -81,11 +133,6 @@ class Curve(pg.GraphicsObject): ''' - # sub-type customization methods - declare_paintables: Optional[Callable] = None - sub_paint: Optional[Callable] = None - # sub_br: Optional[Callable] = None - def __init__( self, *args, @@ -95,7 +142,6 @@ class Curve(pg.GraphicsObject): fill_color: Optional[str] = None, style: str = 'solid', name: Optional[str] = None, - use_fpath: bool = True, **kwargs @@ -110,11 +156,11 @@ class Curve(pg.GraphicsObject): # self._last_cap: int = 0 self.path: Optional[QPainterPath] = None - # additional path used for appends which tries to avoid - # triggering an update/redraw of the presumably larger - # historical ``.path`` above. - self.use_fpath = use_fpath - self.fast_path: Optional[QPainterPath] = None + # additional path that can be optionally used for appends which + # tries to avoid triggering an update/redraw of the presumably + # larger historical ``.path`` above. the flag to enable + # this behaviour is found in `Renderer.render()`. + self.fast_path: QPainterPath | None = None # TODO: we can probably just dispense with the parent since # we're basically only using the pen setting now... 
@@ -154,58 +200,19 @@ class Curve(pg.GraphicsObject): # endpoint (something we saw on trade rate curves) self.setCacheMode(QGraphicsItem.DeviceCoordinateCache) - # XXX: see explanation for different caching modes: - # https://stackoverflow.com/a/39410081 - # seems to only be useful if we don't re-generate the entire - # QPainterPath every time - # curve.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache) - + # XXX-NOTE-XXX: graphics caching. + # see explanation for different caching modes: + # https://stackoverflow.com/a/39410081 seems to only be useful + # if we don't re-generate the entire QPainterPath every time # don't ever use this - it's a colossal nightmare of artefacts # and is disastrous for performance. - # curve.setCacheMode(QtWidgets.QGraphicsItem.ItemCoordinateCache) + # self.setCacheMode(QtWidgets.QGraphicsItem.ItemCoordinateCache) # allow sub-type customization declare = self.declare_paintables if declare: declare() - # TODO: probably stick this in a new parent - # type which will contain our own version of - # what ``PlotCurveItem`` had in terms of base - # functionality? A `FlowGraphic` maybe? - def x_uppx(self) -> int: - - px_vecs = self.pixelVectors()[0] - if px_vecs: - xs_in_px = px_vecs.x() - return round(xs_in_px) - else: - return 0 - - def x_last(self) -> float: - ''' - Return the last most x value of the last line segment. - - ''' - return self._last_line.x2() - - def px_width(self) -> float: - - vb = self.getViewBox() - if not vb: - return 0 - - vr = self.viewRect() - l, r = int(vr.left()), int(vr.right()) - - start, stop = self._xrange - lbar = max(l, start) - rbar = min(r, stop) - - return vb.mapViewToDevice( - QLineF(lbar, 0, rbar, 0) - ).length() - # XXX: lol brutal, the internals of `CurvePoint` (inherited by # our `LineDot`) required ``.getData()`` to work.. 
def getData(self): @@ -370,6 +377,9 @@ class Curve(pg.GraphicsObject): x = src_data[index_field] y = src_data[array_key] + x_last = x[-1] + x_2last = x[-2] + # draw the "current" step graphic segment so it # lines up with the "middle" of the current # (OHLC) sample. @@ -379,8 +389,8 @@ class Curve(pg.GraphicsObject): # from last datum to current such that # the end of line touches the "beginning" # of the current datum step span. - x[-2], y[-2], - x[-1], y[-1], + x_2last , y[-2], + x_last, y[-1], ) return x, y diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index 288af70d..9712bb9d 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -36,6 +36,7 @@ from PyQt5.QtCore import ( from PyQt5.QtGui import QPainterPath +from ._curve import FlowGraphic from .._profile import pg_profile_enabled, ms_slower_then from ._style import hcolor from ..log import get_logger @@ -94,7 +95,7 @@ def bar_from_ohlc_row( return [hl, o, c] -class BarItems(pg.GraphicsObject): +class BarItems(FlowGraphic): ''' "Price range" bars graphics rendered from a OHLC sampled sequence. @@ -125,9 +126,9 @@ class BarItems(pg.GraphicsObject): self.path = QPainterPath() self._last_bar_lines: tuple[QLineF, ...] | None = None - def x_uppx(self) -> int: - # we expect the downsample curve report this. - return 0 + # def x_uppx(self) -> int: + # # we expect the downsample curve report this. + # return 0 def x_last(self) -> float: ''' From 340045af77aecad63f8a97c7c2d55c40dc366a26 Mon Sep 17 00:00:00 2001 From: Tyler Goodlet Date: Wed, 28 Dec 2022 01:20:55 -0500 Subject: [PATCH 96/96] Make `FlowGraphic.x_last()` be optionally `None` In the case where the last-datum-graphic hasn't been created yet, simply return a `None` from this method so the caller can choose to ignore the output. Further, drop `.px_width()` since it makes more sense defined on `Viz` as well as the previously commented `BarItems.x_uppx()` method. 
Also, don't round the `.x_uppx()` output since it can then be used when < 1 to do x-domain scaling during high zoom usage. --- piker/ui/_curve.py | 30 +++++------------------------- piker/ui/_ohlc.py | 15 ++++++++------- 2 files changed, 13 insertions(+), 32 deletions(-) diff --git a/piker/ui/_curve.py b/piker/ui/_curve.py index f22dcd14..a3287341 100644 --- a/piker/ui/_curve.py +++ b/piker/ui/_curve.py @@ -70,37 +70,17 @@ class FlowGraphic(pg.GraphicsObject): px_vecs = self.pixelVectors()[0] if px_vecs: - xs_in_px = px_vecs.x() - return round(xs_in_px) + return px_vecs.x() else: return 0 - def x_last(self) -> float: + def x_last(self) -> float | None: ''' - Return the last most x value of the last line segment. + Return the last most x value of the last line segment or if not + drawn yet, ``None``. ''' - return self._last_line.x1() - - def px_width(self) -> float: - ''' - Return the width of the view box containing - this graphic in pixel units. - - ''' - vb = self.getViewBox() - if not vb: - return 0 - - vr = self.viewRect() - vl, vr = int(vr.left()), int(vr.right()) - - return vb.mapViewToDevice( - QLineF( - vl, 0, - vr, 0, - ) - ).length() + return self._last_line.x1() if self._last_line else None class Curve(FlowGraphic): diff --git a/piker/ui/_ohlc.py b/piker/ui/_ohlc.py index 9712bb9d..de421cd2 100644 --- a/piker/ui/_ohlc.py +++ b/piker/ui/_ohlc.py @@ -126,16 +126,17 @@ class BarItems(FlowGraphic): self.path = QPainterPath() self._last_bar_lines: tuple[QLineF, ...] | None = None - # def x_uppx(self) -> int: - # # we expect the downsample curve report this. - # return 0 - - def x_last(self) -> float: + def x_last(self) -> None | float: ''' - Return the last most x value of the close line segment. + Return the last most x value of the close line segment + or if not drawn yet, ``None``. 
''' - return self._last_bar_lines[-1].x2() + if self._last_bar_lines: + close_arm_line = self._last_bar_lines[-1] + return close_arm_line.x2() if close_arm_line else None + else: + return None # Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect def boundingRect(self):