Compare commits
16 commits: 310_plus ... mxmn_from_
Author | SHA1 | Date |
---|---|---|
Tyler Goodlet | be7afdaa89 | |
Tyler Goodlet | 1c561207f5 | |
Tyler Goodlet | ed2c962bb9 | |
Tyler Goodlet | 147ceca016 | |
Tyler Goodlet | 03a7940f83 | |
Tyler Goodlet | dd2a9f74f1 | |
Tyler Goodlet | 49c720af3c | |
Tyler Goodlet | c620517543 | |
Tyler Goodlet | a425c29ef1 | |
Tyler Goodlet | 783914c7fe | |
Tyler Goodlet | 920a394539 | |
Tyler Goodlet | e977597cd0 | |
Tyler Goodlet | 7a33ba64f1 | |
Tyler Goodlet | 191b94b67c | |
Tyler Goodlet | 4ad7b073c3 | |
Tyler Goodlet | d92ff9c7a0 | |
@@ -114,7 +114,7 @@ async def fsp_compute(
         dict[str, np.ndarray],  # multi-output case
         np.ndarray,  # single output case
     ]
-    history_output = await out_stream.__anext__()
+    history_output = await anext(out_stream)

     func_name = func.__name__
     profiler(f'{func_name} generated history')
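Side note on the `anext()` swap above: `anext()` landed as a builtin in Python 3.10 and is the public spelling of the `__anext__()` dunder call. A tiny, self-contained illustration; the generator and the `asyncio` runner here are stand-ins and not code from this repo (which drives its streams under `trio`):

```python
import asyncio


async def ticks():
    # stand-in async generator; the real stream comes from the fsp engine
    for i in range(3):
        yield i


async def main() -> None:
    stream = ticks()

    # builtin added in Python 3.10; equivalent to `await stream.__anext__()`
    first = await anext(stream)
    print('first:', first)

    # drain the rest normally
    print('rest:', [i async for i in stream])


asyncio.run(main())
```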
@@ -374,7 +374,8 @@ async def cascade(
                 'key': dst_shm_token,
                 'first': dst._first.value,
                 'last': dst._last.value,
-            }})
+            }
+        })
         return tracker, index

     def is_synced(
@@ -230,25 +230,26 @@ class GodWidget(QWidget):
         # - we'll probably want per-instrument/provider state here?
         # change the order config form over to the new chart

-        # XXX: since the pp config is a singleton widget we have to
-        # also switch it over to the new chart's interal-layout
-        # self.linkedsplits.chart.qframe.hbox.removeWidget(self.pp_pane)
-        chart = linkedsplits.chart
-
         # chart is already in memory so just focus it
         linkedsplits.show()
         linkedsplits.focus()
         linkedsplits.graphics_cycle()
         await trio.sleep(0)

-        # resume feeds *after* rendering chart view asap
-        chart.resume_all_feeds()
+        # XXX: since the pp config is a singleton widget we have to
+        # also switch it over to the new chart's interal-layout
+        # self.linkedsplits.chart.qframe.hbox.removeWidget(self.pp_pane)
+        chart = linkedsplits.chart

-        # TODO: we need a check to see if the chart
-        # last had the xlast in view, if so then shift so it's
-        # still in view, if the user was viewing history then
-        # do nothing yah?
-        chart.default_view()
+        # resume feeds *after* rendering chart view asap
+        if chart:
+            chart.resume_all_feeds()
+
+            # TODO: we need a check to see if the chart
+            # last had the xlast in view, if so then shift so it's
+            # still in view, if the user was viewing history then
+            # do nothing yah?
+            chart.default_view()

         self.linkedsplits = linkedsplits
         symbol = linkedsplits.symbol
@@ -760,9 +761,18 @@ class ChartPlotWidget(pg.PlotWidget):

         self.pi_overlay: PlotItemOverlay = PlotItemOverlay(self.plotItem)

+        # indempotent startup flag for auto-yrange subsys
+        # to detect the "first time" y-domain graphics begin
+        # to be shown in the (main) graphics view.
+        self._on_screen: bool = False
+
     def resume_all_feeds(self):
-        for feed in self._feeds.values():
-            self.linked.godwidget._root_n.start_soon(feed.resume)
+        try:
+            for feed in self._feeds.values():
+                self.linked.godwidget._root_n.start_soon(feed.resume)
+        except RuntimeError:
+            # TODO: cancel the qtractor runtime here?
+            raise

     def pause_all_feeds(self):
         for feed in self._feeds.values():
@@ -859,7 +869,8 @@ class ChartPlotWidget(pg.PlotWidget):

     def default_view(
         self,
-        bars_from_y: int = 3000,
+        bars_from_y: int = 616,
+        do_ds: bool = True,

     ) -> None:
         '''
@@ -920,8 +931,11 @@ class ChartPlotWidget(pg.PlotWidget):
             max=end,
             padding=0,
         )
-        self.view.maybe_downsample_graphics()
-        view._set_yrange()
+
+        if do_ds:
+            self.view.maybe_downsample_graphics()
+            view._set_yrange()
+
         try:
             self.linked.graphics_cycle()
         except IndexError:
@@ -1255,7 +1269,6 @@ class ChartPlotWidget(pg.PlotWidget):
         If ``bars_range`` is provided use that range.

         '''
-        # print(f'Chart[{self.name}].maxmin()')
         profiler = pg.debug.Profiler(
             msg=f'`{str(self)}.maxmin(name={name})`: `{self.name}`',
             disabled=not pg_profile_enabled(),
@@ -1287,11 +1300,18 @@ class ChartPlotWidget(pg.PlotWidget):

             key = round(lbar), round(rbar)
             res = flow.maxmin(*key)
-            if res == (None, None):
-                log.error(
+
+            if (
+                res is None
+            ):
+                log.warning(
                     f"{flow_key} no mxmn for bars_range => {key} !?"
                 )
                 res = 0, 0
+                if not self._on_screen:
+                    self.default_view(do_ds=False)
+                    self._on_screen = True

             profiler(f'yrange mxmn: {key} -> {res}')
+            # print(f'{flow_key} yrange mxmn: {key} -> {res}')
             return res
@@ -223,14 +223,20 @@ def ds_m4(
     assert frames >= (xrange / uppx)

     # call into ``numba``
-    nb, i_win, y_out = _m4(
+    (
+        nb,
+        x_out,
+        y_out,
+        ymn,
+        ymx,
+    ) = _m4(
         x,
         y,

         frames,

         # TODO: see func below..
-        # i_win,
+        # x_out,
         # y_out,

         # first index in x data to start at
@@ -243,10 +249,11 @@ def ds_m4(
     # filter out any overshoot in the input allocation arrays by
     # removing zero-ed tail entries which should start at a certain
     # index.
-    i_win = i_win[i_win != 0]
-    y_out = y_out[:i_win.size]
+    x_out = x_out[x_out != 0]
+    y_out = y_out[:x_out.size]

-    return nb, i_win, y_out
+    # print(f'M4 output ymn, ymx: {ymn},{ymx}')
+    return nb, x_out, y_out, ymn, ymx


 @jit(
@@ -260,8 +267,8 @@ def _m4(

     frames: int,

-    # TODO: using this approach by having the ``.zeros()`` alloc lines
-    # below, in put python was causing segs faults and alloc crashes..
+    # TODO: using this approach, having the ``.zeros()`` alloc lines
+    # below in pure python, there were segs faults and alloc crashes..
     # we might need to see how it behaves with shm arrays and consider
     # allocating them once at startup?
@@ -274,14 +281,22 @@ def _m4(
     x_start: int,
     step: float,

-) -> int:
-    # nbins = len(i_win)
-    # count = len(xs)
+) -> tuple[
+    int,
+    np.ndarray,
+    np.ndarray,
+    float,
+    float,
+]:
+    '''
+    Implementation of the m4 algorithm in ``numba``:
+    http://www.vldb.org/pvldb/vol7/p797-jugel.pdf

+    '''
     # these are pre-allocated and mutated by ``numba``
     # code in-place.
     y_out = np.zeros((frames, 4), ys.dtype)
-    i_win = np.zeros(frames, xs.dtype)
+    x_out = np.zeros(frames, xs.dtype)

     bincount = 0
     x_left = x_start
@@ -295,24 +310,34 @@ def _m4(

     # set all bins in the left-most entry to the starting left-most x value
     # (aka a row broadcast).
-    i_win[bincount] = x_left
+    x_out[bincount] = x_left
     # set all y-values to the first value passed in.
     y_out[bincount] = ys[0]

+    # full input y-data mx and mn
+    mx: float = -np.inf
+    mn: float = np.inf
+
+    # compute OHLC style max / min values per window sized x-frame.
     for i in range(len(xs)):
+
         x = xs[i]
         y = ys[i]
+
         if x < x_left + step:   # the current window "step" is [bin, bin+1)
-            y_out[bincount, 1] = min(y, y_out[bincount, 1])
-            y_out[bincount, 2] = max(y, y_out[bincount, 2])
+            ymn = y_out[bincount, 1] = min(y, y_out[bincount, 1])
+            ymx = y_out[bincount, 2] = max(y, y_out[bincount, 2])
             y_out[bincount, 3] = y
+            mx = max(mx, ymx)
+            mn = min(mn, ymn)
+
         else:
             # Find the next bin
             while x >= x_left + step:
                 x_left += step

             bincount += 1
-            i_win[bincount] = x_left
+            x_out[bincount] = x_left
             y_out[bincount] = y

-    return bincount, i_win, y_out
+    return bincount, x_out, y_out, mn, mx
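For context on the `_m4()`/`ds_m4()` hunks above: the kernel now folds a running min/max over the whole input y-series while it bins, so the 5-tuple return carries the data's y-range back to callers. A hedged usage sketch with synthetic arrays; it assumes the new 5-tuple signature shown above and that `ds_m4` is importable (module path assumed, not shown in this compare view):

```python
import numpy as np

# from piker.ui._compression import ds_m4  # assumed module path

# synthetic stand-ins for the shm-backed series the chart normally feeds in
x = np.arange(100_000, dtype=float)
y = np.sin(x / 500.0)

# new return: (bin count, x bins, (frames, 4) OHLC-style y bins, y-min, y-max)
nb, x_out, y_out, ymn, ymx = ds_m4(x, y, 16)  # ~16 source datums per pixel

# ymn/ymx bound the *input* data, so the render path can reuse them
# instead of re-running np.min()/np.max() over the full array.
print(nb, x_out.shape, y_out.shape, ymn, ymx)
```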
@@ -105,6 +105,10 @@ def chart_maxmin(
     mn, mx = out

     mx_vlm_in_view = 0
+
+    # TODO: we need to NOT call this to avoid a manual
+    # np.max/min trigger and especially on the vlm_chart
+    # flows which aren't shown.. like vlm?
     if vlm_chart:
         out = vlm_chart.maxmin()
         if out:
@@ -222,33 +226,9 @@ async def graphics_update_loop(
     tick_margin = 3 * tick_size

     chart.show()
-    # view = chart.view
     last_quote = time.time()
     i_last = ohlcv.index

-    # async def iter_drain_quotes():
-    #     # NOTE: all code below this loop is expected to be synchronous
-    #     # and thus draw instructions are not picked up jntil the next
-    #     # wait / iteration.
-    #     async for quotes in stream:
-    #         while True:
-    #             try:
-    #                 moar = stream.receive_nowait()
-    #             except trio.WouldBlock:
-    #                 yield quotes
-    #                 break
-    #             else:
-    #                 for sym, quote in moar.items():
-    #                     ticks_frame = quote.get('ticks')
-    #                     if ticks_frame:
-    #                         quotes[sym].setdefault(
-    #                             'ticks', []).extend(ticks_frame)
-    #                     # print('pulled extra')
-
-    #     yield quotes
-
-    # async for quotes in iter_drain_quotes():
-
     ds = linked.display_state = DisplayState(**{
         'quotes': {},
         'linked': linked,
@@ -293,6 +273,7 @@ async def graphics_update_loop(

         # chart isn't active/shown so skip render cycle and pause feed(s)
         if chart.linked.isHidden():
+            print('skipping update')
             chart.pause_all_feeds()
             continue

@@ -416,10 +397,8 @@ def graphics_update_cycle(
         )
         or trigger_all
     ):
-        # TODO: we should track and compute whether the last
-        # pixel in a curve should show new data based on uppx
-        # and then iff update curves and shift?
         chart.increment_view(steps=i_diff)
+        # chart.increment_view(steps=i_diff + round(append_diff - uppx))

         if vlm_chart:
             vlm_chart.increment_view(steps=i_diff)
@@ -477,7 +456,6 @@ def graphics_update_cycle(
     ):
         chart.update_graphics_from_flow(
             chart.name,
-            # do_append=uppx < update_uppx,
             do_append=do_append,
         )

@@ -337,6 +337,7 @@ class Flow(msgspec.Struct):  # , frozen=True):
     name: str
     plot: pg.PlotItem
     graphics: Union[Curve, BarItems]
+    yrange: tuple[float, float] = None

     # in some cases a flow may want to change its
     # graphical "type" or, "form" when downsampling,
|
||||||
lbar: int,
|
lbar: int,
|
||||||
rbar: int,
|
rbar: int,
|
||||||
|
|
||||||
) -> tuple[float, float]:
|
) -> Optional[tuple[float, float]]:
|
||||||
'''
|
'''
|
||||||
Compute the cached max and min y-range values for a given
|
Compute the cached max and min y-range values for a given
|
||||||
x-range determined by ``lbar`` and ``rbar``.
|
x-range determined by ``lbar`` and ``rbar`` or ``None``
|
||||||
|
if no range can be determined (yet).
|
||||||
|
|
||||||
'''
|
'''
|
||||||
rkey = (lbar, rbar)
|
rkey = (lbar, rbar)
|
||||||
|
@@ -399,40 +401,44 @@ class Flow(msgspec.Struct):  # , frozen=True):

         shm = self.shm
         if shm is None:
-            mxmn = None
-
-        else:  # new block for profiling?..
-            arr = shm.array
-
-            # build relative indexes into shm array
-            # TODO: should we just add/use a method
-            # on the shm to do this?
-            ifirst = arr[0]['index']
-            slice_view = arr[
-                lbar - ifirst:
-                (rbar - ifirst) + 1
-            ]
-
-            if not slice_view.size:
-                mxmn = None
-
-            else:
-                if self.is_ohlc:
-                    ylow = np.min(slice_view['low'])
-                    yhigh = np.max(slice_view['high'])
-
-                else:
-                    view = slice_view[self.name]
-                    ylow = np.min(view)
-                    yhigh = np.max(view)
-
-                mxmn = ylow, yhigh
-
-        if mxmn is not None:
-            # cache new mxmn result
-            self._mxmns[rkey] = mxmn
-
-        return mxmn
+            return None
+
+        arr = shm.array
+
+        # build relative indexes into shm array
+        # TODO: should we just add/use a method
+        # on the shm to do this?
+        ifirst = arr[0]['index']
+        slice_view = arr[
+            lbar - ifirst:
+            (rbar - ifirst) + 1
+        ]
+
+        if not slice_view.size:
+            return None
+
+        elif self.yrange:
+            mxmn = self.yrange
+            # print(f'{self.name} M4 maxmin: {mxmn}')
+
+        else:
+            if self.is_ohlc:
+                ylow = np.min(slice_view['low'])
+                yhigh = np.max(slice_view['high'])
+
+            else:
+                view = slice_view[self.name]
+                ylow = np.min(view)
+                yhigh = np.max(view)
+
+            mxmn = ylow, yhigh
+            # print(f'{self.name} MANUAL maxmin: {mxmin}')
+
+        # cache result for input range
+        assert mxmn
+        self._mxmns[rkey] = mxmn
+
+        return mxmn

     def view_range(self) -> tuple[int, int]:
         '''
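Since `Flow.maxmin()` now returns `Optional[tuple[float, float]]` (and prefers the cached `self.yrange` filled in by the M4 pass), callers switch from a `(None, None)` sentinel to a plain `None` check. A minimal caller-side sketch; `safe_yrange` and its `flow` parameter are hypothetical, only the `.maxmin()` return type comes from the diff:

```python
from typing import Optional


def safe_yrange(
    flow,        # hypothetical: any object exposing .maxmin(lbar, rbar) -> Optional[tuple]
    lbar: int,
    rbar: int,
) -> tuple[float, float]:
    # mirror the guard the chart-level maxmin() applies in the hunks above
    res: Optional[tuple[float, float]] = flow.maxmin(lbar, rbar)
    if res is None:
        # no range determinable yet (no shm or empty in-view slice)
        return 0.0, 0.0
    return res
```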
@@ -628,10 +634,13 @@ class Flow(msgspec.Struct):  # , frozen=True):
             # source data so we clear our path data in prep
             # to generate a new one from original source data.
             new_sample_rate = True
-            showing_src_data = True
             should_ds = False
             should_redraw = True

+            showing_src_data = True
+            # reset yrange to be computed from source data
+            self.yrange = None
+
         # MAIN RENDER LOGIC:
         # - determine in view data and redraw on range change
         # - determine downsampling ops if needed
@@ -657,6 +666,10 @@ class Flow(msgspec.Struct):  # , frozen=True):

                 **rkwargs,
             )
+            if showing_src_data:
+                # print(f"{self.name} SHOWING SOURCE")
+                # reset yrange to be computed from source data
+                self.yrange = None

             if not out:
                 log.warning(f'{self.name} failed to render!?')
@@ -664,6 +677,9 @@ class Flow(msgspec.Struct):  # , frozen=True):

             path, data, reset = out

+            # if self.yrange:
+            # print(f'flow {self.name} yrange from m4: {self.yrange}')
+
             # XXX: SUPER UGGGHHH... without this we get stale cache
             # graphics that don't update until you downsampler again..
             if reset:
|
||||||
# xy-path data transform: convert source data to a format
|
# xy-path data transform: convert source data to a format
|
||||||
# able to be passed to a `QPainterPath` rendering routine.
|
# able to be passed to a `QPainterPath` rendering routine.
|
||||||
if not len(hist):
|
if not len(hist):
|
||||||
|
# XXX: this might be why the profiler only has exits?
|
||||||
return
|
return
|
||||||
|
|
||||||
x_out, y_out, connect = self.format_xy(
|
x_out, y_out, connect = self.format_xy(
|
||||||
|
@@ -1144,11 +1161,14 @@ class Renderer(msgspec.Struct):

             elif should_ds and uppx > 1:

-                x_out, y_out = xy_downsample(
+                x_out, y_out, ymn, ymx = xy_downsample(
                     x_out,
                     y_out,
                     uppx,
                 )
+                self.flow.yrange = ymn, ymx
+                # print(f'{self.flow.name} post ds: ymn, ymx: {ymn},{ymx}')
+
                 reset = True
                 profiler(f'FULL PATH downsample redraw={should_ds}')
                 self._in_ds = True
@@ -639,20 +639,25 @@ async def open_vlm_displays(
         names: list[str],

     ) -> tuple[float, float]:
+        '''
+        Flows "group" maxmin loop; assumes all named flows
+        are in the same co-domain and thus can be sorted
+        as one set.

+        Iterates all the named flows and calls the chart
+        api to find their range values and return.
+
+        TODO: really we should probably have a more built-in API
+        for this?
+
+        '''
         mx = 0
         for name in names:
-
-            mxmn = chart.maxmin(name=name)
-            if mxmn:
-                ymax = mxmn[1]
-                if ymax > mx:
-                    mx = ymax
+            ymn, ymx = chart.maxmin(name=name)
+            mx = max(mx, ymx)

         return 0, mx

-    chart.view.maxmin = partial(multi_maxmin, names=['volume'])
-
     # TODO: fix the x-axis label issue where if you put
     # the axis on the left it's totally not lined up...
     # show volume units value on LHS (for dinkus)
@@ -776,6 +781,7 @@ async def open_vlm_displays(

     ) -> None:
         for name in names:
+
             if 'dark' in name:
                 color = dark_vlm_color
             elif 'rate' in name:
@@ -923,6 +923,7 @@ class ChartView(ViewBox):
                 # XXX: super important to be aware of this.
                 # or not flow.graphics.isVisible()
             ):
+                # print(f'skipping {flow.name}')
                 continue

             # pass in no array which will read and render from the last
@@ -49,12 +49,17 @@ def xy_downsample(

     x_spacer: float = 0.5,

-) -> tuple[np.ndarray, np.ndarray]:
+) -> tuple[
+    np.ndarray,
+    np.ndarray,
+    float,
+    float,
+]:

     # downsample whenever more then 1 pixels per datum can be shown.
     # always refresh data bounds until we get diffing
     # working properly, see above..
-    bins, x, y = ds_m4(
+    bins, x, y, ymn, ymx = ds_m4(
         x,
         y,
         uppx,

@@ -67,7 +72,7 @@ def xy_downsample(
     )).flatten()
     y = y.flatten()

-    return x, y
+    return x, y, ymn, ymx


 @njit(
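Taken together, the `xy_downsample()`/`ds_m4()` hunks and the `Renderer` change (`self.flow.yrange = ymn, ymx`) mean the y-range computed during downsampling is cached on the flow and reused by later autorange calls instead of re-scanning the in-view data. A toy sketch of that relationship; the class and method names here are made up, and only `xy_downsample` with its new 4-tuple return comes from the diff:

```python
from typing import Optional

import numpy as np

# xy_downsample would be imported from the module patched above
# (its path is not shown in this compare view).


class CachedYRange:
    '''
    Toy model of the caching shown above: downsample once, remember the
    (ymn, ymx) that M4 computed, reuse it when autoranging.

    '''
    def __init__(self) -> None:
        self.yrange: Optional[tuple[float, float]] = None

    def downsample(self, x: np.ndarray, y: np.ndarray, uppx: float):
        x_ds, y_ds, ymn, ymx = xy_downsample(x, y, uppx)
        self.yrange = (ymn, ymx)  # analogous to Renderer setting flow.yrange
        return x_ds, y_ds

    def maxmin(self, y_in_view: np.ndarray) -> tuple[float, float]:
        # prefer the cached M4 result; fall back to a manual scan
        if self.yrange is not None:
            return self.yrange
        return float(np.min(y_in_view)), float(np.max(y_in_view))
```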
setup.py (23 changes)
@@ -57,7 +57,6 @@ setup(
         # from github currently (see requirements.txt)
         # 'trimeter',  # not released yet..
         # 'tractor',
-        # asyncvnc,

         # brokers
         'asks==2.4.8',
@@ -72,34 +71,32 @@ setup(

         # UI
         'PyQt5',
-        # 'pyqtgraph', from our fork see reqs.txt
-        'qdarkstyle >= 3.0.2',  # themeing
-        'fuzzywuzzy[speedup]',  # fuzzy search
+        'pyqtgraph',
+        'qdarkstyle >= 3.0.2',
+        # fuzzy search
+        'fuzzywuzzy[speedup]',

         # tsdbs
-        # anyio-marketstore  # from gh see reqs.txt
+        'pymarketstore',
     ],
     extras_require={
+
+        # serialization
         'tsdb': [
             'docker',
         ],

     },
     tests_require=['pytest'],
-    python_requires=">=3.10",
-    keywords=[
-        "async",
-        "trading",
-        "finance",
-        "quant",
-        "charting",
-    ],
+    python_requires=">=3.9",  # literally for ``datetime.datetime.fromisoformat``...
+    keywords=["async", "trading", "finance", "quant", "charting"],
     classifiers=[
         'Development Status :: 3 - Alpha',
         'License :: OSI Approved :: ',
         'Operating System :: POSIX :: Linux',
         "Programming Language :: Python :: Implementation :: CPython",
         "Programming Language :: Python :: 3 :: Only",
+        "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
         'Intended Audience :: Financial and Insurance Industry',
         'Intended Audience :: Science/Research',