Limit ohlc queries to 800k datums to avoid `purerpc` size error

l1_precision_fix
Tyler Goodlet 2022-04-08 11:47:22 -04:00
parent ce3229df7d
commit 15630f465d
1 changed file with 17 additions and 5 deletions


@@ -258,16 +258,29 @@ class Storage:
         if fqsn not in syms:
             return {}
 
+        tfstr = tf_in_1s[1]
+        params = Params(
+            symbols=fqsn,
+            timeframe=tfstr,
+            attrgroup='OHLCV',
+            # limit_from_start=True,
+
+            # TODO: figure the max limit here given the
+            # ``purepc`` msg size limit of purerpc: 33554432
+            limit=int(800e3),
+        )
+
         if timeframe is None:
             log.info(f'starting {fqsn} tsdb granularity scan..')
             # loop through and try to find highest granularity
             for tfstr in tf_in_1s.values():
                 try:
                     log.info(f'querying for {tfstr}@{fqsn}')
-                    result = await client.query(
-                        Params(fqsn, tfstr, 'OHLCV',)
-                    )
+                    params.set('timeframe', tfstr)
+                    result = await client.query(params)
                     break
                 except purerpc.grpclib.exceptions.UnknownError:
                     # XXX: this is already logged by the container and
                     # thus shows up through `marketstored` logs relay.
@@ -277,8 +290,7 @@ class Storage:
                 return {}
 
         else:
-            tfstr = tf_in_1s[timeframe]
-            result = await client.query(Params(fqsn, tfstr, 'OHLCV',))
+            result = await client.query(params)
 
         # TODO: it turns out column access on recarrays is actually slower:
         # https://jakevdp.github.io/PythonDataScienceHandbook/02.09-structured-data-numpy.html#RecordArrays:-Structured-Arrays-with-a-Twist
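For a sense of where the 800e3 cap sits against the 33,554,432-byte purerpc message limit referenced in the TODO above, here is a rough, purely illustrative budget check; the per-row byte figure is an assumption about raw OHLCV field sizes, not something stated in the commit:

# Illustrative only: what per-datum byte budget does an 800k-row cap
# leave under purerpc's 33,554,432-byte message size limit?
PURERPC_MAX_MSG_BYTES = 33_554_432
QUERY_LIMIT = int(800e3)

print(PURERPC_MAX_MSG_BYTES / QUERY_LIMIT)  # ~41.9 bytes per datum

# For comparison, a raw OHLCV row of one int64 epoch plus five float64
# fields is 8 + 5 * 8 = 48 bytes before any wire-format overhead, so
# 800k rows is in the right ballpark but, as the TODO in the diff notes,
# the exact safe maximum still needs to be pinned down.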