piker/piker/data/_source.py

292 lines
7.3 KiB
Python
Raw Normal View History

2020-11-06 17:23:14 +00:00
# piker: trading gear for hackers
# Copyright (C) 2018-present Tyler Goodlet (in stewardship for piker0)
2020-11-06 17:23:14 +00:00
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
numpy data source conversion helpers.
"""
2022-03-01 00:47:07 +00:00
from __future__ import annotations
2022-02-22 23:16:12 +00:00
from typing import Any
2020-10-22 18:05:35 +00:00
import decimal
import numpy as np
import pandas as pd
2021-08-25 14:41:35 +00:00
from pydantic import BaseModel, validate_arguments
# from numba import from_dtype
# field layout of a single OHLC sample (no leading index column)
ohlc_fields = [
    ('time', float),
    ('open', float),
    ('high', float),
    ('low', float),
    ('close', float),
    ('volume', int),
    ('bar_wap', float),
]

# identical layout but with a leading integer 'index' column
ohlc_with_index = [('index', int)] + ohlc_fields

# our minimum structured array layout for ohlc data
base_iohlc_dtype = np.dtype(ohlc_with_index)
base_ohlc_dtype = np.dtype(ohlc_fields)

# TODO: for now need to construct this manually for readonly arrays, see
# https://github.com/numba/numba/issues/4511
# numba_ohlc_dtype = from_dtype(base_ohlc_dtype)
2020-07-15 12:42:01 +00:00
# map time frame "keys" to minutes values
# (i.e. the bar period each key denotes, expressed in 1m units)
tf_in_1m = {
    '1m': 1,
    '5m': 5,
    '15m': 15,
    '30m': 30,
    '1h': 60,
    '4h': 240,
    '1d': 1440,
}
def mk_fqsn(
    provider: str,
    symbol: str,

) -> str:
    '''
    Generate a "fully qualified symbol name": a lower-cased,
    reverse-hierarchical cross broker/provider symbol key of the
    form ``<symbol>.<provider>``.

    '''
    return f'{symbol}.{provider}'.lower()
2020-10-22 18:05:35 +00:00
def float_digits(
    value: float,
) -> int:
    '''
    Return the number of decimal digits of precision carried by
    ``value`` (e.g. ``0.001`` -> ``3``); zero maps to zero.

    '''
    if value == 0:
        return 0

    # the ``Decimal`` exponent is negative for fractional values;
    # negate it to get the digit count.
    exponent = decimal.Decimal(str(value)).as_tuple().exponent
    return int(-exponent)
2020-06-17 15:44:54 +00:00
def ohlc_zeros(length: int) -> np.ndarray:
    """Return a zeroed, ``length``-row OHLC structured array.

    For "why a structarray" see here: https://stackoverflow.com/a/52443038
    Bottom line, they're faster then ``np.recarray``.

    """
    return np.zeros(length, dtype=base_ohlc_dtype)
2020-06-17 15:44:54 +00:00
def uncons_fqsn(fqsn: str) -> tuple[str, str, str]:
    '''
    Unpack a fully-qualified-symbol-name into a
    ``(broker, symbol.venue, suffix)`` tuple.

    Raises ``ValueError`` if ``fqsn`` does not split into exactly
    3 or 4 dot-separated tokens.  (The original code unpacked blindly
    and so raised opaque unpack errors for 2 or 5+ tokens.)
    '''
    # TODO: probably reverse the order of all this XD
    tokens = fqsn.split('.')
    if len(tokens) == 4:
        symbol, venue, suffix, broker = tokens
    elif len(tokens) == 3:
        symbol, venue, broker = tokens
        suffix = ''
    else:
        raise ValueError(
            f'Invalid fqsn {fqsn!r}: expected 3 or 4 dot-separated tokens'
        )
    return (
        broker,
        '.'.join([symbol, venue]),
        suffix,
    )
2021-02-06 19:38:00 +00:00
class Symbol(BaseModel):
    """Cross-broker symbol meta-data container.

    Normalizes per-broker instrument info (tick sizes, precision
    digits, venue suffix) behind one pydantic model keyed by a
    canonical symbol ``key``.
    """
    key: str

    tick_size: float = 0.01  # min price increment
    lot_tick_size: float = 0.0  # "volume" precision as min step value
    tick_size_digits: int = 2
    lot_size_digits: int = 0
    suffix: str = ''

    # per-broker raw info dicts, keyed by broker name
    broker_info: dict[str, dict[str, Any]] = {}

    # specifies a "class" of financial instrument
    # ex. stock, futer, option, bond etc.

    @classmethod
    def from_broker_info(
        cls,
        broker: str,
        symbol: str,
        info: dict[str, Any],
        suffix: str = '',

    ) -> Symbol:
        '''
        Build a ``Symbol`` from a broker-provided ``info`` dict,
        deriving the precision-digit fields from the tick sizes.
        '''
        tick_size = info.get('price_tick_size', 0.01)
        lot_tick_size = info.get('lot_tick_size', 0.0)

        return cls(
            key=symbol,
            tick_size=tick_size,
            lot_tick_size=lot_tick_size,
            tick_size_digits=float_digits(tick_size),
            lot_size_digits=float_digits(lot_tick_size),
            suffix=suffix,
            broker_info={broker: info},
        )

    @classmethod
    def from_fqsn(
        cls,
        fqsn: str,
        info: dict[str, Any],

    ) -> Symbol:
        '''
        Build a ``Symbol`` by unpacking a fully-qualified symbol name.
        '''
        broker, key, suffix = uncons_fqsn(fqsn)
        return cls.from_broker_info(
            broker,
            key,
            info=info,
            suffix=suffix,
        )

    @property
    def type_key(self) -> str:
        # "class" of instrument (e.g. 'stock', 'future') as reported
        # by the first broker's info dict.
        return list(self.broker_info.values())[0]['asset_type']

    @property
    def brokers(self) -> list[str]:
        return list(self.broker_info.keys())

    def nearest_tick(self, value: float) -> float:
        '''
        Return the nearest tick value based on mininum increment.
        '''
        mult = 1 / self.tick_size
        return round(value * mult) / mult

    def front_feed(self) -> tuple[str, str]:
        '''
        Return the "current" feed key for this symbol.
        (i.e. the broker + symbol key in a tuple).
        '''
        return (
            list(self.broker_info.keys())[0],
            self.key,
        )

    def front_fqsn(self) -> str:
        broker, key = self.front_feed()
        if self.suffix:
            tokens = (key, self.suffix, broker)
        else:
            tokens = (key, broker)
        fqsn = '.'.join(tokens)
        return fqsn

    def iterfqsns(self) -> list[str]:
        '''
        Return an fqsn for each broker in ``broker_info``.

        BUG FIX: previously this called ``mk_fqsn(self.key, broker)``
        which swapped the (provider, symbol) args producing
        ``broker.key``, and appended the suffix *after* the broker —
        both inconsistent with ``front_fqsn()``/``uncons_fqsn()``.
        '''
        keys = []
        for broker in self.broker_info:
            if self.suffix:
                tokens = (self.key, self.suffix, broker)
            else:
                tokens = (self.key, broker)
            keys.append('.'.join(tokens).lower())

        return keys
2021-09-10 15:35:00 +00:00
def from_df(
    df: pd.DataFrame,
    source=None,
    default_tf=None,

) -> np.recarray:
    """Convert an OHLC formatted ``pandas.DataFrame`` to a numpy
    structured array.

    ``source`` and ``default_tf`` are unused but retained for caller
    compatibility.  Assumes the frame's (reset) datetime column is
    named either ``'Date'`` or ``'date'`` and holds timestamp-like
    objects exposing ``.timestamp()``.
    """
    df.reset_index(inplace=True)

    # hackery to convert field names: feeds disagree on the
    # capitalization of the datetime column.
    date = 'date' if 'date' in df.columns else 'Date'

    # convert to POSIX time
    df[date] = [d.timestamp() for d in df[date]]

    # try to rename from some camel case
    columns = {
        'Date': 'time',
        'date': 'time',
        'Open': 'open',
        'High': 'high',
        'Low': 'low',
        'Close': 'close',
        'Volume': 'volume',

        # most feeds are providing this over session anchored
        'vwap': 'bar_wap',

        # XXX: ib_insync calls this the "wap of the bar"
        # but no clue what is actually is...
        # https://github.com/pikers/piker/issues/119#issuecomment-729120988
        'average': 'bar_wap',
    }
    df = df.rename(columns=columns)

    # drop any columns not in our minimal ohlc layout; snapshot the
    # column index first since we mutate ``df`` inside the loop.
    for name in list(df.columns):
        # if name not in base_ohlc_dtype.names[1:]:
        if name not in base_ohlc_dtype.names:
            del df[name]

    # TODO: it turns out column access on recarrays is actually slower:
    # https://jakevdp.github.io/PythonDataScienceHandbook/02.09-structured-data-numpy.html#RecordArrays:-Structured-Arrays-with-a-Twist
    # it might make sense to make these structured arrays?
    array = df.to_records(index=False)
    _nan_to_closest_num(array)

    return array
def _nan_to_closest_num(array: np.ndarray):
"""Return interpolated values instead of NaN.
2020-10-22 18:05:35 +00:00
"""
for col in ['open', 'high', 'low', 'close']:
mask = np.isnan(array[col])
if not mask.size:
continue
array[col][mask] = np.interp(
np.flatnonzero(mask), np.flatnonzero(~mask), array[col][~mask]
)