diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 65b020f1..89c43132 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -43,16 +43,21 @@ jobs:
- name: Checkout
uses: actions/checkout@v3
- - name: Build DB container
- run: docker build -t piker:elastic dockering/elastic
+ # elastic only
+ # - name: Build DB container
+ # run: docker build -t piker:elastic dockering/elastic
- name: Setup python
- uses: actions/setup-python@v3
+ uses: actions/setup-python@v4
with:
python-version: '3.10'
+ # elastic only
+ # - name: Install dependencies
+ # run: pip install -U .[es] -r requirements-test.txt -r requirements.txt --upgrade-strategy eager
+
- name: Install dependencies
- run: pip install -U .[es] -r requirements-test.txt -r requirements.txt --upgrade-strategy eager
+ run: pip install -U . -r requirements-test.txt -r requirements.txt --upgrade-strategy eager
- name: Test suite
run: pytest tests -rs
diff --git a/config/brokers.toml b/config/brokers.toml
index bb57c78d..7205d82c 100644
--- a/config/brokers.toml
+++ b/config/brokers.toml
@@ -1,19 +1,32 @@
[questrade]
-refresh_token = ""
-access_token = ""
-api_server = "https://api06.iq.questrade.com/"
+refresh_token = ''
+access_token = ''
+api_server = 'https://api06.iq.questrade.com/'
expires_in = 1800
-token_type = "Bearer"
+token_type = 'Bearer'
expires_at = 1616095326.355846
+
+[deribit]
+key_id = ''
+key_secret = ''
+
+
[kraken]
-key_descr = "api_0"
-api_key = ""
-secret = ""
+key_descr = ''
+api_key = ''
+secret = ''
+
+
+[kucoin]
+key_id = ''
+key_secret = ''
+key_passphrase = ''
+
[ib]
hosts = [
- "127.0.0.1",
+ '127.0.0.1',
]
# XXX: the order in which ports will be scanned
# (by the `brokerd` daemon-actor)
@@ -30,8 +43,8 @@ ports = [
# is not supported so you have to manually download
# and XML report and put it in a location that can be
# accessed by the ``brokerd.ib`` backend code for parsing.
-flex_token = '666666666666666666666666'
-flex_trades_query_id = '666666' # live account
+flex_token = ''
+flex_trades_query_id = '' # live account
# when clients are being scanned this determines
# which clients are preferred to be used for data
@@ -47,11 +60,6 @@ prefer_data_account = [
# the order in which accounts will be selectable
# in the order mode UI (if found via clients during
# API-app scanning)when a new symbol is loaded.
-paper = "XX0000000"
-margin = "X0000000"
-ira = "X0000000"
-
-
-[deribit]
-key_id = 'XXXXXXXX'
-key_secret = 'Xx_XxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXx'
+paper = 'XX0000000'
+margin = 'X0000000'
+ira = 'X0000000'
diff --git a/config/conf.toml b/config/conf.toml
new file mode 100644
index 00000000..6dde7ee6
--- /dev/null
+++ b/config/conf.toml
@@ -0,0 +1,4 @@
+[network]
+tsdb.backend = 'marketstore'
+tsdb.host = 'localhost'
+tsdb.grpc_port = 5995
diff --git a/dockering/ib/docker-compose.yml b/dockering/ib/docker-compose.yml
index 8c676623..7e6fd05f 100644
--- a/dockering/ib/docker-compose.yml
+++ b/dockering/ib/docker-compose.yml
@@ -2,8 +2,21 @@
# https://github.com/waytrade/ib-gateway-docker/blob/master/docker-compose.yml
version: "3.5"
+
services:
+
ib_gw_paper:
+
+    # apparently java is a mega cuck:
+ # https://stackoverflow.com/a/56895801
+ # https://bugs.openjdk.org/browse/JDK-8150460
+ ulimits:
+ # nproc: 65535
+ nproc: 6000
+ nofile:
+ soft: 2000
+ hard: 3000
+
# other image tags available:
# https://github.com/waytrade/ib-gateway-docker#supported-tags
# image: waytrade/ib-gateway:981.3j
diff --git a/piker/_cacheables.py b/piker/_cacheables.py
index ba7361c3..9be4d079 100644
--- a/piker/_cacheables.py
+++ b/piker/_cacheables.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -14,14 +14,20 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-"""
+'''
Cacheing apis and toolz.
-"""
+'''
from collections import OrderedDict
from contextlib import (
- asynccontextmanager,
+ asynccontextmanager as acm,
+)
+from typing import (
+ Awaitable,
+ Callable,
+ ParamSpec,
+ TypeVar,
)
from tractor.trionics import maybe_open_context
@@ -32,19 +38,54 @@ from .log import get_logger
log = get_logger(__name__)
+T = TypeVar("T")
+P = ParamSpec("P")
-def async_lifo_cache(maxsize=128):
- """Async ``cache`` with a LIFO policy.
+
+# TODO: move this to `tractor.trionics`..
+# - egs. to replicate for tests: https://github.com/aio-libs/async-lru#usage
+# - their suite as well:
+# https://github.com/aio-libs/async-lru/tree/master/tests
+# - asked trio_util about it too:
+# https://github.com/groove-x/trio-util/issues/21
+def async_lifo_cache(
+ maxsize=128,
+
+ # NOTE: typing style was learned from:
+ # https://stackoverflow.com/a/71132186
+) -> Callable[
+ Callable[P, Awaitable[T]],
+ Callable[
+ Callable[P, Awaitable[T]],
+ Callable[P, Awaitable[T]],
+ ],
+]:
+ '''
+ Async ``cache`` with a LIFO policy.
Implemented my own since no one else seems to have
a standard. I'll wait for the smarter people to come
up with one, but until then...
- """
+
+ NOTE: when decorating, due to this simple/naive implementation, you
+ MUST call the decorator like,
+
+ .. code:: python
+
+ @async_lifo_cache()
+ async def cache_target():
+
+ '''
cache = OrderedDict()
- def decorator(fn):
+ def decorator(
+ fn: Callable[P, Awaitable[T]],
+ ) -> Callable[P, Awaitable[T]]:
- async def wrapper(*args):
+ async def decorated(
+ *args: P.args,
+ **kwargs: P.kwargs,
+ ) -> T:
key = args
try:
return cache[key]
@@ -53,16 +94,20 @@ def async_lifo_cache(maxsize=128):
# discard last added new entry
cache.popitem()
- # do it
- cache[key] = await fn(*args)
+ # call underlying
+ cache[key] = await fn(
+ *args,
+ **kwargs,
+ )
return cache[key]
- return wrapper
+ return decorated
return decorator
-@asynccontextmanager
+# TODO: move this to `.brokers.utils`..
+@acm
async def open_cached_client(
brokername: str,
) -> 'Client': # noqa
diff --git a/piker/accounting/__init__.py b/piker/accounting/__init__.py
new file mode 100644
index 00000000..778bdd4e
--- /dev/null
+++ b/piker/accounting/__init__.py
@@ -0,0 +1,115 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+"Accounting for degens": count dem numberz that tracks how much you got
+for tendiez.
+
+'''
+from ..log import get_logger
+
+from ._ledger import (
+ iter_by_dt,
+ Transaction,
+ TransactionLedger,
+ open_trade_ledger,
+)
+from ._pos import (
+ load_pps_from_ledger,
+ open_pps,
+ Position,
+ PpTable,
+)
+from ._mktinfo import (
+ Asset,
+ dec_digits,
+ digits_to_dec,
+ MktPair,
+ Symbol,
+ unpack_fqme,
+)
+from ._allocate import (
+ mk_allocator,
+ Allocator,
+)
+
+log = get_logger(__name__)
+
+__all__ = [
+ 'Allocator',
+ 'Asset',
+ 'MktPair',
+ 'Position',
+ 'PpTable',
+ 'Symbol',
+ 'Transaction',
+ 'TransactionLedger',
+ 'dec_digits',
+ 'digits_to_dec',
+ 'iter_by_dt',
+ 'load_pps_from_ledger',
+ 'mk_allocator',
+ 'open_pps',
+ 'open_trade_ledger',
+ 'unpack_fqme',
+]
+
+
+def get_likely_pair(
+ src: str,
+ dst: str,
+ bs_mktid: str,
+
+) -> str | None:
+ '''
+ Attempt to get the likely trading pair matching a given destination
+ asset `dst: str`.
+
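+    For example (purely illustrative values): with ``src='usd'``,
+    ``dst='eth'`` and ``bs_mktid='ethusd'`` this returns ``'ethusd'``
+    since the src name is found as a suffix and the remaining prefix
+    matches the dst.
+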
+ '''
+ try:
+ src_name_start = bs_mktid.rindex(src)
+ except (
+ ValueError, # substr not found
+ ):
+ # TODO: handle nested positions..(i.e.
+ # positions where the src fiat was used to
+        # buy some other dst which was further used
+ # to buy another dst..)
+ # log.warning(
+ # f'No src fiat {src} found in {bs_mktid}?'
+ # )
+ return
+
+ likely_dst = bs_mktid[:src_name_start]
+ if likely_dst == dst:
+ return bs_mktid
+
+
+if __name__ == '__main__':
+ import sys
+ from pprint import pformat
+
+ args = sys.argv
+    assert len(args) > 1, 'Specify account(s) from `brokers.toml`'
+ args = args[1:]
+ for acctid in args:
+ broker, name = acctid.split('.')
+ trans, updated_pps = load_pps_from_ledger(broker, name)
+ print(
+ f'Processing transactions into pps for {broker}:{acctid}\n'
+ f'{pformat(trans)}\n\n'
+ f'{pformat(updated_pps)}'
+ )
diff --git a/piker/clearing/_allocate.py b/piker/accounting/_allocate.py
similarity index 84%
rename from piker/clearing/_allocate.py
rename to piker/accounting/_allocate.py
index d201368d..b4345785 100644
--- a/piker/clearing/_allocate.py
+++ b/piker/accounting/_allocate.py
@@ -23,9 +23,9 @@ from typing import Optional
from bidict import bidict
-from ..data._source import Symbol
+from ._pos import Position
+from . import MktPair
from ..data.types import Struct
-from ..pp import Position
_size_units = bidict({
@@ -42,7 +42,7 @@ SizeUnit = Enum(
class Allocator(Struct):
- symbol: Symbol
+ mkt: MktPair
# TODO: if we ever want ot support non-uniform entry-slot-proportion
# "sizes"
@@ -114,8 +114,8 @@ class Allocator(Struct):
depending on position / order entry config.
'''
- sym = self.symbol
- ld = sym.lot_size_digits
+ mkt: MktPair = self.mkt
+ ld: int = mkt.size_tick_digits
size_unit = self.size_unit
live_size = live_pp.size
@@ -125,13 +125,13 @@ class Allocator(Struct):
u_per_slot, currency_per_slot = self.step_sizes()
if size_unit == 'units':
- slot_size = u_per_slot
- l_sub_pp = self.units_limit - abs_live_size
+ slot_size: float = u_per_slot
+ l_sub_pp: float = self.units_limit - abs_live_size
elif size_unit == 'currency':
- live_cost_basis = abs_live_size * live_pp.ppu
- slot_size = currency_per_slot / price
- l_sub_pp = (self.currency_limit - live_cost_basis) / price
+ live_cost_basis: float = abs_live_size * live_pp.ppu
+ slot_size: float = currency_per_slot / price
+ l_sub_pp: float = (self.currency_limit - live_cost_basis) / price
else:
raise ValueError(
@@ -141,8 +141,14 @@ class Allocator(Struct):
# an entry (adding-to or starting a pp)
if (
live_size == 0
- or (action == 'buy' and live_size > 0)
- or action == 'sell' and live_size < 0
+ or (
+ action == 'buy'
+ and live_size > 0
+ )
+ or (
+ action == 'sell'
+ and live_size < 0
+ )
):
order_size = min(
slot_size,
@@ -178,7 +184,7 @@ class Allocator(Struct):
order_size = max(slotted_pp, slot_size)
if (
- abs_live_size < slot_size or
+ abs_live_size < slot_size
# NOTE: front/back "loading" heurstic:
# if the remaining pp is in between 0-1.5x a slot's
@@ -187,14 +193,17 @@ class Allocator(Struct):
# **without** going past a net-zero pp. if the pp is
# > 1.5x a slot size, then front load: exit a slot's and
# expect net-zero to be acquired on the final exit.
- slot_size < pp_size < round((1.5*slot_size), ndigits=ld) or
+ or slot_size < pp_size < round((1.5*slot_size), ndigits=ld)
+ or (
- # underlying requires discrete (int) units (eg. stocks)
- # and thus our slot size (based on our limit) would
- # exit a fractional unit's worth so, presuming we aren't
- # supporting a fractional-units-style broker, we need
- # exit the final unit.
- ld == 0 and abs_live_size == 1
+ # underlying requires discrete (int) units (eg. stocks)
+ # and thus our slot size (based on our limit) would
+ # exit a fractional unit's worth so, presuming we aren't
+ # supporting a fractional-units-style broker, we need
+ # exit the final unit.
+ ld == 0
+ and abs_live_size == 1
+ )
):
order_size = abs_live_size
@@ -203,13 +212,14 @@ class Allocator(Struct):
# compute a fractional slots size to display
slots_used = self.slots_used(
Position(
- symbol=sym,
+ mkt=mkt,
size=order_size,
ppu=price,
- bsuid=sym,
+ bs_mktid=mkt.bs_mktid,
)
)
+ # TODO: render an actual ``Executable`` type here?
return {
'size': abs(round(order_size, ndigits=ld)),
'size_digits': ld,
@@ -249,7 +259,7 @@ class Allocator(Struct):
def mk_allocator(
- symbol: Symbol,
+ mkt: MktPair,
startup_pp: Position,
# default allocation settings
@@ -276,6 +286,6 @@ def mk_allocator(
defaults.update(user_def)
return Allocator(
- symbol=symbol,
+ mkt=mkt,
**defaults,
)
diff --git a/piker/accounting/_ledger.py b/piker/accounting/_ledger.py
new file mode 100644
index 00000000..5107f2bb
--- /dev/null
+++ b/piker/accounting/_ledger.py
@@ -0,0 +1,295 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+Trade and transaction ledger processing.
+
+'''
+from __future__ import annotations
+from collections import UserDict
+from contextlib import contextmanager as cm
+from pathlib import Path
+from typing import (
+ Any,
+ Callable,
+ Iterator,
+ Union,
+ Generator
+)
+
+from pendulum import (
+ datetime,
+ DateTime,
+ from_timestamp,
+ parse,
+)
+import tomli_w # for fast ledger writing
+
+from .. import config
+from ..data.types import Struct
+from ..log import get_logger
+from ._mktinfo import (
+ Symbol, # legacy
+ MktPair,
+ Asset,
+)
+
+log = get_logger(__name__)
+
+
+class Transaction(Struct, frozen=True):
+
+ # TODO: unify this with the `MktPair`,
+ # once we have that as a required field,
+ # we don't really need the fqme any more..
+ fqme: str
+
+ tid: Union[str, int] # unique transaction id
+ size: float
+ price: float
+    cost: float # commissions or other additional costs
+ dt: datetime
+
+ # TODO: we can drop this right since we
+ # can instead expect the backend to provide this
+ # via the `MktPair`?
+ expiry: datetime | None = None
+
+ # TODO: drop the Symbol type, construct using
+ # t.sys (the transaction system)
+
+    # the underlying "transaction system", normally one of a ``MktPair``
+    # (a description of a tradable double auction) or a ledger-recorded
+    # ``Asset`` ("ledger" in any sense, so long as you can record
+    # transfers of that asset).
+ sym: MktPair | Asset | Symbol | None = None
+
+ @property
+ def sys(self) -> Symbol:
+ return self.sym
+
+ # (optional) key-id defined by the broker-service backend which
+ # ensures the instrument-symbol market key for this record is unique
+ # in the "their backend/system" sense; i.e. this uid for the market
+ # as defined (internally) in some namespace defined by the broker
+ # service.
+ bs_mktid: str | int | None = None
+
+ def to_dict(self) -> dict:
+ dct = super().to_dict()
+
+ # TODO: switch to sys!
+ dct.pop('sym')
+
+        # ensure we use a pendulum-formatted
+        # ISO-style str here!
+ dct['dt'] = str(self.dt)
+ return dct
+
+
+class TransactionLedger(UserDict):
+ '''
+ Very simple ``dict`` wrapper + ``pathlib.Path`` handle to
+ a TOML formatted transaction file for enabling file writes
+ dynamically whilst still looking exactly like a ``dict`` from the
+ outside.
+
+ '''
+ def __init__(
+ self,
+ ledger_dict: dict,
+ file_path: Path,
+ tx_sort: Callable,
+
+ ) -> None:
+ self.file_path = file_path
+ self.tx_sort = tx_sort
+ super().__init__(ledger_dict)
+
+ def update_from_t(
+ self,
+ t: Transaction,
+ ) -> None:
+ self.data[t.tid] = t.to_dict()
+
+ def iter_trans(
+ self,
+ mkt_by_fqme: dict[str, MktPair],
+ broker: str = 'paper',
+
+ ) -> Generator[
+ tuple[str, Transaction],
+ None,
+ None,
+ ]:
+ '''
+ Deliver trades records in ``(key: str, t: Transaction)``
+ form via generator.
+
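+        A minimal usage sketch (the ``mkt_by_fqme`` map is assumed to
+        be provided by the backend/caller):
+
+        .. code:: python
+
+            for tid, tx in ledger.iter_trans(mkt_by_fqme):
+                ...  # each ``tx`` is a ``Transaction``
+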
+ '''
+ if broker != 'paper':
+ raise NotImplementedError('Per broker support not dun yet!')
+
+ # TODO: lookup some standard normalizer
+ # func in the backend?
+ # from ..brokers import get_brokermod
+ # mod = get_brokermod(broker)
+ # trans_dict = mod.norm_trade_records(self.data)
+
+ # NOTE: instead i propose the normalizer is
+ # a one shot routine (that can be lru cached)
+ # and instead call it for each entry incrementally:
+ # normer = mod.norm_trade_record(txdict)
+
+ # TODO: use tx_sort here yah?
+ for tid, txdict in self.data.items():
+ # special field handling for datetimes
+ # to ensure pendulum is used!
+ fqme = txdict.get('fqme') or txdict['fqsn']
+ dt = parse(txdict['dt'])
+ expiry = txdict.get('expiry')
+
+ mkt = mkt_by_fqme.get(fqme)
+ if not mkt:
+ # we can't build a trans if we don't have
+ # the ``.sys: MktPair`` info, so skip.
+ continue
+
+ tx = Transaction(
+ fqme=fqme,
+ tid=txdict['tid'],
+ dt=dt,
+ price=txdict['price'],
+ size=txdict['size'],
+ cost=txdict.get('cost', 0),
+ bs_mktid=txdict['bs_mktid'],
+
+ # TODO: change to .sys!
+ sym=mkt,
+ expiry=parse(expiry) if expiry else None,
+ )
+ yield tid, tx
+
+ def to_trans(
+ self,
+ **kwargs,
+
+ ) -> dict[str, Transaction]:
+ '''
+ Return entire output from ``.iter_trans()`` in a ``dict``.
+
+ '''
+ return dict(self.iter_trans(**kwargs))
+
+ def write_config(
+ self,
+
+ ) -> None:
+ '''
+        Render the ``self.data`` ledger dict to its TOML file form.
+
+ '''
+ cpy = self.data.copy()
+ towrite: dict[str, Any] = {}
+ for tid, trans in cpy.items():
+
+ # drop key for non-expiring assets
+ txdict = towrite[tid] = self.data[tid]
+ if (
+ 'expiry' in txdict
+ and txdict['expiry'] is None
+ ):
+ txdict.pop('expiry')
+
+ # re-write old acro-key
+ fqme = txdict.get('fqsn')
+ if fqme:
+ txdict['fqme'] = fqme
+
+ with self.file_path.open(mode='wb') as fp:
+ tomli_w.dump(towrite, fp)
+
+
+def iter_by_dt(
+ records: dict[str, Any],
+
+ # NOTE: parsers are looked up in the insert order
+ # so if you know that the record stats show some field
+    # is more common than others, stick it at the top B)
+ parsers: dict[tuple[str], Callable] = {
+ 'dt': None, # parity case
+ 'datetime': parse, # datetime-str
+ 'time': from_timestamp, # float epoch
+ },
+ key: Callable | None = None,
+
+) -> Iterator[tuple[str, dict]]:
+ '''
+ Iterate entries of a ``records: dict`` table sorted by entry recorded
+ datetime presumably set at the ``'dt'`` field in each entry.
+
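+    A minimal usage sketch (assuming each entry carries a parseable
+    ``'dt'``-style field):
+
+    .. code:: python
+
+        for tid, txdict in iter_by_dt(ledger_dict):
+            ...  # entries are yielded oldest-first
+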
+ '''
+ def dyn_parse_to_dt(
+ pair: tuple[str, dict],
+ ) -> DateTime:
+ _, txdict = pair
+ k, v, parser = next(
+ (k, txdict[k], parsers[k]) for k in parsers if k in txdict
+ )
+
+ return parser(v) if parser else v
+
+ for tid, data in sorted(
+ records.items(),
+ key=key or dyn_parse_to_dt,
+ ):
+ yield tid, data
+
+
+@cm
+def open_trade_ledger(
+ broker: str,
+ account: str,
+
+ # default is to sort by detected datetime-ish field
+ tx_sort: Callable = iter_by_dt,
+
+) -> Generator[dict, None, None]:
+ '''
+    Idempotently create and read in a trade log file from the
+    ``<config-dir>/ledgers/`` directory.
+
+    Files are named per broker account of the form
+    ``trades_<broker>_<account>.toml``. The ``<account>`` here is the
+    name as defined in the user's ``brokers.toml`` config.
+
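+    A minimal usage sketch (broker/account names are illustrative):
+
+    .. code:: python
+
+        with open_trade_ledger('ib', 'paper') as ledger:
+            for tid, txdict in ledger.items():
+                ...  # inspect or mutate; changes are written on exit
+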
+ '''
+ ledger_dict, fpath = config.load_ledger(broker, account)
+ cpy = ledger_dict.copy()
+ ledger = TransactionLedger(
+ ledger_dict=cpy,
+ file_path=fpath,
+ tx_sort=tx_sort,
+ )
+ try:
+ yield ledger
+ finally:
+ if ledger.data != ledger_dict:
+
+ # TODO: show diff output?
+ # https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
+ log.info(f'Updating ledger for {fpath}:\n')
+ ledger.write_config()
diff --git a/piker/accounting/_mktinfo.py b/piker/accounting/_mktinfo.py
new file mode 100644
index 00000000..046195ca
--- /dev/null
+++ b/piker/accounting/_mktinfo.py
@@ -0,0 +1,668 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+Market (pair) meta-info layer: sane addressing semantics and meta-data
+for cross-provider marketplaces.
+
+We introduce the concept of,
+
+- a FQMA: fully qualified market address,
+- a sane schema for FQMAs including derivatives,
+- a msg-serializable description of markets for
+ easy sharing with other pikers B)
+
+'''
+from __future__ import annotations
+from decimal import (
+ Decimal,
+ ROUND_HALF_EVEN,
+)
+from typing import (
+ Any,
+ Literal,
+)
+
+from ..data.types import Struct
+
+
+_underlyings: list[str] = [
+ 'stock',
+ 'bond',
+ 'crypto',
+ 'fiat',
+ 'commodity',
+]
+
+
+_derivs: list[str] = [
+ 'swap',
+ 'future',
+ 'continuous_future',
+ 'option',
+ 'futures_option',
+
+ # if we can't figure it out, presume the worst XD
+ 'unknown',
+]
+
+# NOTE: a tag for other subsystems to try
+# and do default settings for certain things:
+# - allocator does unit vs. dolla size limiting.
+AssetTypeName: Literal[
+ _underlyings
+ +
+ _derivs
+]
+
+# egs. stock, future, option, bond etc.
+
+
+def dec_digits(
+ value: float | str | Decimal,
+
+) -> int:
+ '''
+ Return the number of precision digits read from a decimal or float
+ value.
+
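+    eg. (illustrative) ``dec_digits('0.001')`` -> ``3`` and
+    ``dec_digits(2.5)`` -> ``1``.
+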
+ '''
+ if value == 0:
+ return 0
+
+ return int(
+ -Decimal(str(value)).as_tuple().exponent
+ )
+
+
+float_digits = dec_digits
+
+
+def digits_to_dec(
+ ndigits: int,
+) -> Decimal:
+ '''
+    Return the smallest decimal increment for an input (integer)
+    number of precision digits.
+
+    eg. 3 -> Decimal('0.001')
+
+ '''
+ if ndigits == 0:
+ return Decimal('0')
+
+ return Decimal('0.' + '0'*(ndigits-1) + '1')
+
+
+class Asset(Struct, frozen=True):
+ '''
+ Container type describing any transactable asset and its
+ contract-like and/or underlying technology meta-info.
+
+ '''
+ name: str
+ atype: str # AssetTypeName
+
+ # minimum transaction size / precision.
+ # eg. for buttcoin this is a "satoshi".
+ tx_tick: Decimal
+
+ # NOTE: additional info optionally packed in by the backend, but
+ # should not be explicitly required in our generic API.
+ info: dict = {} # make it frozen?
+
+ # TODO?
+ # _to_dict_skip = {'info'}
+
+ def __str__(self) -> str:
+ return self.name
+
+ def quantize(
+ self,
+ size: float,
+
+ ) -> Decimal:
+ '''
+ Truncate input ``size: float`` using ``Decimal``
+ quantized form of the digit precision defined
+        by ``self.tx_tick``.
+
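+        eg. (illustrative) with ``tx_tick=Decimal('0.01')``,
+        ``quantize(0.12345)`` gives ``Decimal('0.12')``.
+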
+ '''
+ digits = float_digits(self.tx_tick)
+ return Decimal(size).quantize(
+ Decimal(f'1.{"0".ljust(digits, "0")}'),
+ rounding=ROUND_HALF_EVEN
+ )
+
+ @classmethod
+ def guess_from_mkt_ep_key(
+ cls,
+ mkt_ep_key: str,
+ atype: str | None = None,
+
+ ) -> Asset:
+ '''
+ A hacky guess method for presuming a (target) asset's properties
+        based on either the actual market endpoint key, or config settings
+ from the user.
+
+ '''
+ atype = atype or 'unknown'
+
+ # attempt to strip off any source asset
+ # via presumed syntax of:
+ # - /
+        # - <dst>/<src>
+        # - <dst>.<src>
+ for char in ['/', '.']:
+ dst, _, src = mkt_ep_key.partition(char)
+ if src:
+ if not atype:
+ atype = 'fiat'
+ break
+
+ return Asset(
+ name=dst,
+ atype=atype,
+ tx_tick=Decimal('0.01'),
+ )
+
+
+def maybe_cons_tokens(
+ tokens: list[Any],
+ delim_char: str = '.',
+) -> str:
+ '''
+ Construct `str` output from a maybe-concatenation of input
+ sequence of elements in ``tokens``.
+
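+    eg. (illustrative) ``maybe_cons_tokens(['MNQ', '', 'CME', 'ib'])``
+    gives ``'mnq.cme.ib'``: empty tokens are dropped and the result is
+    lower-cased.
+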
+ '''
+ return delim_char.join(filter(bool, tokens)).lower()
+
+
+class MktPair(Struct, frozen=True):
+ '''
+ Market description for a pair of assets which are tradeable:
+ a market which enables transactions of the form,
+ buy: source asset -> destination asset
+ sell: destination asset -> source asset
+
+ The main intention of this type is for a **simple** cross-asset
+    venue/broker normalized description type from which all
+ market-auctions can be mapped from FQME identifiers.
+
+ TODO: our eventual target fqme format/schema is:
+    <dst>/<src>.<expiry>.<con_info_1>.<con_info_2>. -> .<venue>.<broker>
+          ^ -- optional tokens ------------------------------- ^
+
+ '''
+ dst: str | Asset
+ # "destination asset" (name) used to buy *to*
+ # (or used to sell *from*)
+
+ price_tick: Decimal # minimum price increment
+ size_tick: Decimal # minimum size (aka vlm) increment
+ # the tick size is the number describing the smallest step in value
+ # available in this market between the source and destination
+ # assets.
+ # https://en.wikipedia.org/wiki/Tick_size
+ # https://en.wikipedia.org/wiki/Commodity_tick
+ # https://en.wikipedia.org/wiki/Percentage_in_point
+
+ # unique "broker id" since every market endpoint provider
+ # has their own nomenclature and schema for market maps.
+ bs_mktid: str
+ broker: str # the middle man giving access
+
+ # NOTE: to start this field is optional but should eventually be
+    # required; the reason is for backward compat since most position
+    # calculations were not originally stored with a src asset..
+
+ src: str | Asset = ''
+ # "source asset" (name) used to buy *from*
+ # (or used to sell *to*).
+
+ venue: str = '' # market venue provider name
+ expiry: str = '' # for derivs, expiry datetime parseable str
+
+ # destination asset's financial type/classification name
+ # NOTE: this is required for the order size allocator system,
+ # since we use different default settings based on the type
+ # of the destination asset, eg. futes use a units limits vs.
+ # equities a $limit.
+ # dst_type: AssetTypeName | None = None
+
+ # source asset's financial type/classification name
+ # TODO: is a src type required for trading?
+ # there's no reason to need any more then the one-way alloc-limiter
+ # config right?
+ # src_type: AssetTypeName
+
+ # for derivs, info describing contract, egs.
+ # strike price, call or put, swap type, exercise model, etc.
+ contract_info: list[str] | None = None
+
+ _atype: str = ''
+
+ # NOTE: when cast to `str` return fqme
+ def __str__(self) -> str:
+ return self.fqme
+
+ @classmethod
+ def from_msg(
+ cls,
+ msg: dict[str, Any],
+
+ ) -> MktPair:
+ '''
+ Constructor for a received msg-dict normally received over IPC.
+
+ '''
+ dst_asset_msg = msg.pop('dst')
+ src_asset_msg = msg.pop('src')
+
+ if isinstance(dst_asset_msg, str):
+ src: str = str(src_asset_msg)
+ assert isinstance(src, str)
+ return cls.from_fqme(
+ dst_asset_msg,
+ src=src,
+ **msg,
+ )
+
+ else:
+ # NOTE: we call `.copy()` here to ensure
+ # type casting!
+ dst = Asset(**dst_asset_msg).copy()
+ if not isinstance(src_asset_msg, str):
+ src = Asset(**src_asset_msg).copy()
+ else:
+ src = str(src_asset_msg)
+
+ return cls(
+ dst=dst,
+ src=src,
+ **msg,
+                # XXX NOTE: ``msgspec`` can encode `Decimal`
+                # but it doesn't decode to it by default since
+                # we aren't spec-ing these msgs as structs, SO
+                # we have to ensure we do a struct type cast (which
+                # `.copy()` does) to ensure we get the right type!
+ ).copy()
+
+ @property
+ def resolved(self) -> bool:
+ return isinstance(self.dst, Asset)
+
+ @classmethod
+ def from_fqme(
+ cls,
+ fqme: str,
+
+ price_tick: float | str,
+ size_tick: float | str,
+ bs_mktid: str,
+
+ broker: str | None = None,
+ **kwargs,
+
+ ) -> MktPair:
+
+ _fqme: str = fqme
+ if (
+ broker
+ and broker not in fqme
+ ):
+ _fqme = f'{fqme}.{broker}'
+
+ broker, mkt_ep_key, venue, suffix = unpack_fqme(_fqme)
+ dst: Asset = Asset.guess_from_mkt_ep_key(
+ mkt_ep_key,
+ atype=kwargs.get('_atype'),
+ )
+
+        # XXX: loading from an fqme string will
+        # leave this pair as "unresolved" meaning
+ # we don't yet have `.dst` set as an `Asset`
+ # which we expect to be filled in by some
+ # backend client with access to that data-info.
+ return cls(
+ # XXX: not resolved to ``Asset`` :(
+ dst=dst,
+
+ broker=broker,
+ venue=venue,
+ # XXX NOTE: we presume this token
+            # is the expiry for now!
+ expiry=suffix,
+
+ price_tick=price_tick,
+ size_tick=size_tick,
+ bs_mktid=bs_mktid,
+
+ **kwargs,
+
+ ).copy()
+
+ @property
+ def key(self) -> str:
+ '''
+ The "endpoint key" for this market.
+
+ '''
+ return self.pair
+
+ def pair(
+ self,
+ delim_char: str | None = None,
+ ) -> str:
+ '''
+ The "endpoint asset pair key" for this market.
+ Eg. mnq/usd or btc/usdt or xmr/btc
+
+ In most other tina platforms this is referred to as the
+ "symbol".
+
+ '''
+ return maybe_cons_tokens(
+ [str(self.dst),
+ str(self.src)],
+ # TODO: make the default '/'
+ delim_char=delim_char or '',
+ )
+
+ @property
+ def suffix(self) -> str:
+ '''
+ The "contract suffix" for this market.
+
+ Eg. mnq/usd.20230616.cme.ib
+ ^ ----- ^
+ or tsla/usd.20230324.200c.cboe.ib
+ ^ ---------- ^
+
+ In most other tina platforms they only show you these details in
+ some kinda "meta data" format, we have FQMEs so we do this up
+ front and explicit.
+
+ '''
+ field_strs = [self.expiry]
+ con_info = self.contract_info
+ if con_info is not None:
+ field_strs.extend(con_info)
+
+ return maybe_cons_tokens(field_strs)
+
+ def get_fqme(
+ self,
+
+ # NOTE: allow dropping the source asset from the
+ # market endpoint's pair key. Eg. to change
+ # mnq/usd.<> -> mnq.<> which is useful when
+ # searching (legacy) stock exchanges.
+ without_src: bool = False,
+ delim_char: str | None = None,
+
+ ) -> str:
+ '''
+ Return the fully qualified market endpoint-address for the
+ pair of transacting assets.
+
+ fqme = "fully qualified market endpoint"
+
+ And yes, you pronounce it colloquially as read..
+
+        Basically the idea here is that all client code (consumers of piker's
+        APIs which query the data/broker-provider agnostic layer(s)) should be
+        able to tell which backend / venue / derivative each data feed/flow is
+        from by an explicit string-key of the current form:
+
+        <pair>.<venue>.<suffix>.<broker>
+
+ eg. for an explicit daq mini futes contract: mnq.cme.20230317.ib
+
+ TODO: I have thoughts that we should actually change this to be
+ more like an "attr lookup" (like how the web should have done
+        urls, but marketing peeps ruined it etc. etc.)
+
+ ...
+
+ TODO:
+ See community discussion on naming and nomenclature, order
+ of addressing hierarchy, general schema, internal representation:
+
+ https://github.com/pikers/piker/issues/467
+
+ '''
+ key: str = (
+ self.pair(delim_char=delim_char)
+ if not without_src
+ else str(self.dst)
+ )
+
+ return maybe_cons_tokens([
+ key, # final "pair name" (eg. qqq[/usd], btcusdt)
+ self.venue,
+ self.suffix, # includes expiry and other con info
+ self.broker,
+ ])
+
+ # NOTE: the main idea behind an fqme is to map a "market address"
+ # to some endpoint from a transaction provider (eg. a broker) such
+ # that we build a table of `fqme: str -> bs_mktid: Any` where any "piker
+ # market address" maps 1-to-1 to some broker trading endpoint.
+ # @cached_property
+ fqme = property(get_fqme)
+
+ def get_bs_fqme(
+ self,
+ **kwargs,
+ ) -> str:
+ '''
+ FQME sin broker part XD
+
+ '''
+ sin_broker, *_ = self.get_fqme(**kwargs).rpartition('.')
+ return sin_broker
+
+ bs_fqme = property(get_bs_fqme)
+
+ @property
+ def fqsn(self) -> str:
+ return self.fqme
+
+ def quantize(
+ self,
+ size: float,
+
+ quantity_type: Literal['price', 'size'] = 'size',
+
+ ) -> Decimal:
+ '''
+ Truncate input ``size: float`` using ``Decimal``
+ and ``.size_tick``'s # of digits.
+        and the digit precision of ``.price_tick`` or ``.size_tick``
+        as selected by the passed ``quantity_type``.
+ '''
+ match quantity_type:
+ case 'price':
+ digits = float_digits(self.price_tick)
+ case 'size':
+ digits = float_digits(self.size_tick)
+
+ return Decimal(size).quantize(
+ Decimal(f'1.{"0".ljust(digits, "0")}'),
+ rounding=ROUND_HALF_EVEN
+ )
+
+ # TODO: BACKWARD COMPAT, TO REMOVE?
+ @property
+ def type_key(self) -> str:
+ if isinstance(self.dst, Asset):
+ return str(self.dst.atype)
+
+ return self._atype
+
+ @property
+ def price_tick_digits(self) -> int:
+ return float_digits(self.price_tick)
+
+ @property
+ def size_tick_digits(self) -> int:
+ return float_digits(self.size_tick)
+
+
+def unpack_fqme(
+ fqme: str,
+
+ broker: str | None = None
+
+) -> tuple[str, ...]:
+ '''
+ Unpack a fully-qualified-symbol-name to ``tuple``.
+
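+    eg. (illustrative) ``unpack_fqme('btcusdt.binance')`` gives
+    ``('binance', 'btcusdt', '', '')`` whilst
+    ``unpack_fqme('mnq.cme.20230317.ib')`` gives
+    ``('ib', 'mnq', 'cme', '20230317')``.
+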
+ '''
+ venue = ''
+ suffix = ''
+
+ # TODO: probably reverse the order of all this XD
+ tokens = fqme.split('.')
+
+ match tokens:
+ case [mkt_ep, broker]:
+ # probably crypto
+ return (
+ broker,
+ mkt_ep,
+ '',
+ '',
+ )
+
+ # TODO: swap venue and suffix/deriv-info here?
+ case [mkt_ep, venue, suffix, broker]:
+ pass
+
+ # handle `bs_mktid` + `broker` input case
+ case [
+ mkt_ep, venue, suffix
+ ] if (
+ broker
+ and suffix != broker
+ ):
+ pass
+
+ case [mkt_ep, venue, broker]:
+ suffix = ''
+
+ case _:
+ raise ValueError(f'Invalid fqme: {fqme}')
+
+ return (
+ broker,
+ mkt_ep,
+ venue,
+ # '.'.join([mkt_ep, venue]),
+ suffix,
+ )
+
+
+class Symbol(Struct):
+ '''
+ I guess this is some kinda container thing for dealing with
+ all the different meta-data formats from brokers?
+
+ '''
+ key: str
+
+ broker: str = ''
+ venue: str = ''
+
+ # precision descriptors for price and vlm
+ tick_size: Decimal = Decimal('0.01')
+ lot_tick_size: Decimal = Decimal('0.0')
+
+ suffix: str = ''
+ broker_info: dict[str, dict[str, Any]] = {}
+
+ @classmethod
+ def from_fqme(
+ cls,
+ fqsn: str,
+ info: dict[str, Any],
+
+ ) -> Symbol:
+ broker, mktep, venue, suffix = unpack_fqme(fqsn)
+ tick_size = info.get('price_tick_size', 0.01)
+ lot_size = info.get('lot_tick_size', 0.0)
+
+ return Symbol(
+ broker=broker,
+ key=mktep,
+ tick_size=tick_size,
+ lot_tick_size=lot_size,
+ venue=venue,
+ suffix=suffix,
+ broker_info={broker: info},
+ )
+
+ @property
+ def type_key(self) -> str:
+ return list(self.broker_info.values())[0]['asset_type']
+
+ @property
+ def tick_size_digits(self) -> int:
+ return float_digits(self.tick_size)
+
+ @property
+ def lot_size_digits(self) -> int:
+ return float_digits(self.lot_tick_size)
+
+ @property
+ def price_tick(self) -> Decimal:
+ return Decimal(str(self.tick_size))
+
+ @property
+ def size_tick(self) -> Decimal:
+ return Decimal(str(self.lot_tick_size))
+
+ @property
+ def broker(self) -> str:
+ return list(self.broker_info.keys())[0]
+
+ @property
+ def fqme(self) -> str:
+ return maybe_cons_tokens([
+ self.key, # final "pair name" (eg. qqq[/usd], btcusdt)
+ self.venue,
+ self.suffix, # includes expiry and other con info
+ self.broker,
+ ])
+
+ def quantize(
+ self,
+ size: float,
+ ) -> Decimal:
+ digits = float_digits(self.lot_tick_size)
+ return Decimal(size).quantize(
+ Decimal(f'1.{"0".ljust(digits, "0")}'),
+ rounding=ROUND_HALF_EVEN
+ )
+
+ # NOTE: when cast to `str` return fqme
+ def __str__(self) -> str:
+ return self.fqme
diff --git a/piker/pp.py b/piker/accounting/_pos.py
similarity index 59%
rename from piker/pp.py
rename to piker/accounting/_pos.py
index a01bdc4e..3af0eeef 100644
--- a/piker/pp.py
+++ b/piker/accounting/_pos.py
@@ -12,158 +12,104 @@
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
-
# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
'''
Personal/Private position parsing, calculating, summarizing in a way
that doesn't try to cuk most humans who prefer to not lose their moneys..
+
(looking at you `ib` and dirt-bird friends)
'''
from __future__ import annotations
from contextlib import contextmanager as cm
-from pprint import pformat
-import os
-from os import path
+from decimal import Decimal
from math import copysign
-import re
-import time
+from pprint import pformat
+from pathlib import Path
from typing import (
Any,
Iterator,
- Optional,
Union,
Generator
)
import pendulum
from pendulum import datetime, now
-import tomli
-import toml
+import tomlkit
-from . import config
-from .brokers import get_brokermod
-from .clearing._messages import BrokerdPosition, Status
-from .data._source import Symbol, unpack_fqsn
-from .log import get_logger
-from .data.types import Struct
+from ._ledger import (
+ Transaction,
+ iter_by_dt,
+ open_trade_ledger,
+)
+from ._mktinfo import (
+ MktPair,
+ Asset,
+ unpack_fqme,
+)
+from .. import config
+from ..brokers import get_brokermod
+from ..clearing._messages import (
+ BrokerdPosition,
+ Status,
+)
+from ..data.types import Struct
+from ..log import get_logger
log = get_logger(__name__)
-@cm
-def open_trade_ledger(
- broker: str,
- account: str,
-
-) -> Generator[dict, None, None]:
- '''
- Indempotently create and read in a trade log file from the
- ``/ledgers/`` directory.
-
- Files are named per broker account of the form
- ``_.toml``. The ``accountname`` here is the
- name as defined in the user's ``brokers.toml`` config.
-
- '''
- ldir = path.join(config._config_dir, 'ledgers')
- if not path.isdir(ldir):
- os.makedirs(ldir)
-
- fname = f'trades_{broker}_{account}.toml'
- tradesfile = path.join(ldir, fname)
-
- if not path.isfile(tradesfile):
- log.info(
- f'Creating new local trades ledger: {tradesfile}'
- )
- with open(tradesfile, 'w') as cf:
- pass # touch
- with open(tradesfile, 'rb') as cf:
- start = time.time()
- ledger = tomli.load(cf)
- log.info(f'Ledger load took {time.time() - start}s')
- cpy = ledger.copy()
-
- try:
- yield cpy
- finally:
- if cpy != ledger:
-
- # TODO: show diff output?
- # https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
- log.info(f'Updating ledger for {tradesfile}:\n')
- ledger.update(cpy)
-
- # we write on close the mutated ledger data
- with open(tradesfile, 'w') as cf:
- toml.dump(ledger, cf)
-
-
-class Transaction(Struct, frozen=True):
- # TODO: should this be ``.to`` (see below)?
- fqsn: str
-
- sym: Symbol
- tid: Union[str, int] # unique transaction id
- size: float
- price: float
- cost: float # commisions or other additional costs
- dt: datetime
- expiry: datetime | None = None
-
- # optional key normally derived from the broker
- # backend which ensures the instrument-symbol this record
- # is for is truly unique.
- bsuid: Union[str, int] | None = None
-
- # optional fqsn for the source "asset"/money symbol?
- # from: Optional[str] = None
-
-
-def iter_by_dt(
- clears: dict[str, Any],
-) -> Iterator[tuple[str, dict]]:
- '''
- Iterate entries of a ``clears: dict`` table sorted by entry recorded
- datetime presumably set at the ``'dt'`` field in each entry.
-
- '''
- for tid, data in sorted(
- list(clears.items()),
- key=lambda item: item[1]['dt'],
- ):
- yield tid, data
-
-
class Position(Struct):
'''
- Basic pp (personal/piker position) model with attached clearing
- transaction history.
+ An asset "position" model with attached clearing transaction history.
+
+ A financial "position" in `piker` terms is a summary of accounting
+ metrics computed from a transaction ledger; generally it describes
+    some cumulative "size" and "average price" from the summarized
+ underlying transaction set.
+
+ In piker we focus on the `.ppu` (price per unit) and the `.bep`
+ (break even price) including all transaction entries and exits since
+ the last "net-zero" size of the destination asset's holding.
+
+ This interface serves as an object API for computing and tracking
+ positions as well as supports serialization for storage in the local
+ file system (in TOML) and to interchange as a msg over IPC.
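+
+    eg. (an illustrative, cost-free example) buying 2 units at 100 and
+    then 2 more at 110 gives ``.size == 4`` and ``.ppu == 105``; a
+    later partial exit reduces ``.size`` but leaves the ``.ppu``
+    unchanged since the ppu only updates on size increases.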
'''
- symbol: Symbol
+ mkt: MktPair
# can be +ve or -ve for long/short
size: float
- # "breakeven price" above or below which pnl moves above and below
- # zero for the entirety of the current "trade state".
+    # the "price-per-unit" (ppu) above or below which pnl moves above and
+ # below zero for the entirety of the current "trade state". The ppu
+ # is only modified on "increases of" the absolute size of a position
+ # in one of a long/short "direction" (i.e. abs(.size_i) > 0 after
+ # the next transaction given .size was > 0 before that tx, and vice
+ # versa for -ve sized positions).
ppu: float
- # unique backend symbol id
- bsuid: str
+ # TODO: break-even-price support!
+ # bep: float
- split_ratio: Optional[int] = None
+ # unique "backend system market id"
+ bs_mktid: str
+
+ split_ratio: int | None = None
# ordered record of known constituent trade messages
clears: dict[
Union[str, int, Status], # trade id
dict[str, Any], # transaction history summaries
] = {}
- first_clear_dt: Optional[datetime] = None
+ first_clear_dt: datetime | None = None
- expiry: Optional[datetime] = None
+ expiry: datetime | None = None
+
+ def __repr__(self) -> str:
+ return pformat(self.to_dict())
def to_dict(self) -> dict:
return {
@@ -192,37 +138,40 @@ class Position(Struct):
# listing venue here even when the backend isn't providing
# it via the trades ledger..
# drop symbol obj in serialized form
- s = d.pop('symbol')
- fqsn = s.front_fqsn()
+ mkt: MktPair = d.pop('mkt')
+ assert isinstance(mkt, MktPair)
- broker, key, suffix = unpack_fqsn(fqsn)
- sym_info = s.broker_info[broker]
+ fqme = mkt.fqme
+ broker, mktep, venue, suffix = unpack_fqme(fqme)
- d['asset_type'] = sym_info['asset_type']
- d['price_tick_size'] = (
- sym_info.get('price_tick_size')
- or
- s.tick_size
- )
- d['lot_tick_size'] = (
- sym_info.get('lot_tick_size')
- or
- s.lot_tick_size
- )
+ # an asset resolved mkt where we have ``Asset`` info about
+ # each tradeable asset in the market.
+ if mkt.resolved:
+ dst: Asset = mkt.dst
+ d['asset_type'] = dst.atype
+
+ d['price_tick'] = mkt.price_tick
+ d['size_tick'] = mkt.size_tick
if self.expiry is None:
d.pop('expiry', None)
elif expiry:
d['expiry'] = str(expiry)
- toml_clears_list = []
+ clears_table: tomlkit.Array = tomlkit.array()
+ clears_table.multiline(
+ multiline=True,
+ indent='',
+ )
# reverse sort so latest clears are at top of section?
for tid, data in iter_by_dt(clears):
- inline_table = toml.TomlDecoder().get_empty_inline_table()
+
+ inline_table = tomlkit.inline_table()
# serialize datetime to parsable `str`
- inline_table['dt'] = str(data['dt'])
+ dtstr = inline_table['dt'] = data['dt'].isoformat('T')
+ assert 'Datetime' not in dtstr
# insert optional clear fields in column order
for k in ['ppu', 'accum_size']:
@@ -235,11 +184,11 @@ class Position(Struct):
inline_table[k] = data[k]
inline_table['tid'] = tid
- toml_clears_list.append(inline_table)
+ clears_table.append(inline_table)
- d['clears'] = toml_clears_list
+ d['clears'] = clears_table
- return fqsn, d
+ return fqme, d
def ensure_state(self) -> None:
'''
@@ -249,7 +198,9 @@ class Position(Struct):
'''
clears = list(self.clears.values())
- self.first_clear_dt = min(list(entry['dt'] for entry in clears))
+ self.first_clear_dt = min(
+ list(entry['dt'] for entry in clears)
+ )
last_clear = clears[-1]
csize = self.calc_size()
@@ -294,22 +245,19 @@ class Position(Struct):
) -> None:
# XXX: better place to do this?
- symbol = self.symbol
+ mkt = self.mkt
+ size_tick_digits = mkt.size_tick_digits
+ price_tick_digits = mkt.price_tick_digits
- lot_size_digits = symbol.lot_size_digits
- ppu, size = (
- round(
- msg['avg_price'],
- ndigits=symbol.tick_size_digits
- ),
- round(
- msg['size'],
- ndigits=lot_size_digits
- ),
+ self.ppu = round(
+ # TODO: change this to ppu?
+ msg['avg_price'],
+ ndigits=price_tick_digits,
+ )
+ self.size = round(
+ msg['size'],
+ ndigits=size_tick_digits,
)
-
- self.ppu = ppu
- self.size = size
@property
def dsize(self) -> float:
@@ -337,10 +285,16 @@ class Position(Struct):
datetime-stamped order.
'''
- return iter_by_dt(self.clears)
+ # sort on the already existing datetime that should have
+ # been generated for the entry's table
+ return iter_by_dt(
+ self.clears,
+ key=lambda entry: entry[1]['dt']
+ )
def calc_ppu(
self,
+
# include transaction cost in breakeven price
# and presume the worst case of the same cost
# to exit this transaction (even though in reality
@@ -471,20 +425,28 @@ class Position(Struct):
asset using the clears/trade event table; zero if expired.
'''
- size: float = 0
+ size: float = 0.
# time-expired pps (normally derivatives) are "closed"
# and have a zero size.
if self.expired():
- return 0
+ return 0.
for tid, entry in self.clears.items():
size += entry['size']
+ # XXX: do we need it every step?
+ # no right since rounding is an LT?
+ # size = self.mkt.quantize(
+ # size + entry['size'],
+ # quantity_type='size',
+ # )
if self.split_ratio is not None:
size = round(size * self.split_ratio)
- return float(self.symbol.quantize_size(size))
+ return float(
+ self.mkt.quantize(size),
+ )
def minimize_clears(
self,
@@ -506,7 +468,9 @@ class Position(Struct):
# scan for the last "net zero" position by iterating
# transactions until the next net-zero size, rinse, repeat.
for tid, clear in self.clears.items():
- size += clear['size']
+ size = float(
+ self.mkt.quantize(size + clear['size'])
+ )
clears_since_zero.append((tid, clear))
if size == 0:
@@ -543,8 +507,8 @@ class Position(Struct):
return clear
- def sugest_split(self) -> float:
- ...
+ # def sugest_split(self) -> float:
+ # ...
class PpTable(Struct):
@@ -552,7 +516,8 @@ class PpTable(Struct):
brokername: str
acctid: str
pps: dict[str, Position]
- conf: Optional[dict] = {}
+ conf_path: Path
+ conf: dict | None = {}
def update_from_trans(
self,
@@ -564,24 +529,38 @@ class PpTable(Struct):
pps = self.pps
updated: dict[str, Position] = {}
- # lifo update all pps from records
- for tid, t in trans.items():
-
- pp = pps.setdefault(
- t.bsuid,
+ # lifo update all pps from records, ensuring
+ # we compute the PPU and size sorted in time!
+ for t in sorted(
+ trans.values(),
+ key=lambda t: t.dt,
+ reverse=True,
+ ):
+ fqme = t.fqme
+ bs_mktid = t.bs_mktid
+            # template the mkt-info presuming legacy market ticks
+ # if no info exists in the transactions..
+ mkt: MktPair = t.sys
+ pp = pps.get(bs_mktid)
+ if not pp:
# if no existing pp, allocate fresh one.
- Position(
- Symbol.from_fqsn(
- t.fqsn,
- info={},
- ) if not t.sym else t.sym,
+ pp = pps[bs_mktid] = Position(
+ mkt=mkt,
size=0.0,
ppu=0.0,
- bsuid=t.bsuid,
+ bs_mktid=bs_mktid,
expiry=t.expiry,
)
- )
+ else:
+ # NOTE: if for some reason a "less resolved" mkt pair
+ # info has been set (based on the `.fqme` being
+ # a shorter string), instead use the one from the
+ # transaction since it likely has (more) full
+ # information from the provider.
+ if len(pp.mkt.fqme) < len(fqme):
+ pp.mkt = mkt
+
clears = pp.clears
if clears:
first_clear_dt = pp.first_clear_dt
@@ -590,7 +569,10 @@ class PpTable(Struct):
# included in the current pps state.
if (
t.tid in clears
- or first_clear_dt and t.dt < first_clear_dt
+ or (
+ first_clear_dt
+ and t.dt < first_clear_dt
+ )
):
# NOTE: likely you'll see repeats of the same
# ``Transaction`` passed in here if/when you are restarting
@@ -601,12 +583,14 @@ class PpTable(Struct):
# update clearing table
pp.add_clear(t)
- updated[t.bsuid] = pp
+ updated[t.bs_mktid] = pp
# minimize clears tables and update sizing.
- for bsuid, pp in updated.items():
+ for bs_mktid, pp in updated.items():
pp.ensure_state()
+ # deliver only the position entries that were actually updated
+ # (modified the state) from the input transaction set.
return updated
def dump_active(
@@ -630,14 +614,8 @@ class PpTable(Struct):
open_pp_objs: dict[str, Position] = {}
pp_objs = self.pps
- for bsuid in list(pp_objs):
- pp = pp_objs[bsuid]
-
- # XXX: debug hook for size mismatches
- # qqqbsuid = 320227571
- # if bsuid == qqqbsuid:
- # breakpoint()
-
+ for bs_mktid in list(pp_objs):
+ pp = pp_objs[bs_mktid]
pp.ensure_state()
if (
@@ -656,37 +634,42 @@ class PpTable(Struct):
# ignored; the closed positions won't be written to the
# ``pps.toml`` since ``pp_active_entries`` above is what's
# written.
- closed_pp_objs[bsuid] = pp
+ closed_pp_objs[bs_mktid] = pp
else:
- open_pp_objs[bsuid] = pp
+ open_pp_objs[bs_mktid] = pp
return open_pp_objs, closed_pp_objs
def to_toml(
self,
+ active: dict[str, Position] | None = None,
+
) -> dict[str, Any]:
- active, closed = self.dump_active()
+ if active is None:
+ active, _ = self.dump_active()
- # ONLY dict-serialize all active positions; those that are closed
- # we don't store in the ``pps.toml``.
+ # ONLY dict-serialize all active positions; those that are
+ # closed we don't store in the ``pps.toml``.
to_toml_dict = {}
- for bsuid, pos in active.items():
-
- # keep the minimal amount of clears that make up this
+ pos: Position
+ for bs_mktid, pos in active.items():
+ # NOTE: we only store the minimal amount of clears that make up this
# position since the last net-zero state.
pos.minimize_clears()
pos.ensure_state()
# serialize to pre-toml form
- fqsn, asdict = pos.to_pretoml()
- log.info(f'Updating active pp: {fqsn}')
+ fqme, asdict = pos.to_pretoml()
+
+ assert 'Datetime' not in asdict['clears'][0]['dt']
+ log.info(f'Updating active pp: {fqme}')
# XXX: ugh, it's cuz we push the section under
# the broker name.. maybe we need to rethink this?
- brokerless_key = fqsn.removeprefix(f'{self.brokername}.')
+ brokerless_key = fqme.removeprefix(f'{self.brokername}.')
to_toml_dict[brokerless_key] = asdict
return to_toml_dict
@@ -699,33 +682,55 @@ class PpTable(Struct):
# TODO: show diff output?
# https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
# active, closed_pp_objs = table.dump_active()
- pp_entries = self.to_toml()
+
+ active, closed = self.dump_active()
+ pp_entries = self.to_toml(active=active)
if pp_entries:
- log.info(f'Updating ``pps.toml`` for {path}:\n')
- log.info(f'Current positions:\n{pp_entries}')
- self.conf[self.brokername][self.acctid] = pp_entries
+ log.info(
+ f'Updating positions in ``{self.conf_path}``:\n'
+                f'{pformat(pp_entries)}'
+ )
- elif (
- self.brokername in self.conf and
- self.acctid in self.conf[self.brokername]
- ):
- del self.conf[self.brokername][self.acctid]
- if len(self.conf[self.brokername]) == 0:
- del self.conf[self.brokername]
+ if self.brokername in self.conf:
+ log.warning(
+                f'Rewriting {self.conf_path} keys to drop <broker.acct>!'
+ )
+            # legacy key schema including <broker.acct>, so
+ # rewrite all entries to drop those tables since we now
+ # put that in the filename!
+ accounts = self.conf.pop(self.brokername)
+ assert len(accounts) == 1
+ entries = accounts.pop(self.acctid)
+ self.conf.update(entries)
- # TODO: why tf haven't they already done this for inline
- # tables smh..
- enc = PpsEncoder(preserve=True)
- # table_bs_type = type(toml.TomlDecoder().get_empty_inline_table())
- enc.dump_funcs[
- toml.decoder.InlineTableDict
- ] = enc.dump_inline_table
+ self.conf.update(pp_entries)
+
+ # drop any entries that are computed as net-zero
+ # we don't care about storing in the pps file.
+ if closed:
+ bs_mktid: str
+ for bs_mktid, pos in closed.items():
+ fqme: str = pos.mkt.fqme
+ if fqme in self.conf:
+ self.conf.pop(fqme)
+ else:
+                    # TODO: we really need a diff set of
+ # loglevels/colors per subsys.
+ log.warning(
+ f'Recent position for {fqme} was closed!'
+ )
+
+ # if there are no active position entries according
+ # to the toml dump output above, then clear the config
+ # file of all entries.
+ elif self.conf:
+ for entry in list(self.conf):
+ del self.conf[entry]
config.write(
- self.conf,
- 'pps',
- encoder=enc,
- fail_empty=False
+ config=self.conf,
+ path=self.conf_path,
+ fail_empty=False,
)
@@ -735,7 +740,7 @@ def load_pps_from_ledger(
acctname: str,
# post normalization filter on ledger entries to be processed
- filter_by: Optional[list[dict]] = None,
+ filter_by: list[dict] | None = None,
) -> tuple[
dict[str, Transaction],
@@ -745,7 +750,7 @@ def load_pps_from_ledger(
Open a ledger file by broker name and account and read in and
process any trade records into our normalized ``Transaction`` form
and then update the equivalent ``Pptable`` and deliver the two
- bsuid-mapped dict-sets of the transactions and pps.
+ bs_mktid-mapped dict-sets of the transactions and pps.
'''
with (
@@ -761,9 +766,9 @@ def load_pps_from_ledger(
if filter_by:
records = {}
- bsuids = set(filter_by)
+ bs_mktids = set(filter_by)
for tid, r in src_records.items():
- if r.bsuid in bsuids:
+ if r.bs_mktid in bs_mktids:
records[tid] = r
else:
records = src_records
@@ -773,151 +778,33 @@ def load_pps_from_ledger(
return records, updated
-# TODO: instead see if we can hack tomli and tomli-w to do the same:
-# - https://github.com/hukkin/tomli
-# - https://github.com/hukkin/tomli-w
-class PpsEncoder(toml.TomlEncoder):
- '''
- Special "styled" encoder that makes a ``pps.toml`` redable and
- compact by putting `.clears` tables inline and everything else
- flat-ish.
-
- '''
- separator = ','
-
- def dump_list(self, v):
- '''
- Dump an inline list with a newline after every element and
- with consideration for denoted inline table types.
-
- '''
- retval = "[\n"
- for u in v:
- if isinstance(u, toml.decoder.InlineTableDict):
- out = self.dump_inline_table(u)
- else:
- out = str(self.dump_value(u))
-
- retval += " " + out + "," + "\n"
- retval += "]"
- return retval
-
- def dump_inline_table(self, section):
- """Preserve inline table in its compact syntax instead of expanding
- into subsection.
- https://github.com/toml-lang/toml#user-content-inline-table
- """
- val_list = []
- for k, v in section.items():
- # if isinstance(v, toml.decoder.InlineTableDict):
- if isinstance(v, dict):
- val = self.dump_inline_table(v)
- else:
- val = str(self.dump_value(v))
-
- val_list.append(k + " = " + val)
-
- retval = "{ " + ", ".join(val_list) + " }"
- return retval
-
- def dump_sections(self, o, sup):
- retstr = ""
- if sup != "" and sup[-1] != ".":
- sup += '.'
- retdict = self._dict()
- arraystr = ""
- for section in o:
- qsection = str(section)
- value = o[section]
-
- if not re.match(r'^[A-Za-z0-9_-]+$', section):
- qsection = toml.encoder._dump_str(section)
-
- # arrayoftables = False
- if (
- self.preserve
- and isinstance(value, toml.decoder.InlineTableDict)
- ):
- retstr += (
- qsection
- +
- " = "
- +
- self.dump_inline_table(o[section])
- +
- '\n' # only on the final terminating left brace
- )
-
- # XXX: this code i'm pretty sure is just blatantly bad
- # and/or wrong..
- # if isinstance(o[section], list):
- # for a in o[section]:
- # if isinstance(a, dict):
- # arrayoftables = True
- # if arrayoftables:
- # for a in o[section]:
- # arraytabstr = "\n"
- # arraystr += "[[" + sup + qsection + "]]\n"
- # s, d = self.dump_sections(a, sup + qsection)
- # if s:
- # if s[0] == "[":
- # arraytabstr += s
- # else:
- # arraystr += s
- # while d:
- # newd = self._dict()
- # for dsec in d:
- # s1, d1 = self.dump_sections(d[dsec], sup +
- # qsection + "." +
- # dsec)
- # if s1:
- # arraytabstr += ("[" + sup + qsection +
- # "." + dsec + "]\n")
- # arraytabstr += s1
- # for s1 in d1:
- # newd[dsec + "." + s1] = d1[s1]
- # d = newd
- # arraystr += arraytabstr
-
- elif isinstance(value, dict):
- retdict[qsection] = o[section]
-
- elif o[section] is not None:
- retstr += (
- qsection
- +
- " = "
- +
- str(self.dump_value(o[section]))
- )
-
- # if not isinstance(value, dict):
- if not isinstance(value, toml.decoder.InlineTableDict):
- # inline tables should not contain newlines:
- # https://toml.io/en/v1.0.0#inline-table
- retstr += '\n'
-
- else:
- raise ValueError(value)
-
- retstr += arraystr
- return (retstr, retdict)
-
-
@cm
def open_pps(
brokername: str,
acctid: str,
write_on_exit: bool = False,
+
) -> Generator[PpTable, None, None]:
'''
Read out broker-specific position entries from
incremental update file: ``pps.toml``.
'''
- conf, path = config.load('pps')
- brokersection = conf.setdefault(brokername, {})
- pps = brokersection.setdefault(acctid, {})
+ conf: dict
+ conf_path: Path
+ conf, conf_path = config.load_account(brokername, acctid)
+
+ if brokername in conf:
+ log.warning(
+            f'Rewriting {conf_path} keys to drop <broker.acct>!'
+ )
+        # legacy key schema including <broker.acct>, so
+ # rewrite all entries to drop those tables since we now
+ # put that in the filename!
+ accounts = conf.pop(brokername)
+ for acctid in accounts.copy():
+ entries = accounts.pop(acctid)
+ conf.update(entries)
# TODO: ideally we can pass in an existing
# pps state to this right? such that we
@@ -934,61 +821,72 @@ def open_pps(
brokername,
acctid,
pp_objs,
+ conf_path,
conf=conf,
)
# unmarshal/load ``pps.toml`` config entries into object form
# and update `PpTable` obj entries.
- for fqsn, entry in pps.items():
- bsuid = entry['bsuid']
- symbol = Symbol.from_fqsn(
- fqsn,
+ for fqme, entry in conf.items():
- # NOTE & TODO: right now we fill in the defaults from
- # `.data._source.Symbol` but eventually these should always
- # either be already written to the pos table or provided at
- # write time to ensure always having these values somewhere
- # and thus allowing us to get our pos sizing precision
- # correct!
- info={
- 'asset_type': entry.get('asset_type', ''),
- 'price_tick_size': entry.get('price_tick_size', 0.01),
- 'lot_tick_size': entry.get('lot_tick_size', 0.0),
- }
+ # atype = entry.get('asset_type', '')
+
+ # unique broker market id
+ bs_mktid = str(
+ entry.get('bsuid')
+ or entry.get('bs_mktid')
)
+ price_tick = Decimal(str(
+ entry.get('price_tick_size')
+ or entry.get('price_tick')
+ or '0.01'
+ ))
+ size_tick = Decimal(str(
+ entry.get('lot_tick_size')
+ or entry.get('size_tick')
+ or '0.0'
+ ))
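+    # NOTE: the `or`-chains above prefer the legacy `*_tick_size`
+    # keys but fall back to the newer `price_tick`/`size_tick`
+    # fields (and then a default) when reading older entries.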
+
+ # load the pair using the fqme which
+ # will make the pair "unresolved" until
+ # the backend broker actually loads
+ # the market and position info.
+ mkt = MktPair.from_fqme(
+ fqme,
+ price_tick=price_tick,
+ size_tick=size_tick,
+ bs_mktid=bs_mktid
+ )
+
+ # TODO: RE: general "events" instead of just "clears":
+ # - make this an `events` field and support more event types
+ # such as 'split', 'name_change', 'mkt_info', etc..
+    # - should we make a ``Struct`` for clear/event entries? convert
+ # "clear events table" from the toml config (list of a dicts)
+ # and load it into object form for use in position processing of
+ # new clear events.
# convert clears sub-tables (only in this form
# for toml re-presentation) back into a master table.
- clears_list = entry['clears']
-
- # index clears entries in "object" form by tid in a top
- # level dict instead of a list (as is presented in our
- # ``pps.toml``).
- clears = pp_objs.setdefault(bsuid, {})
-
- # TODO: should be make a ``Struct`` for clear/event entries?
- # convert "clear events table" from the toml config (list of
- # a dicts) and load it into object form for use in position
- # processing of new clear events.
+ toml_clears_list: list[dict[str, Any]] = entry['clears']
trans: list[Transaction] = []
+ for clears_table in toml_clears_list:
- for clears_table in clears_list:
- tid = clears_table.pop('tid')
+ tid = clears_table.get('tid')
dtstr = clears_table['dt']
dt = pendulum.parse(dtstr)
clears_table['dt'] = dt
trans.append(Transaction(
- fqsn=bsuid,
- sym=symbol,
- bsuid=bsuid,
+ fqme=bs_mktid,
+ sym=mkt,
+ bs_mktid=bs_mktid,
tid=tid,
size=clears_table['size'],
price=clears_table['price'],
cost=clears_table['cost'],
dt=dt,
))
- clears[tid] = clears_table
size = entry['size']
@@ -1004,13 +902,13 @@ def open_pps(
if expiry:
expiry = pendulum.parse(expiry)
- pp = pp_objs[bsuid] = Position(
- symbol,
+ pp = pp_objs[bs_mktid] = Position(
+ mkt,
size=size,
ppu=ppu,
split_ratio=split_ratio,
expiry=expiry,
- bsuid=entry['bsuid'],
+ bs_mktid=bs_mktid,
)
# XXX: super critical, we need to be sure to include
@@ -1029,19 +927,3 @@ def open_pps(
finally:
if write_on_exit:
table.write_config()
-
-
-if __name__ == '__main__':
- import sys
-
- args = sys.argv
- assert len(args) > 1, 'Specifiy account(s) from `brokers.toml`'
- args = args[1:]
- for acctid in args:
- broker, name = acctid.split('.')
- trans, updated_pps = load_pps_from_ledger(broker, name)
- print(
- f'Processing transactions into pps for {broker}:{acctid}\n'
- f'{pformat(trans)}\n\n'
- f'{pformat(updated_pps)}'
- )
diff --git a/piker/accounting/cli.py b/piker/accounting/cli.py
new file mode 100644
index 00000000..ee91d1b3
--- /dev/null
+++ b/piker/accounting/cli.py
@@ -0,0 +1,234 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+CLI front end for trades ledger and position tracking management.
+
+'''
+from typing import (
+ Any,
+)
+
+from rich.console import Console
+from rich.markdown import Markdown
+import tractor
+import trio
+import typer
+
+from ..log import get_logger
+from ..service import (
+ open_piker_runtime,
+)
+from ..clearing._messages import BrokerdPosition
+from ..calc import humanize
+
+
+ledger = typer.Typer()
+
+
+def broker_init(
+ brokername: str,
+ loglevel: str | None = None,
+
+ **start_actor_kwargs,
+
+) -> tuple:
+    '''
+    Given an input broker name, load all named arguments
+    which can be passed to a daemon + context spawn for
+    the relevant `brokerd` service endpoint; returned as a
+    3-tuple of the actor-spawn kwargs, the service task
+    entrypoint and the backend's (optional) trades endpoint.
+
+ '''
+ from ..brokers import get_brokermod
+ brokermod = get_brokermod(brokername)
+ modpath = brokermod.__name__
+
+ start_actor_kwargs['name'] = f'brokerd.{brokername}'
+ start_actor_kwargs.update(
+ getattr(
+ brokermod,
+ '_spawn_kwargs',
+ {},
+ )
+ )
+
+ # lookup actor-enabled modules declared by the backend offering the
+ # `brokerd` endpoint(s).
+ enabled = start_actor_kwargs['enable_modules'] = [modpath]
+ for submodname in getattr(
+ brokermod,
+ '__enable_modules__',
+ [],
+ ):
+ subpath = f'{modpath}.{submodname}'
+ enabled.append(subpath)
+
+ # TODO XXX: DO WE NEED THIS?
+ # enabled.append('piker.data.feed')
+
+ # non-blocking setup of brokerd service nursery
+ from ..brokers._daemon import _setup_persistent_brokerd
+
+ return (
+ start_actor_kwargs, # to `ActorNursery.start_actor()`
+ _setup_persistent_brokerd, # service task ep
+ getattr( # trades endpoint
+ brokermod,
+ 'trades_dialogue',
+ None,
+ ),
+ )
+
+
+@ledger.command()
+def sync(
+ fully_qualified_account_name: str,
+ pdb: bool = False,
+
+ loglevel: str = typer.Option(
+ 'error',
+ "-l",
+ ),
+):
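+    # shell usage sketch (assuming a `ledger` console-script entry
+    # point is installed), e.g.:
+    #   ledger sync ib.algopaper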
+ log = get_logger(loglevel)
+ console = Console()
+
+ try:
+ brokername, account = fully_qualified_account_name.split('.')
+ except ValueError:
+ md = Markdown(
+ f'=> `{fully_qualified_account_name}` <=\n\n'
+ 'is not a valid '
+ '__fully qualified account name?__\n\n'
+ 'Your account name needs to be of the form '
+            '`<brokername>.<account_name>`\n'
+ )
+ console.print(md)
+ return
+
+ start_kwargs, _, trades_ep = broker_init(
+ brokername,
+ loglevel=loglevel,
+ )
+
+ async def main():
+
+ async with (
+ open_piker_runtime(
+ name='ledger_cli',
+ loglevel=loglevel,
+ debug_mode=pdb,
+
+ ) as (actor, sockaddr),
+
+ tractor.open_nursery() as an,
+ ):
+ log.info(
+ f'Piker runtime up as {actor.uid}@{sockaddr}'
+ )
+
+ portal = await an.start_actor(
+ loglevel=loglevel,
+ debug_mode=pdb,
+ **start_kwargs,
+ )
+
+ if (
+ brokername == 'paper'
+ or trades_ep is None
+ ):
+ from ..clearing import _paper_engine as paper
+ open_trades_endpoint = paper.open_paperboi(
+ fqme=None, # tell paper to not start clearing loop
+ broker=brokername,
+ loglevel=loglevel,
+ )
+ else:
+ # open live brokerd trades endpoint
+ open_trades_endpoint = portal.open_context(
+ trades_ep,
+ loglevel=loglevel,
+ )
+
+            positions: list[dict[str, Any]]
+ accounts: list[str]
+ async with (
+ open_trades_endpoint as (
+ brokerd_ctx,
+ (positions, accounts),
+ ),
+ ):
+ assert len(accounts) == 1
+ summary: str = (
+ '[dim underline]Piker Position Summary[/] '
+ f'[dim blue underline]{brokername}[/]'
+ '[dim].[/]'
+ f'[blue underline]{account}[/]'
+ f'[dim underline] -> total pps: [/]'
+ f'[green]{len(positions)}[/]\n'
+ )
+ for ppdict in positions:
+ ppmsg = BrokerdPosition(**ppdict)
+ size = ppmsg.size
+ if size:
+ ppu: float = round(
+ ppmsg.avg_price,
+ ndigits=2,
+ )
+ cost_basis: str = humanize(size * ppu)
+ h_size: str = humanize(size)
+
+ if size < 0:
+ pcolor = 'red'
+ else:
+ pcolor = 'green'
+
+                    # semantic-highlight of fqme
+ fqme = ppmsg.symbol
+ tokens = fqme.split('.')
+ styled_fqme = f'[blue underline]{tokens[0]}[/]'
+ for tok in tokens[1:]:
+ styled_fqme += '[dim].[/]'
+ styled_fqme += f'[dim blue underline]{tok}[/]'
+
+ # TODO: instead display in a ``rich.Table``?
+ summary += (
+ styled_fqme +
+ '[dim]: [/]'
+ f'[{pcolor}]{h_size}[/]'
+ '[dim blue]u @[/]'
+ f'[{pcolor}]{ppu}[/]'
+ '[dim blue] = [/]'
+ f'[{pcolor}]$ {cost_basis}\n[/]'
+ )
+
+ console.print(summary)
+
+ # exit via ctx cancellation.
+ await brokerd_ctx.cancel(timeout=1)
+ # TODO: once ported to newer tractor branch we should
+ # be able to do a loop like this:
+ # while brokerd_ctx.cancel_called_remote is None:
+ # await trio.sleep(0.01)
+ # await brokerd_ctx.cancel()
+
+ await portal.cancel_actor()
+
+ trio.run(main)
+
+
+if __name__ == "__main__":
+    ledger()  # this is called from ``>> ledger <accountname>``
diff --git a/piker/brokers/__init__.py b/piker/brokers/__init__.py
index c67f4003..93393654 100644
--- a/piker/brokers/__init__.py
+++ b/piker/brokers/__init__.py
@@ -25,6 +25,7 @@ __brokers__ = [
'ib',
'kraken',
'kucoin'
+
# broken but used to work
# 'questrade',
# 'robinhood',
diff --git a/piker/brokers/_daemon.py b/piker/brokers/_daemon.py
new file mode 100644
index 00000000..8a81b1d6
--- /dev/null
+++ b/piker/brokers/_daemon.py
@@ -0,0 +1,169 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+Broker-daemon-actor "endpoint-hooks": the service task entry points for
+``brokerd``.
+
+'''
+from contextlib import (
+ asynccontextmanager as acm,
+)
+
+import tractor
+import trio
+
+from . import _util
+from . import get_brokermod
+
+# `brokerd` enabled modules
+# TODO: move this def to the `.data` subpkg..
+# NOTE: keeping this list as small as possible is part of our caps-sec
+# model and should be treated with utmost care!
+_data_mods = [
+ 'piker.brokers.core',
+ 'piker.brokers.data',
+ 'piker.brokers._daemon',
+ 'piker.data',
+ 'piker.data.feed',
+ 'piker.data._sampling'
+]
+
+
+# TODO: we should rename the daemon to datad prolly once we split up
+# broker vs. data tasks into separate actors?
+@tractor.context
+async def _setup_persistent_brokerd(
+ ctx: tractor.Context,
+ brokername: str,
+ loglevel: str | None = None,
+
+) -> None:
+ '''
+    Allocate an actor-wide service nursery in ``brokerd``
+ such that feeds can be run in the background persistently by
+ the broker backend as needed.
+
+ '''
+ log = _util.get_console_log(
+ loglevel or tractor.current_actor().loglevel,
+ name=f'{_util.subsys}.{brokername}',
+ )
+ # set global for this actor to this new process-wide instance B)
+ _util.log = log
+
+ from piker.data.feed import (
+ _bus,
+ get_feed_bus,
+ )
+ global _bus
+ assert not _bus
+
+ async with trio.open_nursery() as service_nursery:
+ # assign a nursery to the feeds bus for spawning
+ # background tasks from clients
+ get_feed_bus(brokername, service_nursery)
+
+ # unblock caller
+ await ctx.started()
+
+ # we pin this task to keep the feeds manager active until the
+ # parent actor decides to tear it down
+ await trio.sleep_forever()
+
+
+async def spawn_brokerd(
+
+ brokername: str,
+ loglevel: str | None = None,
+
+ **tractor_kwargs,
+
+) -> bool:
+
+ from piker.service import Services
+ from piker.service._util import log # use service mngr log
+
+ log.info(f'Spawning {brokername} broker daemon')
+
+ brokermod = get_brokermod(brokername)
+ dname = f'brokerd.{brokername}'
+
+ extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {})
+ tractor_kwargs.update(extra_tractor_kwargs)
+
+ # ask `pikerd` to spawn a new sub-actor and manage it under its
+ # actor nursery
+ modpath = brokermod.__name__
+ broker_enable = [modpath]
+ for submodname in getattr(
+ brokermod,
+ '__enable_modules__',
+ [],
+ ):
+ subpath = f'{modpath}.{submodname}'
+ broker_enable.append(subpath)
+
+ portal = await Services.actor_n.start_actor(
+ dname,
+ enable_modules=_data_mods + broker_enable,
+ loglevel=loglevel,
+ debug_mode=Services.debug_mode,
+ **tractor_kwargs
+ )
+
+ # non-blocking setup of brokerd service nursery
+ await Services.start_service_task(
+ dname,
+ portal,
+
+ # signature of target root-task endpoint
+ _setup_persistent_brokerd,
+ brokername=brokername,
+ loglevel=loglevel,
+ )
+ return True
+
+
+@acm
+async def maybe_spawn_brokerd(
+
+ brokername: str,
+ loglevel: str | None = None,
+
+ **pikerd_kwargs,
+
+) -> tractor.Portal:
+ '''
+ Helper to spawn a brokerd service *from* a client
+ who wishes to use the sub-actor-daemon.
+
+ '''
+ from piker.service import maybe_spawn_daemon
+
+ async with maybe_spawn_daemon(
+
+ f'brokerd.{brokername}',
+ service_task_target=spawn_brokerd,
+ spawn_args={
+ 'brokername': brokername,
+ },
+ loglevel=loglevel,
+
+ **pikerd_kwargs,
+
+ ) as portal:
+ yield portal
diff --git a/piker/brokers/_util.py b/piker/brokers/_util.py
index d1b2aac5..7e7a3ec7 100644
--- a/piker/brokers/_util.py
+++ b/piker/brokers/_util.py
@@ -15,13 +15,29 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
-Handy utils.
+Handy cross-broker utils.
+
"""
+from functools import partial
+
import json
import asks
import logging
-from ..log import colorize_json
+from ..log import (
+ get_logger,
+ get_console_log,
+ colorize_json,
+)
+subsys: str = 'piker.brokers'
+
+# NOTE: level should be reset by any actor that is spawned
+log = get_logger(subsys)
+
+get_console_log = partial(
+ get_console_log,
+ name=subsys,
+)
class BrokerError(Exception):
@@ -69,7 +85,6 @@ class DataThrottle(BrokerError):
# TODO: add in throttle metrics/feedback
-
def resproc(
resp: asks.response_objects.Response,
log: logging.Logger,
diff --git a/piker/brokers/binance.py b/piker/brokers/binance.py
index 37377136..48b28d6f 100644
--- a/piker/brokers/binance.py
+++ b/piker/brokers/binance.py
@@ -1,5 +1,8 @@
# piker: trading gear for hackers
-# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
+# Copyright (C)
+# Guillermo Rodriguez
+# Tyler Goodlet
+# (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -18,15 +21,19 @@
Binance backend
"""
-from contextlib import asynccontextmanager as acm
+from contextlib import (
+ asynccontextmanager as acm,
+ aclosing,
+)
from datetime import datetime
+from decimal import Decimal
+import itertools
from typing import (
Any, Union, Optional,
AsyncGenerator, Callable,
)
import time
-from trio_util import trio_async_generator
import trio
from trio_typing import TaskStatus
import pendulum
@@ -34,26 +41,30 @@ import asks
from fuzzywuzzy import process as fuzzy
import numpy as np
import tractor
-import wsproto
+from .._cacheables import async_lifo_cache
+from ..accounting._mktinfo import (
+ Asset,
+ MktPair,
+ digits_to_dec,
+)
from .._cacheables import open_cached_client
from ._util import (
resproc,
SymbolNotFound,
DataUnavailable,
)
-from ..log import (
- get_logger,
+from ._util import (
+ log,
get_console_log,
)
from ..data.types import Struct
+from ..data.validate import FeedInit
from ..data._web_bs import (
open_autorecon_ws,
NoBsWs,
)
-log = get_logger(__name__)
-
_url = 'https://api.binance.com'
@@ -88,6 +99,9 @@ _show_wap_in_history = False
# https://binance-docs.github.io/apidocs/spot/en/#exchange-information
+
+# TODO: make this frozen again by pre-processing the
+# filters list to a dict at init time?
class Pair(Struct, frozen=True):
symbol: str
status: str
@@ -114,9 +128,24 @@ class Pair(Struct, frozen=True):
defaultSelfTradePreventionMode: str
allowedSelfTradePreventionModes: list[str]
- filters: list[dict[str, Union[str, int, float]]]
+    filters: dict[
+        str,
+        dict[str, Union[str, int, float]]
+    ]
permissions: list[str]
+ @property
+ def price_tick(self) -> Decimal:
+ # XXX: lul, after manually inspecting the response format we
+ # just directly pick out the info we need
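+        # e.g. a (hypothetical) raw 'tickSize' of '0.00010000' strips
+        # to '0.0001' and is returned as Decimal('0.0001').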
+ step_size: str = self.filters['PRICE_FILTER']['tickSize'].rstrip('0')
+ return Decimal(step_size)
+
+ @property
+ def size_tick(self) -> Decimal:
+ step_size: str = self.filters['LOT_SIZE']['stepSize'].rstrip('0')
+ return Decimal(step_size)
+
class OHLC(Struct):
'''
@@ -147,6 +176,18 @@ class OHLC(Struct):
bar_wap: float = 0.0
+class L1(Struct):
+ # https://binance-docs.github.io/apidocs/spot/en/#individual-symbol-book-ticker-streams
+
+ update_id: int
+ sym: str
+
+ bid: float
+ bsize: float
+ ask: float
+ asize: float
+
+
# convert datetime obj timestamp to unixtime in milliseconds
def binance_timestamp(
when: datetime
@@ -159,7 +200,7 @@ class Client:
def __init__(self) -> None:
self._sesh = asks.Session(connections=4)
self._sesh.base_location = _url
- self._pairs: dict[str, Any] = {}
+ self._pairs: dict[str, Pair] = {}
async def _api(
self,
@@ -173,48 +214,58 @@ class Client:
)
return resproc(resp, log)
- async def symbol_info(
+ async def exch_info(
self,
- sym: Optional[str] = None,
+ sym: str | None = None,
- ) -> dict[str, Any]:
- '''Get symbol info for the exchange.
+ ) -> dict[str, Pair] | Pair:
+ '''
+ Fresh exchange-pairs info query for symbol ``sym: str``:
+ https://binance-docs.github.io/apidocs/spot/en/#exchange-information
'''
- # TODO: we can load from our self._pairs cache
- # on repeat calls...
+ cached_pair = self._pairs.get(sym)
+ if cached_pair:
+ return cached_pair
- # will retrieve all symbols by default
+ # retrieve all symbols by default
params = {}
-
if sym is not None:
sym = sym.lower()
params = {'symbol': sym}
- resp = await self._api(
- 'exchangeInfo',
- params=params,
- )
-
+ resp = await self._api('exchangeInfo', params=params)
entries = resp['symbols']
if not entries:
- raise SymbolNotFound(f'{sym} not found')
+ raise SymbolNotFound(f'{sym} not found:\n{resp}')
- syms = {item['symbol']: item for item in entries}
+ # pre-process .filters field into a table
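+        # e.g. (hypothetically) a raw entry such as
+        #   [{'filterType': 'PRICE_FILTER', 'tickSize': '0.01000000', ...}, ...]
+        # is re-keyed to
+        #   {'PRICE_FILTER': {'filterType': 'PRICE_FILTER', 'tickSize': '0.01000000', ...}, ...}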
+ pairs = {}
+ for item in entries:
+ symbol = item['symbol']
+ filters = {}
+ filters_ls: list = item.pop('filters')
+ for entry in filters_ls:
+ ftype = entry['filterType']
+ filters[ftype] = entry
+
+ pairs[symbol] = Pair(
+ filters=filters,
+ **item,
+ )
+
+ # pairs = {
+ # item['symbol']: Pair(**item) for item in entries
+ # }
+ self._pairs.update(pairs)
if sym is not None:
- return syms[sym]
+ return pairs[sym]
else:
- return syms
+ return self._pairs
- async def cache_symbols(
- self,
- ) -> dict:
- if not self._pairs:
- self._pairs = await self.symbol_info()
-
- return self._pairs
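+    # back-compat alias for the previous method name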
+ symbol_info = exch_info
async def search_symbols(
self,
@@ -224,7 +275,7 @@ class Client:
if self._pairs is not None:
data = self._pairs
else:
- data = await self.symbol_info()
+ data = await self.exch_info()
matches = fuzzy.extractBests(
pattern,
@@ -299,7 +350,8 @@ class Client:
@acm
async def get_client() -> Client:
client = Client()
- await client.cache_symbols()
+ log.info('Caching exchange infos..')
+ await client.exch_info()
yield client
@@ -318,67 +370,93 @@ class AggTrade(Struct):
M: bool # Ignore
-@trio_async_generator
async def stream_messages(
ws: NoBsWs,
) -> AsyncGenerator[NoBsWs, dict]:
- timeouts = 0
- while True:
+ # TODO: match syntax here!
+ msg: dict[str, Any]
+ async for msg in ws:
+ match msg:
+ # for l1 streams binance doesn't add an event type field so
+ # identify those messages by matching keys
+ # https://binance-docs.github.io/apidocs/spot/en/#individual-symbol-book-ticker-streams
+ case {
+ # NOTE: this is never an old value it seems, so
+ # they are always sending real L1 spread updates.
+ 'u': upid, # update id
+ 's': sym,
+ 'b': bid,
+ 'B': bsize,
+ 'a': ask,
+ 'A': asize,
+ }:
+ # TODO: it would be super nice to have a `L1` piker type
+ # which "renders" incremental tick updates from a packed
+ # msg-struct:
+                # - backend msgs, once packed into the type, such that we
+ # can reduce IPC usage but without each backend having
+ # to do that incremental update logic manually B)
+ # - would it maybe be more efficient to use this instead?
+ # https://binance-docs.github.io/apidocs/spot/en/#diff-depth-stream
+ l1 = L1(
+ update_id=upid,
+ sym=sym,
+ bid=bid,
+ bsize=bsize,
+ ask=ask,
+ asize=asize,
+ )
+ l1.typecast()
- with trio.move_on_after(3) as cs:
- msg = await ws.recv_msg()
+ # repack into piker's tick-quote format
+ yield 'l1', {
+ 'symbol': l1.sym,
+ 'ticks': [
+ {
+ 'type': 'bid',
+ 'price': l1.bid,
+ 'size': l1.bsize,
+ },
+ {
+ 'type': 'bsize',
+ 'price': l1.bid,
+ 'size': l1.bsize,
+ },
+ {
+ 'type': 'ask',
+ 'price': l1.ask,
+ 'size': l1.asize,
+ },
+ {
+ 'type': 'asize',
+ 'price': l1.ask,
+ 'size': l1.asize,
+ }
+ ]
+ }
- if cs.cancelled_caught:
-
- timeouts += 1
- if timeouts > 2:
- log.error("binance feed seems down and slow af? rebooting...")
- await ws._connect()
-
- continue
-
- # for l1 streams binance doesn't add an event type field so
- # identify those messages by matching keys
- # https://binance-docs.github.io/apidocs/spot/en/#individual-symbol-book-ticker-streams
-
- if msg.get('u'):
- sym = msg['s']
- bid = float(msg['b'])
- bsize = float(msg['B'])
- ask = float(msg['a'])
- asize = float(msg['A'])
-
- yield 'l1', {
- 'symbol': sym,
- 'ticks': [
- {'type': 'bid', 'price': bid, 'size': bsize},
- {'type': 'bsize', 'price': bid, 'size': bsize},
- {'type': 'ask', 'price': ask, 'size': asize},
- {'type': 'asize', 'price': ask, 'size': asize}
- ]
- }
-
- elif msg.get('e') == 'aggTrade':
-
- # NOTE: this is purely for a definition, ``msgspec.Struct``
- # does not runtime-validate until you decode/encode.
- # see: https://jcristharif.com/msgspec/structs.html#type-validation
- msg = AggTrade(**msg)
-
- # TODO: type out and require this quote format
- # from all backends!
- yield 'trade', {
- 'symbol': msg.s,
- 'last': msg.p,
- 'brokerd_ts': time.time(),
- 'ticks': [{
- 'type': 'trade',
- 'price': float(msg.p),
- 'size': float(msg.q),
- 'broker_ts': msg.T,
- }],
- }
+ # https://binance-docs.github.io/apidocs/spot/en/#aggregate-trade-streams
+ case {
+ 'e': 'aggTrade',
+ }:
+ # NOTE: this is purely for a definition,
+ # ``msgspec.Struct`` does not runtime-validate until you
+ # decode/encode, see:
+ # https://jcristharif.com/msgspec/structs.html#type-validation
+ msg = AggTrade(**msg)
+ msg.typecast()
+ yield 'trade', {
+ 'symbol': msg.s,
+ 'last': msg.p,
+ 'brokerd_ts': time.time(),
+ 'ticks': [{
+ 'type': 'trade',
+ 'price': msg.p,
+ 'size': msg.q,
+ 'broker_ts': msg.T,
+ }],
+ }
def make_sub(pairs: list[str], sub_name: str, uid: int) -> dict[str, str]:
@@ -398,10 +476,12 @@ def make_sub(pairs: list[str], sub_name: str, uid: int) -> dict[str, str]:
@acm
async def open_history_client(
- symbol: str,
+ mkt: MktPair,
) -> tuple[Callable, int]:
+ symbol: str = mkt.bs_fqme
+
# TODO implement history getter for the new storage layer.
async with open_cached_client('binance') as client:
@@ -439,6 +519,35 @@ async def open_history_client(
yield get_ohlc, {'erlangs': 3, 'rate': 3}
+@async_lifo_cache()
+async def get_mkt_info(
+ fqme: str,
+
+) -> tuple[MktPair, Pair]:
+
+ async with open_cached_client('binance') as client:
+
+ pair: Pair = await client.exch_info(fqme.upper())
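+
+        # NOTE: assuming `digits_to_dec()` maps a precision digit
+        # count to its tick size, e.g. 8 -> Decimal('0.00000001').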
+ mkt = MktPair(
+ dst=Asset(
+ name=pair.baseAsset,
+ atype='crypto',
+ tx_tick=digits_to_dec(pair.baseAssetPrecision),
+ ),
+ src=Asset(
+ name=pair.quoteAsset,
+ atype='crypto',
+ tx_tick=digits_to_dec(pair.quoteAssetPrecision),
+ ),
+ price_tick=pair.price_tick,
+ size_tick=pair.size_tick,
+ bs_mktid=pair.symbol,
+ broker='binance',
+ )
+ both = mkt, pair
+ return both
+
+
async def stream_quotes(
send_chan: trio.abc.SendChannel,
@@ -453,67 +562,43 @@ async def stream_quotes(
# XXX: required to propagate ``tractor`` loglevel to piker logging
get_console_log(loglevel or tractor.current_actor().loglevel)
- sym_infos = {}
- uid = 0
-
async with (
- open_cached_client('binance') as client,
send_chan as send_chan,
):
-
- # keep client cached for real-time section
- cache = await client.cache_symbols()
-
+ init_msgs: list[FeedInit] = []
for sym in symbols:
- d = cache[sym.upper()]
- syminfo = Pair(**d) # validation
+ mkt, pair = await get_mkt_info(sym)
- si = sym_infos[sym] = syminfo.to_dict()
- filters = {}
- for entry in syminfo.filters:
- ftype = entry['filterType']
- filters[ftype] = entry
-
- # XXX: after manually inspecting the response format we
- # just directly pick out the info we need
- si['price_tick_size'] = float(
- filters['PRICE_FILTER']['tickSize']
+ # build out init msgs according to latest spec
+ init_msgs.append(
+ FeedInit(mkt_info=mkt)
)
- si['lot_tick_size'] = float(
- filters['LOT_SIZE']['stepSize']
- )
- si['asset_type'] = 'crypto'
- symbol = symbols[0]
-
- init_msgs = {
- # pass back token, and bool, signalling if we're the writer
- # and that history has been written
- symbol: {
- 'symbol_info': sym_infos[sym],
- 'shm_write_opts': {'sum_tick_vml': False},
- 'fqsn': sym,
- },
- }
+ iter_subids = itertools.count()
@acm
- async def subscribe(ws: wsproto.WSConnection):
+ async def subscribe(ws: NoBsWs):
# setup subs
+ subid: int = next(iter_subids)
+
# trade data (aka L1)
# https://binance-docs.github.io/apidocs/spot/en/#symbol-order-book-ticker
- l1_sub = make_sub(symbols, 'bookTicker', uid)
+ l1_sub = make_sub(symbols, 'bookTicker', subid)
await ws.send_msg(l1_sub)
# aggregate (each order clear by taker **not** by maker)
# trades data:
# https://binance-docs.github.io/apidocs/spot/en/#aggregate-trade-streams
- agg_trades_sub = make_sub(symbols, 'aggTrade', uid)
+ agg_trades_sub = make_sub(symbols, 'aggTrade', subid)
await ws.send_msg(agg_trades_sub)
- # ack from ws server
+ # might get ack from ws server, or maybe some
+ # other msg still in transit..
res = await ws.recv_msg()
- assert res['id'] == uid
+        ack_id: int | None = res.get('id')
+        if ack_id is not None:
+            assert ack_id == subid
yield
@@ -527,7 +612,7 @@ async def stream_quotes(
await ws.send_msg({
"method": "UNSUBSCRIBE",
"params": subs,
- "id": uid,
+ "id": subid,
})
# XXX: do we need to ack the unsub?
@@ -543,7 +628,7 @@ async def stream_quotes(
) as ws,
# avoid stream-gen closure from breaking trio..
- stream_messages(ws) as msg_gen,
+ aclosing(stream_messages(ws)) as msg_gen,
):
typ, quote = await anext(msg_gen)
@@ -579,13 +664,13 @@ async def open_symbol_search(
async with open_cached_client('binance') as client:
# load all symbols locally for fast search
- cache = await client.cache_symbols()
+ cache = await client.exch_info()
await ctx.started()
async with ctx.open_stream() as stream:
async for pattern in stream:
- # results = await client.symbol_info(sym=pattern.upper())
+ # results = await client.exch_info(sym=pattern.upper())
matches = fuzzy.extractBests(
pattern,
@@ -593,7 +678,7 @@ async def open_symbol_search(
score_cutoff=50,
)
# repack in dict form
- await stream.send(
- {item[0]['symbol']: item[0]
- for item in matches}
- )
+ await stream.send({
+ item[0].symbol: item[0]
+ for item in matches
+ })
diff --git a/piker/brokers/cli.py b/piker/brokers/cli.py
index f86c679e..1bfb05d6 100644
--- a/piker/brokers/cli.py
+++ b/piker/brokers/cli.py
@@ -28,7 +28,13 @@ import tractor
from ..cli import cli
from .. import watchlists as wl
-from ..log import get_console_log, colorize_json, get_logger
+from ..log import (
+ colorize_json,
+)
+from ._util import (
+ log,
+ get_console_log,
+)
from ..service import (
maybe_spawn_brokerd,
maybe_open_pikerd,
@@ -38,9 +44,7 @@ from ..brokers import (
get_brokermod,
data,
)
-
-log = get_logger('cli')
-DEFAULT_BROKER = 'questrade'
+DEFAULT_BROKER = 'binance'
_config_dir = click.get_app_dir('piker')
_watchlists_data_path = os.path.join(_config_dir, 'watchlists.json')
diff --git a/piker/brokers/core.py b/piker/brokers/core.py
index 3e9e1614..b3651c1d 100644
--- a/piker/brokers/core.py
+++ b/piker/brokers/core.py
@@ -26,15 +26,12 @@ from typing import List, Dict, Any, Optional
import trio
-from ..log import get_logger
+from ._util import log
from . import get_brokermod
from ..service import maybe_spawn_brokerd
from .._cacheables import open_cached_client
-log = get_logger(__name__)
-
-
async def api(brokername: str, methname: str, **kwargs) -> dict:
"""Make (proxy through) a broker API call by name and return its result.
"""
diff --git a/piker/brokers/data.py b/piker/brokers/data.py
index 5183d2c4..6d178b51 100644
--- a/piker/brokers/data.py
+++ b/piker/brokers/data.py
@@ -41,13 +41,13 @@ import tractor
from tractor.experimental import msgpub
from async_generator import asynccontextmanager
-from ..log import get_logger, get_console_log
+from ._util import (
+ log,
+ get_console_log,
+)
from . import get_brokermod
-log = get_logger(__name__)
-
-
async def wait_for_network(
net_func: Callable,
sleep: int = 1
diff --git a/piker/brokers/deribit/feed.py b/piker/brokers/deribit/feed.py
index deb0422f..a9420402 100644
--- a/piker/brokers/deribit/feed.py
+++ b/piker/brokers/deribit/feed.py
@@ -62,9 +62,10 @@ log = get_logger(__name__)
@acm
async def open_history_client(
- instrument: str,
+ mkt: MktPair,
) -> tuple[Callable, int]:
+    instrument: str = mkt.bs_fqme
# TODO implement history getter for the new storage layer.
async with open_cached_client('deribit') as client:
diff --git a/piker/brokers/ib/README.rst b/piker/brokers/ib/README.rst
index c8661317..d56b52ca 100644
--- a/piker/brokers/ib/README.rst
+++ b/piker/brokers/ib/README.rst
@@ -127,7 +127,7 @@ your ``pps.toml`` file will have position entries like,
[ib.algopaper."mnq.globex.20221216"]
size = -1.0
ppu = 12423.630576923071
- bsuid = 515416577
+ bs_mktid = 515416577
expiry = "2022-12-16T00:00:00+00:00"
clears = [
{ dt = "2022-08-31T18:54:46+00:00", ppu = 12423.630576923071, accum_size = -19.0, price = 12372.75, size = 1.0, cost = 0.57, tid = "0000e1a7.630f5e5a.01.01" },
diff --git a/piker/brokers/ib/__init__.py b/piker/brokers/ib/__init__.py
index 48024dc8..80bc228f 100644
--- a/piker/brokers/ib/__init__.py
+++ b/piker/brokers/ib/__init__.py
@@ -35,7 +35,6 @@ from .feed import (
)
from .broker import (
trades_dialogue,
- norm_trade_records,
)
__all__ = [
@@ -46,14 +45,23 @@ __all__ = [
'stream_quotes',
]
-
-# tractor RPC enable arg
-__enable_modules__: list[str] = [
+_brokerd_mods: list[str] = [
'api',
- 'feed',
'broker',
]
+_datad_mods: list[str] = [
+ 'feed',
+]
+
+
+# tractor RPC enable arg
+__enable_modules__: list[str] = (
+ _brokerd_mods
+ +
+ _datad_mods
+)
+
# passed to ``tractor.ActorNursery.start_actor()``
_spawn_kwargs = {
'infect_asyncio': True,
diff --git a/piker/brokers/ib/_flex_reports.py b/piker/brokers/ib/_flex_reports.py
new file mode 100644
index 00000000..2f34d037
--- /dev/null
+++ b/piker/brokers/ib/_flex_reports.py
@@ -0,0 +1,191 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""
+"FLEX" report processing utils.
+
+"""
+from bidict import bidict
+import pendulum
+from pprint import pformat
+from typing import Any
+
+from .api import (
+ get_config,
+ log,
+)
+from piker.accounting import (
+ open_trade_ledger,
+)
+
+
+def parse_flex_dt(
+ record: str,
+) -> pendulum.datetime:
+ '''
+ Parse stupid flex record datetime stamps for the `dateTime` field..
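+
+    e.g. a (hypothetical) raw stamp of '20221229;203912' parses to
+    2022-12-29T20:39:12.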
+
+ '''
+ date, ts = record.split(';')
+ dt = pendulum.parse(date)
+ ts = f'{ts[:2]}:{ts[2:4]}:{ts[4:]}'
+ tsdt = pendulum.parse(ts)
+ return dt.set(hour=tsdt.hour, minute=tsdt.minute, second=tsdt.second)
+
+
+def flex_records_to_ledger_entries(
+ accounts: bidict,
+ trade_entries: list[object],
+
+) -> dict:
+ '''
+ Convert flex report entry objects into ``dict`` form, pretty much
+ straight up without modification except add a `pydatetime` field
+ from the parsed timestamp.
+
+ '''
+ trades_by_account = {}
+ for t in trade_entries:
+ entry = t.__dict__
+
+ # XXX: LOL apparently ``toml`` has a bug
+ # where a section key error will show up in the write
+ # if you leave a table key as an `int`? So i guess
+ # cast to strs for all keys..
+
+ # oddly for some so-called "BookTrade" entries
+ # this field seems to be blank, no cuckin clue.
+ # trade['ibExecID']
+ tid = str(entry.get('ibExecID') or entry['tradeID'])
+ # date = str(entry['tradeDate'])
+
+        # XXX: is it going to cause problems if an account name
+        # gets lost? The user should be able to find it based
+ # on the actual exec history right?
+ acctid = accounts[str(entry['accountId'])]
+
+ # probably a flex record with a wonky non-std timestamp..
+ dt = entry['pydatetime'] = parse_flex_dt(entry['dateTime'])
+ entry['datetime'] = str(dt)
+
+ if not tid:
+ # this is likely some kind of internal adjustment
+ # transaction, likely one of the following:
+ # - an expiry event that will show a "book trade" indicating
+ # some adjustment to cash balances: zeroing or itm settle.
+ # - a manual cash balance position adjustment likely done by
+ # the user from the accounts window in TWS where they can
+ # manually set the avg price and size:
+ # https://api.ibkr.com/lib/cstools/faq/web1/index.html#/tag/DTWS_ADJ_AVG_COST
+ log.warning(f'Skipping ID-less ledger entry:\n{pformat(entry)}')
+ continue
+
+ trades_by_account.setdefault(
+ acctid, {}
+ )[tid] = entry
+
+ for acctid in trades_by_account:
+ trades_by_account[acctid] = dict(sorted(
+ trades_by_account[acctid].items(),
+ key=lambda entry: entry[1]['pydatetime'],
+ ))
+
+ return trades_by_account
+
+
+def load_flex_trades(
+ path: str | None = None,
+
+) -> dict[str, Any]:
+
+ from ib_insync import flexreport, util
+
+ conf = get_config()
+
+ if not path:
+ # load ``brokers.toml`` and try to get the flex
+ # token and query id that must be previously defined
+ # by the user.
+ token = conf.get('flex_token')
+ if not token:
+ raise ValueError(
+                'You must specify a ``flex_token`` field in your '
+                '`brokers.toml` in order to load your trade log; see our '
+                'instructions for how to set this up here:\n'
+ 'PUT LINK HERE!'
+ )
+
+ qid = conf['flex_trades_query_id']
+
+ # TODO: hack this into our logging
+ # system like we do with the API client..
+ util.logToConsole()
+
+ # TODO: rewrite the query part of this with async..httpx?
+ report = flexreport.FlexReport(
+ token=token,
+ queryId=qid,
+ )
+
+ else:
+ # XXX: another project we could potentially look at,
+ # https://pypi.org/project/ibflex/
+ report = flexreport.FlexReport(path=path)
+
+ trade_entries = report.extract('Trade')
+ ln = len(trade_entries)
+ log.info(f'Loaded {ln} trades from flex query')
+
+ trades_by_account = flex_records_to_ledger_entries(
+ conf['accounts'].inverse, # reverse map to user account names
+ trade_entries,
+ )
+
+ ledger_dict: dict | None = None
+
+ for acctid in trades_by_account:
+ trades_by_id = trades_by_account[acctid]
+
+ with open_trade_ledger('ib', acctid) as ledger_dict:
+ tid_delta = set(trades_by_id) - set(ledger_dict)
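+            # tids present in the flex report but not yet in the
+            # on-disk ledger; only these get written below.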
+ log.info(
+ 'New trades detected\n'
+ f'{pformat(tid_delta)}'
+ )
+ if tid_delta:
+ sorted_delta = dict(sorted(
+ {tid: trades_by_id[tid] for tid in tid_delta}.items(),
+ key=lambda entry: entry[1].pop('pydatetime'),
+ ))
+ ledger_dict.update(sorted_delta)
+
+ return ledger_dict
+
+
+if __name__ == '__main__':
+ import sys
+ import os
+
+ args = sys.argv
+ if len(args) > 1:
+ args = args[1:]
+ for arg in args:
+ path = os.path.abspath(arg)
+ load_flex_trades(path=path)
+ else:
+ # expect brokers.toml to have an entry and
+ # pull from the web service.
+ load_flex_trades()
diff --git a/piker/brokers/ib/_util.py b/piker/brokers/ib/_util.py
index 14fd4d0b..585ea18d 100644
--- a/piker/brokers/ib/_util.py
+++ b/piker/brokers/ib/_util.py
@@ -19,14 +19,23 @@
runnable script-programs.
'''
-from typing import Literal
+from __future__ import annotations
+from functools import partial
+from typing import (
+ Literal,
+ TYPE_CHECKING,
+)
import subprocess
import tractor
-from piker.log import get_logger
+from .._util import log
-log = get_logger(__name__)
+if TYPE_CHECKING:
+ from .api import (
+ MethodProxy,
+ ib_Client
+ )
_reset_tech: Literal[
@@ -41,7 +50,8 @@ _reset_tech: Literal[
async def data_reset_hack(
- reset_type: str = 'data',
+ vnc_host: str,
+ reset_type: Literal['data', 'connection'],
) -> None:
'''
@@ -71,18 +81,40 @@ async def data_reset_hack(
that need to be wrangle.
'''
+
+    no_setup_msg: str = (
+        f'No data reset hack test setup for {vnc_host}!\n'
+ 'See setup @\n'
+ 'https://github.com/pikers/piker/tree/master/piker/brokers/ib'
+ )
global _reset_tech
match _reset_tech:
case 'vnc':
try:
- await tractor.to_asyncio.run_task(vnc_click_hack)
+ await tractor.to_asyncio.run_task(
+ partial(
+ vnc_click_hack,
+ host=vnc_host,
+ )
+ )
except OSError:
- _reset_tech = 'i3ipc_xdotool'
+ if vnc_host != 'localhost':
+ log.warning(no_setup_msg)
+ return False
+
+ try:
+ import i3ipc
+ except ModuleNotFoundError:
+ log.warning(no_setup_msg)
+ return False
+
try:
i3ipc_xdotool_manual_click_hack()
+ _reset_tech = 'i3ipc_xdotool'
return True
except OSError:
+ log.exception(no_setup_msg)
return False
case 'i3ipc_xdotool':
@@ -96,19 +128,32 @@ async def data_reset_hack(
async def vnc_click_hack(
+ host: str = 'localhost',
reset_type: str = 'data'
) -> None:
'''
- Reset the data or netowork connection for the VNC attached
+ Reset the data or network connection for the VNC attached
ib gateway using magic combos.
'''
- key = {'data': 'f', 'connection': 'r'}[reset_type]
+ try:
+ import asyncvnc
+ except ModuleNotFoundError:
+ log.warning(
+ "In order to leverage `piker`'s built-in data reset hacks, install "
+ "the `asyncvnc` project: https://github.com/barneygale/asyncvnc"
+ )
+ return
- import asyncvnc
+ # two different hot keys which trigger diff types of reset
+ # requests B)
+ key = {
+ 'data': 'f',
+ 'connection': 'r'
+ }[reset_type]
async with asyncvnc.connect(
- 'localhost',
+ host,
port=3003,
# password='ibcansmbz',
) as client:
@@ -124,9 +169,11 @@ async def vnc_click_hack(
def i3ipc_xdotool_manual_click_hack() -> None:
- import i3ipc
-
i3 = i3ipc.Connection()
+
+ # TODO: might be worth offering some kinda api for grabbing
+ # the window id from the pid?
+ # https://stackoverflow.com/a/2250879
t = i3.get_tree()
orig_win_id = t.find_focused().window
diff --git a/piker/brokers/ib/api.py b/piker/brokers/ib/api.py
index bfa66a9d..8636ddd2 100644
--- a/piker/brokers/ib/api.py
+++ b/piker/brokers/ib/api.py
@@ -14,21 +14,27 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-"""
-``ib`` core API client machinery; mostly sane wrapping around
-``ib_insync``.
+'''
+Core API client machinery; mostly sane/useful wrapping around `ib_insync`..
-"""
+'''
from __future__ import annotations
-from contextlib import asynccontextmanager as acm
+from contextlib import (
+ asynccontextmanager as acm,
+ contextmanager as cm,
+)
from contextlib import AsyncExitStack
from dataclasses import asdict, astuple
from datetime import datetime
-from functools import partial
+from functools import (
+ partial,
+ # lru_cache,
+)
import itertools
from math import isnan
from typing import (
Any,
+ Callable,
Optional,
Union,
)
@@ -44,6 +50,7 @@ import trio
import tractor
from tractor import to_asyncio
import pendulum
+from eventkit import Event
import ib_insync as ibis
from ib_insync.contract import (
Contract,
@@ -67,13 +74,13 @@ from ib_insync.client import Client as ib_Client
import numpy as np
from piker import config
-from piker.log import get_logger
+from piker.brokers._util import (
+ log,
+ get_logger,
+)
from piker.data._source import base_ohlc_dtype
-log = get_logger(__name__)
-
-
_time_units = {
's': ' sec',
'm': ' mins',
@@ -130,11 +137,13 @@ class NonShittyWrapper(Wrapper):
class NonShittyIB(ibis.IB):
- """The beginning of overriding quite a few decisions in this lib.
+ '''
+ The beginning of overriding quite a few decisions in this lib.
- Don't use datetimes
- Don't use named tuples
- """
+
+ '''
def __init__(self):
# override `ib_insync` internal loggers so we can see wtf
@@ -172,6 +181,8 @@ _adhoc_cmdty_set = {
'xagusd.cmdty', # silver spot
}
+# NOTE: if you aren't seeing one of these symbols' futures contracts
+# show up, it's likely the `.<venue>` part is wrong!
_adhoc_futes_set = {
# equities
@@ -183,6 +194,7 @@ _adhoc_futes_set = {
# crypto$
'brr.cme',
+ 'mbt.cme', # micro
'ethusdrr.cme',
# agriculture
@@ -197,7 +209,7 @@ _adhoc_futes_set = {
'mgc.comex', # micro
# oil & gas
- 'cl.comex',
+ 'cl.nymex',
'ni.comex', # silver futes
'qi.comex', # mini-silver futes
@@ -311,6 +323,22 @@ _samplings: dict[int, tuple[str, str]] = {
}
+@cm
+def remove_handler_on_err(
+ event: Event,
+ handler: Callable,
+) -> None:
+ try:
+ yield
+ except trio.BrokenResourceError:
+ # XXX: eventkit's ``Event.emit()`` for whatever redic
+ # reason will catch and ignore regular exceptions
+ # resulting in tracebacks spammed to console..
+ # Manually do the dereg ourselves.
+ log.exception(f'Disconnected from {event} updates')
+ event.disconnect(handler)
+
+
class Client:
'''
IB wrapped for our broker backend API.
@@ -330,7 +358,7 @@ class Client:
self.ib.RaiseRequestErrors = True
# contract cache
- self._feeds: dict[str, trio.abc.SendChannel] = {}
+ self._cons: dict[str, Contract] = {}
# NOTE: the ib.client here is "throttled" to 45 rps by default
@@ -359,7 +387,7 @@ class Client:
async def bars(
self,
- fqsn: str,
+ fqme: str,
# EST in ISO 8601 format is required... below is EPOCH
start_dt: Union[datetime, str] = "1970-01-01T00:00:00.000000-05:00",
@@ -376,7 +404,7 @@ class Client:
) -> tuple[BarDataList, np.ndarray, pendulum.Duration]:
'''
- Retreive OHLCV bars for a fqsn over a range to the present.
+        Retrieve OHLCV bars for an fqme over a range up to the present.
'''
# See API docs here:
@@ -386,8 +414,7 @@ class Client:
bar_size, duration, dt_duration = _samplings[sample_period_s]
global _enters
- # log.info(f'REQUESTING BARS {_enters} @ end={end_dt}')
- print(
+ log.info(
f"REQUESTING {duration}'s worth {bar_size} BARS\n"
f'{_enters} @ end={end_dt}"'
)
@@ -397,7 +424,7 @@ class Client:
_enters += 1
- contract = (await self.find_contracts(fqsn))[0]
+ contract = (await self.find_contracts(fqme))[0]
bars_kwargs.update(getattr(contract, 'bars_kwargs', {}))
bars = await self.ib.reqHistoricalDataAsync(
@@ -473,7 +500,7 @@ class Client:
# nested dataclass we probably don't need and that won't
# IPC serialize..
d.secIdList = ''
- key, calc_price = con2fqsn(d.contract)
+ key, calc_price = con2fqme(d.contract)
details[key] = d
return details
@@ -614,15 +641,22 @@ class Client:
return con
+ # TODO: make this work with our `MethodProxy`..
+ # @lru_cache(maxsize=None)
async def get_con(
self,
conid: int,
) -> Contract:
- return await self.ib.qualifyContractsAsync(
- ibis.Contract(conId=conid)
- )
+ try:
+ return self._cons[conid]
+ except KeyError:
+ con: Contract = await self.ib.qualifyContractsAsync(
+ ibis.Contract(conId=conid)
+ )
+ self._cons[conid] = con
+ return con
- def parse_patt2fqsn(
+ def parse_patt2fqme(
self,
pattern: str,
@@ -641,11 +675,11 @@ class Client:
currency = ''
- # fqsn parsing stage
+ # fqme parsing stage
# ------------------
if '.ib' in pattern:
- from ..data._source import unpack_fqsn
- _, symbol, expiry = unpack_fqsn(pattern)
+ from piker.accounting import unpack_fqme
+ _, symbol, venue, expiry = unpack_fqme(pattern)
else:
symbol = pattern
@@ -687,7 +721,7 @@ class Client:
) -> Contract:
if pattern is not None:
- symbol, currency, exch, expiry = self.parse_patt2fqsn(
+ symbol, currency, exch, expiry = self.parse_patt2fqme(
pattern,
)
sectype = ''
@@ -722,7 +756,7 @@ class Client:
)
elif (
- exch in ('IDEALPRO')
+ exch in {'IDEALPRO'}
or sectype == 'CASH'
):
# if '/' in symbol:
@@ -806,14 +840,14 @@ class Client:
async def get_head_time(
self,
- fqsn: str,
+ fqme: str,
) -> datetime:
'''
Return the first datetime stamp for ``contract``.
'''
- contract = (await self.find_contracts(fqsn))[0]
+ contract = (await self.find_contracts(fqme))[0]
return await self.ib.reqHeadTimeStampAsync(
contract,
whatToShow='TRADES',
@@ -825,29 +859,34 @@ class Client:
self,
symbol: str,
- ) -> tuple[Contract, Ticker, ContractDetails]:
+ ) -> tuple[
+ Contract,
+ ContractDetails,
+ ]:
+ '''
+        Get summary (meta) data for a given symbol str including
+        the ``Contract`` and its ``ContractDetails``.
+ '''
contract = (await self.find_contracts(symbol))[0]
+ details_fute = self.ib.reqContractDetailsAsync(contract)
+ details = (await details_fute)[0]
+ return contract, details
+
+ async def get_quote(
+ self,
+ contract: Contract,
+
+ ) -> Ticker:
+ '''
+ Return a single (snap) quote for symbol.
+
+ '''
ticker: Ticker = self.ib.reqMktData(
contract,
snapshot=True,
)
- details_fute = self.ib.reqContractDetailsAsync(contract)
- details = (await details_fute)[0]
-
- return contract, ticker, details
-
- async def get_quote(
- self,
- symbol: str,
-
- ) -> tuple[Contract, Ticker, ContractDetails]:
- '''
- Return a single quote for symbol.
-
- '''
- contract, ticker, details = await self.get_sym_details(symbol)
-
ready = ticker.updateEvent
# ensure a last price gets filled in before we deliver quote
@@ -864,21 +903,22 @@ class Client:
else:
if not warnset:
log.warning(
- f'Quote for {symbol} timed out: market is closed?'
+ f'Quote for {contract} timed out: market is closed?'
)
warnset = True
else:
- log.info(f'Got first quote for {symbol}')
+ log.info(f'Got first quote for {contract}')
break
else:
if not warnset:
log.warning(
- f'Symbol {symbol} is not returning a quote '
- 'it may be outside trading hours?')
+                    f'Contract {contract} is not returning a quote, '
+ 'it may be outside trading hours?'
+ )
warnset = True
- return contract, ticker, details
+ return ticker
# async to be consistent for the client proxy, and cuz why not.
def submit_limit(
@@ -1008,6 +1048,21 @@ class Client:
self.ib.errorEvent.connect(push_err)
+ api_err = self.ib.client.apiError
+
+ def report_api_err(msg: str) -> None:
+ with remove_handler_on_err(
+ api_err,
+ report_api_err,
+ ):
+ to_trio.send_nowait((
+ 'error',
+ msg,
+ ))
+ api_err.clear() # drop msg history
+
+ api_err.connect(report_api_err)
+
def positions(
self,
account: str = '',
@@ -1019,13 +1074,13 @@ class Client:
return self.ib.positions(account=account)
-def con2fqsn(
+def con2fqme(
con: Contract,
_cache: dict[int, (str, bool)] = {}
) -> tuple[str, bool]:
'''
- Convert contracts to fqsn-style strings to be used both in symbol-search
+ Convert contracts to fqme-style strings to be used both in symbol-search
matching and as feed tokens passed to the front end data feed layer.
Previously seen contracts are cached by id.
@@ -1085,12 +1140,12 @@ def con2fqsn(
if expiry:
suffix += f'.{expiry}'
- fqsn_key = symbol.lower()
+ fqme_key = symbol.lower()
if suffix:
- fqsn_key = '.'.join((fqsn_key, suffix)).lower()
+ fqme_key = '.'.join((fqme_key, suffix)).lower()
- _cache[con.conId] = fqsn_key, calc_price
- return fqsn_key, calc_price
+ _cache[con.conId] = fqme_key, calc_price
+ return fqme_key, calc_price
# per-actor API ep caching
@@ -1137,7 +1192,7 @@ async def load_aio_clients(
# the API TCP in `ib_insync` connection can be flaky af so instead
# retry a few times to get the client going..
connect_retries: int = 3,
- connect_timeout: float = 0.5,
+ connect_timeout: float = 1,
disconnect_on_exit: bool = True,
) -> dict[str, Client]:
@@ -1191,9 +1246,14 @@ async def load_aio_clients(
for host, port in combos:
sockaddr = (host, port)
+
+ maybe_client = _client_cache.get(sockaddr)
if (
- sockaddr in _client_cache
- or sockaddr in _scan_ignore
+ sockaddr in _scan_ignore
+ or (
+ maybe_client
+ and maybe_client.ib.isConnected()
+ )
):
continue
@@ -1204,9 +1264,9 @@ async def load_aio_clients(
await ib.connectAsync(
host,
port,
- clientId=client_id,
+ clientId=client_id + i,
- # this timeout is sensative on windows and will
+ # this timeout is sensitive on windows and will
# fail without a good "timeout error" so be
# careful.
timeout=connect_timeout,
@@ -1230,15 +1290,10 @@ async def load_aio_clients(
OSError,
) as ce:
_err = ce
-
- if i > 8:
- # cache logic to avoid rescanning if we already have all
- # clients loaded.
- _scan_ignore.add(sockaddr)
- raise
-
log.warning(
- f'Failed to connect on {port} for {i} time, retrying...')
+ f'Failed to connect on {port} for {i} time with,\n'
+ f'{ib.client.apiError.value()}\n'
+ 'retrying with a new client id..')
# Pre-collect all accounts available for this
# connection and map account names to this client
@@ -1299,19 +1354,13 @@ async def load_clients_for_trio(
a ``tractor.to_asyncio.open_channel_from()``.
'''
- global _accounts2clients
+ async with load_aio_clients() as accts2clients:
- if _accounts2clients:
- to_trio.send_nowait(_accounts2clients)
+ to_trio.send_nowait(accts2clients)
+
+ # TODO: maybe a sync event to wait on instead?
await asyncio.sleep(float('inf'))
- else:
- async with load_aio_clients() as accts2clients:
- to_trio.send_nowait(accts2clients)
-
- # TODO: maybe a sync event to wait on instead?
- await asyncio.sleep(float('inf'))
-
@acm
async def open_client_proxies() -> tuple[
@@ -1400,6 +1449,14 @@ class MethodProxy:
while not chan.closed():
# send through method + ``kwargs: dict`` as pair
msg = await chan.receive()
+
+ # TODO: implement reconnect functionality like
+ # in our `.data._web_bs.NoBsWs`
+ # try:
+ # msg = await chan.receive()
+ # except ConnectionError:
+ # self.reset()
+
# print(f'NEXT MSG: {msg}')
# TODO: py3.10 ``match:`` syntax B)
@@ -1451,6 +1508,7 @@ async def open_aio_client_method_relay(
) -> None:
+ # sync with `open_client_proxy()` caller
to_trio.send_nowait(client)
# TODO: separate channel for error handling?
@@ -1460,25 +1518,34 @@ async def open_aio_client_method_relay(
# back results
while not to_trio._closed:
msg = await from_trio.get()
- if msg is None:
- print('asyncio PROXY-RELAY SHUTDOWN')
- break
- meth_name, kwargs = msg
- meth = getattr(client, meth_name)
+ match msg:
+ case None: # termination sentinel
+ print('asyncio PROXY-RELAY SHUTDOWN')
+ break
- try:
- resp = await meth(**kwargs)
- # echo the msg back
- to_trio.send_nowait({'result': resp})
+ case (meth_name, kwargs):
+ meth_name, kwargs = msg
+ meth = getattr(client, meth_name)
- except (
- RequestError,
+ try:
+ resp = await meth(**kwargs)
+ # echo the msg back
+ to_trio.send_nowait({'result': resp})
- # TODO: relay all errors to trio?
- # BaseException,
- ) as err:
- to_trio.send_nowait({'exception': err})
+ except (
+ RequestError,
+
+ # TODO: relay all errors to trio?
+ # BaseException,
+ ) as err:
+ to_trio.send_nowait({'exception': err})
+
+ case {'error': content}:
+ to_trio.send_nowait({'exception': content})
+
+ case _:
+ raise ValueError(f'Unhandled msg {msg}')
@acm
@@ -1509,7 +1576,8 @@ async def open_client_proxy(
# mock all remote methods on ib ``Client``.
for name, method in inspect.getmembers(
- Client, predicate=inspect.isfunction
+ Client,
+ predicate=inspect.isfunction,
):
if '_' == name[0]:
continue
diff --git a/piker/brokers/ib/broker.py b/piker/brokers/ib/broker.py
index 56756a76..73477c0a 100644
--- a/piker/brokers/ib/broker.py
+++ b/piker/brokers/ib/broker.py
@@ -13,6 +13,7 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
"""
Order and trades endpoints for use with ``piker``'s EMS.
@@ -21,6 +22,7 @@ from __future__ import annotations
from bisect import insort
from contextlib import ExitStack
from dataclasses import asdict
+from decimal import Decimal
from functools import partial
from pprint import pformat
import time
@@ -37,6 +39,7 @@ from trio_typing import TaskStatus
import tractor
from ib_insync.contract import (
Contract,
+ Option,
)
from ib_insync.order import (
Trade,
@@ -51,14 +54,17 @@ from ib_insync.objects import Position as IbPosition
import pendulum
from piker import config
-from piker.pp import (
+from piker.accounting import (
+ dec_digits,
+ digits_to_dec,
Position,
Transaction,
open_trade_ledger,
+ iter_by_dt,
open_pps,
PpTable,
)
-from piker.log import get_console_log
+from .._util import get_console_log
from piker.clearing._messages import (
Order,
Status,
@@ -70,36 +76,39 @@ from piker.clearing._messages import (
BrokerdFill,
BrokerdError,
)
-from piker.data._source import (
- Symbol,
- float_digits,
+from piker.accounting import (
+ MktPair,
)
from .api import (
_accounts2clients,
- con2fqsn,
+ con2fqme,
log,
get_config,
open_client_proxies,
Client,
MethodProxy,
)
+from ._flex_reports import parse_flex_dt
def pack_position(
pos: IbPosition
-) -> dict[str, Any]:
+) -> tuple[
+ str,
+ dict[str, Any]
+]:
con = pos.contract
- fqsn, calc_price = con2fqsn(con)
+ fqme, calc_price = con2fqme(con)
# TODO: options contracts into a sane format..
return (
- con.conId,
+ str(con.conId),
BrokerdPosition(
broker='ib',
account=pos.account,
- symbol=fqsn,
+ symbol=fqme,
currency=con.currency,
size=float(pos.position),
avg_price=float(pos.avgCost) / float(con.multiplier or 1.0),
@@ -281,18 +290,21 @@ async def recv_trade_updates(
async def update_ledger_from_api_trades(
trade_entries: list[dict[str, Any]],
client: Union[Client, MethodProxy],
+ accounts_def_inv: bidict[str, str],
) -> tuple[
dict[str, Transaction],
dict[str, dict],
]:
-
# XXX; ERRGGG..
# pack in the "primary/listing exchange" value from a
# contract lookup since it seems this isn't available by
# default from the `.fills()` method endpoint...
for entry in trade_entries:
condict = entry['contract']
+ # print(
+ # f"{condict['symbol']}: GETTING CONTRACT INFO!\n"
+ # )
conid = condict['conId']
pexch = condict['primaryExchange']
@@ -310,9 +322,8 @@ async def update_ledger_from_api_trades(
# pack in the ``Contract.secType``
entry['asset_type'] = condict['secType']
- conf = get_config()
entries = api_trades_to_ledger_entries(
- conf['accounts'].inverse,
+ accounts_def_inv,
trade_entries,
)
# normalize recent session's trades to the `Transaction` type
@@ -334,15 +345,17 @@ async def update_and_audit_msgs(
) -> list[BrokerdPosition]:
msgs: list[BrokerdPosition] = []
+ p: Position
for p in pps:
- bsuid = p.bsuid
+ bs_mktid = p.bs_mktid
# retreive equivalent ib reported position message
# for comparison/audit versus the piker equivalent
# breakeven pp calcs.
- ibppmsg = cids2pps.get((acctid, bsuid))
+ ibppmsg = cids2pps.get((acctid, bs_mktid))
if ibppmsg:
+ symbol = ibppmsg.symbol
msg = BrokerdPosition(
broker='ib',
@@ -353,13 +366,16 @@ async def update_and_audit_msgs(
# table..
account=ibppmsg.account,
# XXX: the `.ib` is stripped..?
- symbol=ibppmsg.symbol,
+ symbol=symbol,
currency=ibppmsg.currency,
size=p.size,
avg_price=p.ppu,
)
msgs.append(msg)
+ ibfmtmsg = pformat(ibppmsg.to_dict())
+ pikerfmtmsg = pformat(msg.to_dict())
+
if validate:
ibsize = ibppmsg.size
pikersize = msg.size
@@ -379,26 +395,24 @@ async def update_and_audit_msgs(
# raise ValueError(
log.error(
- f'POSITION MISMATCH ib <-> piker ledger:\n'
- f'ib: {ibppmsg}\n'
- f'piker: {msg}\n'
- f'reverse_split_ratio: {reverse_split_ratio}\n'
- f'split_ratio: {split_ratio}\n\n'
- 'FIGURE OUT WHY TF YOUR LEDGER IS OFF!?!?\n\n'
+ f'Pos mismatch in ib vs. the piker ledger!\n'
+ f'IB:\n{ibfmtmsg}\n\n'
+ f'PIKER:\n{pikerfmtmsg}\n\n'
'If you are expecting a (reverse) split in this '
- 'instrument you should probably put the following '
- f'in the `pps.toml` section:\n{entry}'
+                    'instrument you should probably put the following '
+ 'in the `pps.toml` section:\n'
+ f'{entry}\n'
+ # f'reverse_split_ratio: {reverse_split_ratio}\n'
+ # f'split_ratio: {split_ratio}\n\n'
)
msg.size = ibsize
if ibppmsg.avg_price != msg.avg_price:
-
- # TODO: make this a "propoganda" log level?
+ # TODO: make this a "propaganda" log level?
log.warning(
- 'The mega-cucks at IB want you to believe with their '
- f'"FIFO" positioning for {msg.symbol}:\n'
- f'"ib" mega-cucker avg price: {ibppmsg.avg_price}\n'
- f'piker, LIFO breakeven PnL price: {msg.avg_price}'
+ f'IB "FIFO" avg price for {msg.symbol} is DIFF:\n'
+ f'ib: {ibppmsg.avg_price}\n'
+ f'piker: {msg.avg_price}'
)
else:
@@ -414,7 +428,7 @@ async def update_and_audit_msgs(
# right since `.broker` is already included?
account=f'ib.{acctid}',
# XXX: the `.ib` is stripped..?
- symbol=p.symbol.front_fqsn(),
+ symbol=p.mkt.fqme,
# currency=ibppmsg.currency,
size=p.size,
avg_price=p.ppu,
@@ -422,16 +436,89 @@ async def update_and_audit_msgs(
if validate and p.size:
# raise ValueError(
log.error(
- f'UNEXPECTED POSITION says ib:\n'
- f'piker: {msg}\n'
- 'YOU SHOULD FIGURE OUT WHY TF YOUR LEDGER IS OFF!?\n'
- 'THEY LIQUIDATED YOU OR YOUR MISSING LEDGER RECORDS!?'
+ f'UNEXPECTED POSITION says IB => {msg.symbol}\n'
+ 'Maybe they LIQUIDATED YOU or you are missing ledger entries?\n'
)
msgs.append(msg)
return msgs
+async def aggr_open_orders(
+ order_msgs: list[Status],
+ client: Client,
+ proxy: MethodProxy,
+ accounts_def: bidict[str, str],
+
+) -> list[Status]:
+ '''
+ Collect all open orders from client and fill in `order_msgs: list`.
+
+ '''
+ trades: list[Trade] = client.ib.openTrades()
+ for trade in trades:
+ order = trade.order
+ quant = trade.order.totalQuantity
+ action = order.action.lower()
+ size = {
+ 'sell': -1,
+ 'buy': 1,
+ }[action] * quant
+ con = trade.contract
+
+ # TODO: in the case of the SMART venue (aka ib's
+ # router-clearing sys) we probably should handle
+ # showing such orders overtop of the fqme for the
+ # primary exchange, how to map this easily is going
+ # to be a bit tricky though?
+ deats = await proxy.con_deats(contracts=[con])
+ fqme = list(deats)[0]
+
+ reqid = order.orderId
+
+ # TODO: maybe embed a ``BrokerdOrder`` instead
+ # since then we can directly load it on the client
+ # side in the order mode loop?
+ msg = Status(
+ time_ns=time.time_ns(),
+ resp='open',
+ oid=str(reqid),
+ reqid=reqid,
+
+ # embedded order info
+ req=Order(
+ action=action,
+ exec_mode='live',
+ oid=str(reqid),
+ symbol=fqme,
+ account=accounts_def.inverse[order.account],
+ price=order.lmtPrice,
+ size=size,
+ ),
+ src='ib',
+ )
+ order_msgs.append(msg)
+
+ return order_msgs
+
+
+# proxy wrapper for starting trade event stream
+async def open_trade_event_stream(
+ client: Client,
+ task_status: TaskStatus[
+ trio.abc.ReceiveChannel
+ ] = trio.TASK_STATUS_IGNORED,
+):
+ # each api client has a unique event stream
+ async with tractor.to_asyncio.open_channel_from(
+ recv_trade_updates,
+ client=client,
+ ) as (first, trade_event_stream):
+
+ task_status.started(trade_event_stream)
+ await trio.sleep_forever()
+
+
@tractor.context
async def trades_dialogue(
@@ -465,7 +552,10 @@ async def trades_dialogue(
# we might also want to delegate a specific actor for
# ledger writing / reading for speed?
async with (
- open_client_proxies() as (proxies, aioclients),
+ open_client_proxies() as (
+ proxies,
+ aioclients,
+ ),
):
# Open a trade ledgers stack for appending trade records over
# multiple accounts.
@@ -473,6 +563,9 @@ async def trades_dialogue(
ledgers: dict[str, dict] = {}
tables: dict[str, PpTable] = {}
order_msgs: list[Status] = []
+ conf = get_config()
+ accounts_def_inv: bidict[str, str] = bidict(conf['accounts']).inverse
+
with (
ExitStack() as lstack,
):
@@ -489,148 +582,15 @@ async def trades_dialogue(
open_trade_ledger(
'ib',
acctid,
- )
- )
- table = tables[acctid] = lstack.enter_context(
- open_pps(
- 'ib',
- acctid,
- write_on_exit=True,
- )
- )
-
- for account, proxy in proxies.items():
- client = aioclients[account]
- trades: list[Trade] = client.ib.openTrades()
- for trade in trades:
- order = trade.order
- quant = trade.order.totalQuantity
- action = order.action.lower()
- size = {
- 'sell': -1,
- 'buy': 1,
- }[action] * quant
- con = trade.contract
-
- # TODO: in the case of the SMART venue (aka ib's
- # router-clearing sys) we probably should handle
- # showing such orders overtop of the fqsn for the
- # primary exchange, how to map this easily is going
- # to be a bit tricky though?
- deats = await proxy.con_deats(contracts=[con])
- fqsn = list(deats)[0]
-
- reqid = order.orderId
-
- # TODO: maybe embed a ``BrokerdOrder`` instead
- # since then we can directly load it on the client
- # side in the order mode loop?
- msg = Status(
- time_ns=time.time_ns(),
- resp='open',
- oid=str(reqid),
- reqid=reqid,
-
- # embedded order info
- req=Order(
- action=action,
- exec_mode='live',
- oid=str(reqid),
- symbol=fqsn,
- account=accounts_def.inverse[order.account],
- price=order.lmtPrice,
- size=size,
+ tx_sort=partial(
+ iter_by_dt,
+ parsers={
+ 'dateTime': parse_flex_dt,
+ 'datetime': pendulum.parse,
+ },
),
- src='ib',
)
- order_msgs.append(msg)
-
- # process pp value reported from ib's system. we only use these
- # to cross-check sizing since average pricing on their end uses
- # the so called (bs) "FIFO" style which more or less results in
- # a price that's not useful for traders who want to not lose
- # money.. xb
- for pos in client.positions():
-
- # collect all ib-pp reported positions so that we can be
- # sure know which positions to update from the ledger if
- # any are missing from the ``pps.toml``
- bsuid, msg = pack_position(pos)
-
- acctid = msg.account = accounts_def.inverse[msg.account]
- acctid = acctid.strip('ib.')
- cids2pps[(acctid, bsuid)] = msg
- assert msg.account in accounts, (
- f'Position for unknown account: {msg.account}')
-
- ledger = ledgers[acctid]
- table = tables[acctid]
-
- pp = table.pps.get(bsuid)
- if (
- not pp
- or pp.size != msg.size
- ):
- trans = norm_trade_records(ledger)
- table.update_from_trans(trans)
-
- # update trades ledgers for all accounts from connected
- # api clients which report trades for **this session**.
- trades = await proxy.trades()
- (
- trans_by_acct,
- api_to_ledger_entries,
- ) = await update_ledger_from_api_trades(
- trades,
- proxy,
- )
-
- # if new trades are detected from the API, prepare
- # them for the ledger file and update the pptable.
- if api_to_ledger_entries:
- trade_entries = api_to_ledger_entries.get(acctid)
-
- if trade_entries:
- # write ledger with all new trades **AFTER**
- # we've updated the `pps.toml` from the
- # original ledger state! (i.e. this is
- # currently done on exit)
- ledger.update(trade_entries)
-
- trans = trans_by_acct.get(acctid)
- if trans:
- table.update_from_trans(trans)
-
- # XXX: not sure exactly why it wouldn't be in
- # the updated output (maybe this is a bug?) but
- # if you create a pos from TWS and then load it
- # from the api trades it seems we get a key
- # error from ``update[bsuid]`` ?
- pp = table.pps.get(bsuid)
- if not pp:
- log.error(
- f'The contract id for {msg} may have '
- f'changed to {bsuid}\nYou may need to '
- 'adjust your ledger for this, skipping '
- 'for now.'
- )
- continue
-
- # XXX: not sure exactly why it wouldn't be in
- # the updated output (maybe this is a bug?) but
- # if you create a pos from TWS and then load it
- # from the api trades it seems we get a key
- # error from ``update[bsuid]`` ?
- pp = table.pps[bsuid]
- pairinfo = pp.symbol
- if msg.size != pp.size:
- log.error(
- f'Pos size mismatch {pairinfo.front_fqsn()}:\n'
- f'ib: {msg.size}\n'
- f'piker: {pp.size}\n'
- )
-
- active_pps, closed_pps = table.dump_active()
+ )
# load all positions from `pps.toml`, cross check with
# ib's positions data, and relay re-formatted pps as
@@ -641,6 +601,105 @@ async def trades_dialogue(
# - no new trades yet but we want to reload and audit any
# positions reported by ib's sys that may not yet be in
# piker's ``pps.toml`` state-file.
+ tables[acctid] = lstack.enter_context(
+ open_pps(
+ 'ib',
+ acctid,
+ write_on_exit=True,
+ )
+ )
+
+ for account, proxy in proxies.items():
+ client = aioclients[account]
+
+ # order_msgs is filled in by this helper
+ await aggr_open_orders(
+ order_msgs,
+ client,
+ proxy,
+ accounts_def,
+ )
+ acctid: str = account.strip('ib.')
+ ledger: dict = ledgers[acctid]
+ table: PpTable = tables[acctid]
+
+ # update trades ledgers for all accounts from connected
+ # api clients which report trades for **this session**.
+ api_trades = await proxy.trades()
+ if api_trades:
+
+ trans_by_acct: dict[str, Transaction]
+ api_to_ledger_entries: dict[str, dict]
+ (
+ trans_by_acct,
+ api_to_ledger_entries,
+ ) = await update_ledger_from_api_trades(
+ api_trades,
+ proxy,
+ accounts_def_inv,
+ )
+
+ # if new api_trades are detected from the API, prepare
+ # them for the ledger file and update the pptable.
+ if api_to_ledger_entries:
+ trade_entries = api_to_ledger_entries.get(acctid)
+
+ # TODO: fix this `tractor` BUG!
+ # https://github.com/goodboy/tractor/issues/354
+ # await tractor.breakpoint()
+
+ if trade_entries:
+ # write ledger with all new api_trades
+ # **AFTER** we've updated the `pps.toml`
+ # from the original ledger state! (i.e. this
+ # is currently done on exit)
+ for tid, entry in trade_entries.items():
+ ledger.setdefault(tid, {}).update(entry)
+
+ trans = trans_by_acct.get(acctid)
+ if trans:
+ table.update_from_trans(trans)
+
+ # update position table with latest ledger from all
+ # gathered transactions: ledger file + api records.
+ trans: dict[str, Transaction] = norm_trade_records(ledger)
+ table.update_from_trans(trans)
+
+ # process pp value reported from ib's system. we only
+ # use these to cross-check sizing since average pricing
+ # on their end uses the so called (bs) "FIFO" style
+ # which more or less results in a price that's not
+ # useful for traders who want to not lose money.. xb
+ # -> collect all ib-pp reported positions so that we can be
+ # sure we know which positions to update from the ledger if
+ # any are missing from the ``pps.toml``
+
+ pos: IbPosition # named tuple subtype
+ for pos in client.positions():
+
+ # NOTE XXX: we skip options for now since we don't
+ # yet support the symbology nor the live feeds.
+ if isinstance(pos.contract, Option):
+ log.warning(
+ f'Option contracts not supported for now:\n'
+ f'{pos._asdict()}'
+ )
+ continue
+
+ bs_mktid, msg = pack_position(pos)
+ acctid = msg.account = accounts_def.inverse[msg.account]
+ acctid = acctid.strip('ib.')
+ cids2pps[(acctid, bs_mktid)] = msg
+
+ assert msg.account in accounts, (
+ f'Position for unknown account: {msg.account}')
+
+ # iterate all (newly) updated pps tables for every
+ # client-account and build out position msgs to deliver to
+ # EMS.
+ for acctid, table in tables.items():
+ active_pps, closed_pps = table.dump_active()
+
for pps in [active_pps, closed_pps]:
msgs = await update_and_audit_msgs(
acctid,
@@ -661,22 +720,6 @@ async def trades_dialogue(
tuple(name for name in accounts_def if name in accounts),
))
- # proxy wrapper for starting trade event stream
- async def open_trade_event_stream(
- client: Client,
- task_status: TaskStatus[
- trio.abc.ReceiveChannel
- ] = trio.TASK_STATUS_IGNORED,
- ):
- # each api client has a unique event stream
- async with tractor.to_asyncio.open_channel_from(
- recv_trade_updates,
- client=client,
- ) as (first, trade_event_stream):
-
- task_status.started(trade_event_stream)
- await trio.sleep_forever()
-
async with (
ctx.open_stream() as ems_stream,
trio.open_nursery() as n,
@@ -723,44 +766,50 @@ async def trades_dialogue(
async def emit_pp_update(
ems_stream: tractor.MsgStream,
trade_entry: dict,
- accounts_def: bidict,
+ accounts_def: bidict[str, str],
proxies: dict,
cids2pps: dict,
- ledgers,
- tables,
+ ledgers: dict[str, dict[str, Any]],
+ tables: dict[str, PpTable],
) -> None:
# compute and relay incrementally updated piker pp
- acctid = accounts_def.inverse[trade_entry['execution']['acctNumber']]
- proxy = proxies[acctid]
-
- acctid = acctid.strip('ib.')
+ accounts_def_inv: bidict[str, str] = accounts_def.inverse
+ fq_acctid = accounts_def_inv[trade_entry['execution']['acctNumber']]
+ proxy = proxies[fq_acctid]
(
records_by_acct,
api_to_ledger_entries,
) = await update_ledger_from_api_trades(
[trade_entry],
proxy,
+ accounts_def_inv,
)
- trans = records_by_acct[acctid]
+ trans = records_by_acct[fq_acctid]
r = list(trans.values())[0]
+ acctid = fq_acctid.strip('ib.')
table = tables[acctid]
table.update_from_trans(trans)
active, closed = table.dump_active()
# NOTE: update ledger with all new trades
- for acctid, trades_by_id in api_to_ledger_entries.items():
+ for fq_acctid, trades_by_id in api_to_ledger_entries.items():
+ acctid = fq_acctid.strip('ib.')
ledger = ledgers[acctid]
- ledger.update(trades_by_id)
+
+ for tid, tdict in trades_by_id.items():
+ # NOTE: don't override flex/previous entries with new API
+ # ones, just update with new fields!
+ ledger.setdefault(tid, {}).update(tdict)
# generate pp msgs and cross check with ib's positions data, relay
# re-formatted pps as msgs to the ems.
for pos in filter(
bool,
- [active.get(r.bsuid), closed.get(r.bsuid)]
+ [active.get(r.bs_mktid), closed.get(r.bs_mktid)]
):
msgs = await update_and_audit_msgs(
acctid,
@@ -859,8 +908,8 @@ async def deliver_trade_events(
# https://github.com/erdewit/ib_insync/issues/363
# acctid = accounts_def.inverse[trade.order.account]
- # # double check there is no error when
- # # cancelling.. gawwwd
+ # double check there is no error when
+ # cancelling.. gawwwd
# if ib_status_key == 'cancelled':
# last_log = trade.log[-1]
# if (
@@ -1000,6 +1049,7 @@ async def deliver_trade_events(
accounts_def,
proxies,
cids2pps,
+
ledgers,
tables,
)
@@ -1034,6 +1084,7 @@ async def deliver_trade_events(
accounts_def,
proxies,
cids2pps,
+
ledgers,
tables,
)
@@ -1095,7 +1146,7 @@ async def deliver_trade_events(
def norm_trade_records(
ledger: dict[str, Any],
-) -> list[Transaction]:
+) -> dict[str, Transaction]:
'''
Normalize a flex report or API retrieved executions
ledger into our standard record format.
@@ -1110,7 +1161,6 @@ def norm_trade_records(
comms = -1*record['ibCommission']
price = record.get('price') or record['tradePrice']
- price_tick_digits = float_digits(price)
# the api doesn't do the -/+ on the quantity for you but flex
# records do.. are you fucking serious ib...!?
@@ -1122,6 +1172,12 @@ def norm_trade_records(
exch = record['exchange']
lexch = record.get('listingExchange')
+ # NOTE: remove null values since `tomlkit` can't serialize
+ # them to file.
+    dnc = record.pop('deltaNeutralContract', None)
+ if dnc is not None:
+ record['deltaNeutralContract'] = dnc
+
suffix = lexch or exch
symbol = record['symbol']
@@ -1153,7 +1209,9 @@ def norm_trade_records(
# special handling of symbol extraction from
# flex records using some ad-hoc schema parsing.
- asset_type: str = record.get('assetCategory') or record['secType']
+ asset_type: str = record.get(
+ 'assetCategory'
+ ) or record.get('secType', 'STK')
# TODO: XXX: WOA this is kinda hacky.. probably
# should figure out the correct future pair key more
@@ -1161,58 +1219,54 @@ def norm_trade_records(
if asset_type == 'FUT':
# (flex) ledger entries don't have any simple 3-char key?
symbol = record['symbol'][:3]
+ asset_type: str = 'future'
+
+ elif asset_type == 'STK':
+ asset_type: str = 'stock'
+
+ # try to build out piker fqme from record.
+ expiry = (
+ record.get('lastTradeDateOrContractMonth')
+ or record.get('expiry')
+ )
- # try to build out piker fqsn from record.
- expiry = record.get(
- 'lastTradeDateOrContractMonth') or record.get('expiry')
if expiry:
expiry = str(expiry).strip(' ')
suffix = f'{exch}.{expiry}'
expiry = pendulum.parse(expiry)
- src: str = record['currency']
+ # src: str = record['currency']
+ price_tick: Decimal = digits_to_dec(dec_digits(price))
- pair = Symbol.from_fqsn(
- fqsn=f'{symbol}.{suffix}.ib',
- info={
- 'tick_size_digits': price_tick_digits,
+ pair = MktPair.from_fqme(
+ fqme=f'{symbol}.{suffix}.ib',
+ bs_mktid=str(conid),
+ _atype=str(asset_type), # XXX: can't serialize `tomlkit.String`
- # NOTE: for "legacy" assets, volume is normally discreet, not
- # a float, but we keep a digit in case the suitz decide
- # to get crazy and change it; we'll be kinda ready
- # schema-wise..
- 'lot_size_digits': 1,
-
- # TODO: remove when we switching from
- # ``Symbol`` -> ``MktPair``
- 'asset_type': asset_type,
-
- # TODO: figure out a target fin-type name
- # set and normalize to that here!
- 'dst_type': asset_type.lower(),
-
- # starting to use new key naming as in ``MktPair``
- # type have drafted...
- 'src': src,
- 'src_type': 'fiat',
- },
+ price_tick=price_tick,
+ # NOTE: for "legacy" assets, volume is normally discrete, not
+ # a float, but we keep a digit in case the suitz decide
+ # to get crazy and change it; we'll be kinda ready
+ # schema-wise..
+ size_tick='1',
)
- fqsn = pair.front_fqsn().rstrip('.ib')
- # NOTE: for flex records the normal fields for defining an fqsn
+ fqme = pair.fqme
+
+ # NOTE: for flex records the normal fields for defining an fqme
# sometimes won't be available so we rely on two approaches for
- # the "reverse lookup" of piker style fqsn keys:
+ # the "reverse lookup" of piker style fqme keys:
# - when dealing with API trade records received from
# `IB.trades()` we do a contract lookup at the time of processing
# - when dealing with flex records, it is assumed the record
# is at least a day old and thus the TWS position reporting system
# should already have entries if the pps are still open, in
- # which case, we can pull the fqsn from that table (see
+ # which case, we can pull the fqme from that table (see
# `trades_dialogue()` above).
insort(
records,
Transaction(
- fqsn=fqsn,
+ fqme=fqme,
sym=pair,
tid=tid,
size=size,
@@ -1220,7 +1274,7 @@ def norm_trade_records(
cost=comms,
dt=dt,
expiry=expiry,
- bsuid=conid,
+ bs_mktid=str(conid),
),
key=lambda t: t.dt
)
@@ -1228,18 +1282,8 @@ def norm_trade_records(
return {r.tid: r for r in records}
-def parse_flex_dt(
- record: str,
-) -> pendulum.datetime:
- date, ts = record.split(';')
- dt = pendulum.parse(date)
- ts = f'{ts[:2]}:{ts[2:4]}:{ts[4:]}'
- tsdt = pendulum.parse(ts)
- return dt.set(hour=tsdt.hour, minute=tsdt.minute, second=tsdt.second)
-
-
def api_trades_to_ledger_entries(
- accounts: bidict,
+ accounts: bidict[str, str],
# TODO: maybe we should just be passing through the
# ``ib_insync.order.Trade`` instance directly here
@@ -1309,148 +1353,3 @@ def api_trades_to_ledger_entries(
))
return trades_by_account
-
-
-def flex_records_to_ledger_entries(
- accounts: bidict,
- trade_entries: list[object],
-
-) -> dict:
- '''
- Convert flex report entry objects into ``dict`` form, pretty much
- straight up without modification except add a `pydatetime` field
- from the parsed timestamp.
-
- '''
- trades_by_account = {}
- for t in trade_entries:
- entry = t.__dict__
-
- # XXX: LOL apparently ``toml`` has a bug
- # where a section key error will show up in the write
- # if you leave a table key as an `int`? So i guess
- # cast to strs for all keys..
-
- # oddly for some so-called "BookTrade" entries
- # this field seems to be blank, no cuckin clue.
- # trade['ibExecID']
- tid = str(entry.get('ibExecID') or entry['tradeID'])
- # date = str(entry['tradeDate'])
-
- # XXX: is it going to cause problems if a account name
- # get's lost? The user should be able to find it based
- # on the actual exec history right?
- acctid = accounts[str(entry['accountId'])]
-
- # probably a flex record with a wonky non-std timestamp..
- dt = entry['pydatetime'] = parse_flex_dt(entry['dateTime'])
- entry['datetime'] = str(dt)
-
- if not tid:
- # this is likely some kind of internal adjustment
- # transaction, likely one of the following:
- # - an expiry event that will show a "book trade" indicating
- # some adjustment to cash balances: zeroing or itm settle.
- # - a manual cash balance position adjustment likely done by
- # the user from the accounts window in TWS where they can
- # manually set the avg price and size:
- # https://api.ibkr.com/lib/cstools/faq/web1/index.html#/tag/DTWS_ADJ_AVG_COST
- log.warning(f'Skipping ID-less ledger entry:\n{pformat(entry)}')
- continue
-
- trades_by_account.setdefault(
- acctid, {}
- )[tid] = entry
-
- for acctid in trades_by_account:
- trades_by_account[acctid] = dict(sorted(
- trades_by_account[acctid].items(),
- key=lambda entry: entry[1]['pydatetime'],
- ))
-
- return trades_by_account
-
-
-def load_flex_trades(
- path: Optional[str] = None,
-
-) -> dict[str, Any]:
-
- from ib_insync import flexreport, util
-
- conf = get_config()
-
- if not path:
- # load ``brokers.toml`` and try to get the flex
- # token and query id that must be previously defined
- # by the user.
- token = conf.get('flex_token')
- if not token:
- raise ValueError(
- 'You must specify a ``flex_token`` field in your'
- '`brokers.toml` in order load your trade log, see our'
- 'intructions for how to set this up here:\n'
- 'PUT LINK HERE!'
- )
-
- qid = conf['flex_trades_query_id']
-
- # TODO: hack this into our logging
- # system like we do with the API client..
- util.logToConsole()
-
- # TODO: rewrite the query part of this with async..httpx?
- report = flexreport.FlexReport(
- token=token,
- queryId=qid,
- )
-
- else:
- # XXX: another project we could potentially look at,
- # https://pypi.org/project/ibflex/
- report = flexreport.FlexReport(path=path)
-
- trade_entries = report.extract('Trade')
- ln = len(trade_entries)
- log.info(f'Loaded {ln} trades from flex query')
-
- trades_by_account = flex_records_to_ledger_entries(
- conf['accounts'].inverse, # reverse map to user account names
- trade_entries,
- )
-
- ledger_dict: Optional[dict] = None
-
- for acctid in trades_by_account:
- trades_by_id = trades_by_account[acctid]
-
- with open_trade_ledger('ib', acctid) as ledger_dict:
- tid_delta = set(trades_by_id) - set(ledger_dict)
- log.info(
- 'New trades detected\n'
- f'{pformat(tid_delta)}'
- )
- if tid_delta:
- sorted_delta = dict(sorted(
- {tid: trades_by_id[tid] for tid in tid_delta}.items(),
- key=lambda entry: entry[1].pop('pydatetime'),
- ))
- ledger_dict.update(sorted_delta)
-
- return ledger_dict
-
-
-if __name__ == '__main__':
- import sys
- import os
-
- args = sys.argv
- if len(args) > 1:
- args = args[1:]
- for arg in args:
- path = os.path.abspath(arg)
- load_flex_trades(path=path)
- else:
- # expect brokers.toml to have an entry and
- # pull from the web service.
- load_flex_trades()
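
For reference, the ledger-merge pattern the broker.py hunks above switch to
(`ledger.setdefault(tid, {}).update(entry)`) keeps whatever fields an existing
(e.g. flex-report) entry already has and only overlays the fields present in the
newer API record. A minimal standalone sketch with plain dicts (field names are
illustrative only):

    ledger = {
        # pre-existing entry loaded from a flex report
        't1': {'price': 101.0, 'dateTime': '20230301;093000'},
    }
    api_entries = {
        't1': {'price': 101.0, 'datetime': '2023-03-01T09:30:00'},  # same fill via API
        't2': {'price': 102.5, 'datetime': '2023-03-01T10:15:00'},  # brand new fill
    }
    for tid, entry in api_entries.items():
        # merge-in-place: shared fields are refreshed from the API record,
        # flex-only fields survive, and unknown tids are appended.
        ledger.setdefault(tid, {}).update(entry)

    assert 'dateTime' in ledger['t1']  # flex-only field kept
    assert 't2' in ledger              # new API fill added
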
diff --git a/piker/brokers/ib/feed.py b/piker/brokers/ib/feed.py
index 358bb066..b4edae17 100644
--- a/piker/brokers/ib/feed.py
+++ b/piker/brokers/ib/feed.py
@@ -19,7 +19,11 @@ Data feed endpoints pre-wrapped and ready for use with ``tractor``/``trio``.
"""
from __future__ import annotations
import asyncio
-from contextlib import asynccontextmanager as acm
+from contextlib import (
+ asynccontextmanager as acm,
+ nullcontext,
+)
+from decimal import Decimal
from dataclasses import asdict
from datetime import datetime
from functools import partial
@@ -46,7 +50,7 @@ from .._util import (
)
from .api import (
# _adhoc_futes_set,
- con2fqsn,
+ con2fqme,
log,
load_aio_clients,
ibis,
@@ -54,10 +58,18 @@ from .api import (
open_client_proxies,
get_preferred_data_client,
Ticker,
- RequestError,
Contract,
+ RequestError,
)
from ._util import data_reset_hack
+from piker._cacheables import (
+ async_lifo_cache,
+)
+from piker.accounting import (
+ Asset,
+ MktPair,
+)
+from piker.data.validate import FeedInit
# https://interactivebrokers.github.io/tws-api/tick_types.html
@@ -108,7 +120,7 @@ async def open_data_client() -> MethodProxy:
@acm
async def open_history_client(
- fqsn: str,
+ mkt: MktPair,
) -> tuple[Callable, int]:
'''
@@ -116,7 +128,7 @@ async def open_history_client(
that takes in ``pendulum.datetime`` and returns ``numpy`` arrays.
'''
- # TODO:
+ # TODO: mostly meta-data processing to drive shm and tsdb storage..
# - add logic to handle tradable hours and only grab
# valid bars in the range?
# - we want to avoid overrunning the underlying shm array buffer and
@@ -125,8 +137,33 @@ async def open_history_client(
# the shm size will be driven by user config and available sys
# memory.
+ # IB's internal symbology does not expect the "source asset" in
+ # the "symbol name", what we call the "market name". This is
+ # common in most legacy market brokers since it's presumed that
+ # given a certain stock exchange, listed assets are traded
+ # "from" a particular source fiat, normally something like USD.
+ if (
+ mkt.src
+ and mkt.src.atype == 'fiat'
+ ):
+ fqme_kwargs: dict[str, Any] = {}
+
+ if mkt.dst.atype == 'forex':
+
+ # XXX: for now we do need the src token kept in since
+ fqme_kwargs = {
+ 'without_src': False, # default is True
+ 'delim_char': '', # bc they would normally use a frickin `.` smh
+ }
+
+ fqme: str = mkt.get_bs_fqme(**(fqme_kwargs))
+
+ else:
+ fqme = mkt.bs_fqme
+
async with open_data_client() as proxy:
+
max_timeout: float = 2.
mean: float = 0
count: int = 0
@@ -134,10 +171,10 @@ async def open_history_client(
head_dt: None | datetime = None
if (
# fx cons seem to not provide this endpoint?
- 'idealpro' not in fqsn
+ 'idealpro' not in fqme
):
try:
- head_dt = await proxy.get_head_time(fqsn=fqsn)
+ head_dt = await proxy.get_head_time(fqme=fqme)
except RequestError:
head_dt = None
@@ -152,7 +189,7 @@ async def open_history_client(
query_start = time.time()
out, timedout = await get_bars(
proxy,
- fqsn,
+ fqme,
timeframe,
end_dt=end_dt,
)
@@ -211,7 +248,7 @@ _pacing: str = (
async def wait_on_data_reset(
proxy: MethodProxy,
reset_type: str = 'data',
- timeout: float = 16,
+ timeout: float = 16, # float('inf'),
task_status: TaskStatus[
tuple[
@@ -227,7 +264,7 @@ async def wait_on_data_reset(
'HMDS data farm connection is OK:ushmds'
)
- # XXX: other event messages we might want to try and
+ # TODO: other event messages we might want to try and
# wait for but i wasn't able to get any of this
# reliable..
# reconnect_start = proxy.status_event(
@@ -238,14 +275,21 @@ async def wait_on_data_reset(
# )
# try to wait on the reset event(s) to arrive, a timeout
# will trigger a retry up to 6 times (for now).
+ client = proxy._aio_ns.ib.client
done = trio.Event()
with trio.move_on_after(timeout) as cs:
task_status.started((cs, done))
- log.warning('Sending DATA RESET request')
- res = await data_reset_hack(reset_type=reset_type)
+ log.warning(
+ 'Sending DATA RESET request:\n'
+ f'{client}'
+ )
+ res = await data_reset_hack(
+ vnc_host=client.host,
+ reset_type=reset_type,
+ )
if not res:
log.warning(
@@ -279,12 +323,12 @@ async def wait_on_data_reset(
_data_resetter_task: trio.Task | None = None
-
+_failed_resets: int = 0
async def get_bars(
proxy: MethodProxy,
- fqsn: str,
+ fqme: str,
timeframe: int,
# blank to start which tells ib to look up the latest datum
@@ -298,6 +342,7 @@ async def get_bars(
# history queries for instrument, presuming that most don't
# not trade for a week XD
max_nodatas: int = 6,
+ max_failed_resets: int = 6,
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,
@@ -307,7 +352,7 @@ async def get_bars(
a ``MethodProxy``.
'''
- global _data_resetter_task
+ global _data_resetter_task, _failed_resets
nodatas_count: int = 0
data_cs: trio.CancelScope | None = None
@@ -320,11 +365,14 @@ async def get_bars(
result_ready = trio.Event()
async def query():
+
+ global _failed_resets
nonlocal result, data_cs, end_dt, nodatas_count
- while True:
+
+ while _failed_resets < max_failed_resets:
try:
out = await proxy.bars(
- fqsn=fqsn,
+ fqme=fqme,
end_dt=end_dt,
sample_period_s=timeframe,
@@ -339,7 +387,10 @@ async def get_bars(
bars, bars_array, dt_duration = out
- if not bars:
+ if (
+ not bars
+ and end_dt
+ ):
log.warning(
f'History is blank for {dt_duration} from {end_dt}'
)
@@ -347,7 +398,7 @@ async def get_bars(
continue
if bars_array is None:
- raise SymbolNotFound(fqsn)
+ raise SymbolNotFound(fqme)
first_dt = pendulum.from_timestamp(
bars[0].date.timestamp())
@@ -378,52 +429,51 @@ async def get_bars(
if 'No market data permissions for' in msg:
# TODO: signalling for no permissions searches
raise NoData(
- f'Symbol: {fqsn}',
+ f'Symbol: {fqme}',
)
- elif err.code == 162:
- if (
- 'HMDS query returned no data' in msg
- ):
- # XXX: this is now done in the storage mgmt
- # layer and we shouldn't implicitly decrement
- # the frame dt index since the upper layer may
- # be doing so concurrently and we don't want to
- # be delivering frames that weren't asked for.
- # try to decrement start point and look further back
- # end_dt = end_dt.subtract(seconds=2000)
- logmsg = "SUBTRACTING DAY from DT index"
- if end_dt is not None:
- end_dt = end_dt.subtract(days=1)
- elif end_dt is None:
- end_dt = pendulum.now().subtract(days=1)
+ elif (
+ 'HMDS query returned no data' in msg
+ ):
+ # XXX: this is now done in the storage mgmt
+ # layer and we shouldn't implicitly decrement
+ # the frame dt index since the upper layer may
+ # be doing so concurrently and we don't want to
+ # be delivering frames that weren't asked for.
+ # try to decrement start point and look further back
+ # end_dt = end_dt.subtract(seconds=2000)
+ logmsg = "SUBTRACTING DAY from DT index"
+ if end_dt is not None:
+ end_dt = end_dt.subtract(days=1)
+ elif end_dt is None:
+ end_dt = pendulum.now().subtract(days=1)
- log.warning(
- f'NO DATA found ending @ {end_dt}\n'
- + logmsg
+ log.warning(
+ f'NO DATA found ending @ {end_dt}\n'
+ + logmsg
+ )
+
+ if nodatas_count >= max_nodatas:
+ raise DataUnavailable(
+ f'Presuming {fqme} has no further history '
+ f'after {max_nodatas} tries..'
)
- if nodatas_count >= max_nodatas:
- raise DataUnavailable(
- f'Presuming {fqsn} has no further history '
- f'after {max_nodatas} tries..'
- )
+ nodatas_count += 1
+ continue
- nodatas_count += 1
- continue
-
- elif 'API historical data query cancelled' in err.message:
- log.warning(
- 'Query cancelled by IB (:eyeroll:):\n'
- f'{err.message}'
- )
- continue
- elif (
- 'Trading TWS session is connected from a different IP'
- in err.message
- ):
- log.warning("ignoring ip address warning")
- continue
+ elif 'API historical data query cancelled' in err.message:
+ log.warning(
+ 'Query cancelled by IB (:eyeroll:):\n'
+ f'{err.message}'
+ )
+ continue
+ elif (
+ 'Trading TWS session is connected from a different IP'
+ in err.message
+ ):
+ log.warning("ignoring ip address warning")
+ continue
# XXX: more or less same as above timeout case
elif _pacing in msg:
@@ -432,8 +482,11 @@ async def get_bars(
'Resetting farms with `ctrl-alt-f` hack\n'
)
+ client = proxy._aio_ns.ib.client
+
# cancel any existing reset task
if data_cs:
+ log.cancel(f'Cancelling existing reset for {client}')
data_cs.cancel()
# spawn new data reset task
@@ -441,10 +494,13 @@ async def get_bars(
partial(
wait_on_data_reset,
proxy,
- timeout=float('inf'),
reset_type='connection'
)
)
+ if reset_done:
+ _failed_resets = 0
+ else:
+ _failed_resets += 1
continue
else:
@@ -481,7 +537,7 @@ async def get_bars(
partial(
wait_on_data_reset,
proxy,
- timeout=float('inf'),
+ reset_type='data',
)
)
# sync wait on reset to complete
@@ -491,7 +547,9 @@ async def get_bars(
return result, data_cs is not None
-asset_type_map = {
+# re-mapping to piker asset type names
+# https://github.com/erdewit/ib_insync/blob/master/ib_insync/contract.py#L113
+_asset_type_map = {
'STK': 'stock',
'OPT': 'option',
'FUT': 'future',
@@ -532,7 +590,7 @@ async def _setup_quote_stream(
'294', # Trade rate / minute
'295', # Vlm rate / minute
),
- contract: Optional[Contract] = None,
+ contract: Contract | None = None,
) -> trio.abc.ReceiveChannel:
'''
@@ -618,7 +676,7 @@ async def _setup_quote_stream(
async def open_aio_quote_stream(
symbol: str,
- contract: Optional[Contract] = None,
+ contract: Contract | None = None,
) -> trio.abc.ReceiveStream:
@@ -661,7 +719,7 @@ def normalize(
# check for special contract types
con = ticker.contract
- fqsn, calc_price = con2fqsn(con)
+ fqme, calc_price = con2fqme(con)
# convert named tuples to dicts so we send usable keys
new_ticks = []
@@ -691,9 +749,9 @@ def normalize(
# serialize for transport
data = asdict(ticker)
- # generate fqsn with possible specialized suffix
+ # generate fqme with possible specialized suffix
# for derivatives, note the lowercase.
- data['symbol'] = data['fqsn'] = fqsn
+ data['symbol'] = data['fqme'] = fqme
# convert named tuples to dicts for transport
tbts = data.get('tickByTicks')
@@ -713,6 +771,98 @@ def normalize(
return data
+@async_lifo_cache()
+async def get_mkt_info(
+ fqme: str,
+
+ proxy: MethodProxy | None = None,
+
+) -> tuple[MktPair, ibis.ContractDetails]:
+
+ # XXX: we don't need to split off any fqme broker part?
+ # bs_fqme, _, broker = fqme.partition('.')
+
+ proxy: MethodProxy
+ get_details: bool = False
+ if proxy is not None:
+ client_ctx = nullcontext(proxy)
+ else:
+ client_ctx = open_data_client
+
+ async with client_ctx as proxy:
+ try:
+ (
+ con, # Contract
+ details, # ContractDetails
+ ) = await proxy.get_sym_details(symbol=fqme)
+ except ConnectionError:
+ log.exception(f'Proxy is ded {proxy._aio_ns}')
+ raise
+
+ # TODO: more consistent field translation
+ init_info: dict = {}
+ atype = _asset_type_map[con.secType]
+
+ if atype == 'commodity':
+ venue: str = 'cmdty'
+ else:
+ venue = con.primaryExchange or con.exchange
+
+ price_tick: Decimal = Decimal(str(details.minTick))
+
+ if atype == 'stock':
+ # XXX: GRRRR they don't support fractional share sizes for
+ # stocks from the API?!
+ # if con.secType == 'STK':
+ size_tick = Decimal('1')
+ else:
+ size_tick: Decimal = Decimal(str(details.minSize).rstrip('0'))
+ # |-> TODO: there is also the Contract.sizeIncrement, but wtf is it?
+
+ # NOTE: this is duplicate from the .broker.norm_trade_records()
+ # routine, we should factor all this parsing somewhere..
+ expiry_str = str(con.lastTradeDateOrContractMonth)
+ # if expiry:
+ # expiry_str: str = str(pendulum.parse(
+ # str(expiry).strip(' ')
+ # ))
+
+ # TODO: currently we can't pass the fiat src asset because
+ # then we'll get a `MNQUSD` request for history data..
+ # we need to figure out how we're going to handle this (later?)
+ # but likely we want all backends to eventually handle
+ # ``dst/src.venue.`` style !?
+ src: str | Asset = ''
+ if atype == 'forex':
+ src = Asset(
+ name=str(con.currency),
+ atype='fiat',
+ tx_tick=Decimal('0.01'), # right?
+ )
+
+ mkt = MktPair(
+ dst=Asset(
+ name=con.symbol.lower(),
+ atype=atype,
+ tx_tick=size_tick,
+ ),
+ src=src,
+
+ price_tick=price_tick,
+ size_tick=size_tick,
+
+ bs_mktid=str(con.conId),
+ venue=str(venue),
+ expiry=expiry_str,
+ broker='ib',
+
+ # TODO: options contract info as str?
+ # contract_info=
+ )
+
+ return mkt, details
+
+
async def stream_quotes(
send_chan: trio.abc.SendChannel,
@@ -735,80 +885,49 @@ async def stream_quotes(
sym = symbols[0]
log.info(f'request for real-time quotes: {sym}')
+ init_msgs: list[FeedInit] = []
+
+ proxy: MethodProxy
+ mkt: MktPair
+ details: ibis.ContractDetails
async with open_data_client() as proxy:
+ mkt, details = await get_mkt_info(
+ sym,
+ proxy=proxy, # passed to avoid implicit client load
+ )
- con, first_ticker, details = await proxy.get_sym_details(symbol=sym)
- first_quote = normalize(first_ticker)
- # print(f'first quote: {first_quote}')
+ init_msg = FeedInit(mkt_info=mkt)
- def mk_init_msgs() -> dict[str, dict]:
- '''
- Collect a bunch of meta-data useful for feed startup and
- pack in a `dict`-msg.
+ if mkt.dst.atype in {
+ 'forex',
+ 'index',
+ 'commodity',
+ }:
+ # tell sampler config that it shouldn't do vlm summing.
+ init_msg.shm_write_opts['sum_tick_vlm'] = False
+ init_msg.shm_write_opts['has_vlm'] = False
- '''
- # pass back some symbol info like min_tick, trading_hours, etc.
- syminfo = asdict(details)
- syminfo.update(syminfo['contract'])
+ init_msgs.append(init_msg)
- # nested dataclass we probably don't need and that won't IPC
- # serialize
- syminfo.pop('secIdList')
-
- # TODO: more consistent field translation
- atype = syminfo['asset_type'] = asset_type_map[syminfo['secType']]
-
- if atype in {
- 'forex',
- 'index',
- 'commodity',
- }:
- syminfo['no_vlm'] = True
-
- # for stocks it seems TWS reports too small a tick size
- # such that you can't submit orders with that granularity?
- min_tick = 0.01 if atype == 'stock' else 0
-
- syminfo['price_tick_size'] = max(syminfo['minTick'], min_tick)
-
- # for "legacy" assets, volume is normally discreet, not
- # a float
- syminfo['lot_tick_size'] = 0.0
-
- ibclient = proxy._aio_ns.ib.client
- host, port = ibclient.host, ibclient.port
-
- # TODO: for loop through all symbols passed in
- init_msgs = {
- # pass back token, and bool, signalling if we're the writer
- # and that history has been written
- sym: {
- 'symbol_info': syminfo,
- 'fqsn': first_quote['fqsn'],
- },
- 'status': {
- 'data_ep': f'{host}:{port}',
- },
-
- }
- return init_msgs, syminfo
-
- init_msgs, syminfo = mk_init_msgs()
+ con: Contract = details.contract
+ first_ticker: Ticker = await proxy.get_quote(contract=con)
+ first_quote: dict = normalize(first_ticker)
+ log.runtime(f'FIRST QUOTE: {first_quote}')
# TODO: we should instead spawn a task that waits on a feed to start
# and let it wait indefinitely..instead of this hard coded stuff.
with trio.move_on_after(1):
- contract, first_ticker, details = await proxy.get_quote(symbol=sym)
+ first_ticker = await proxy.get_quote(contract=con)
# it might be outside regular trading hours so see if we can at
# least grab history.
if (
- isnan(first_ticker.last)
- and type(first_ticker.contract) not in (
- ibis.Commodity,
- ibis.Forex,
- ibis.Crypto,
- )
+ isnan(first_ticker.last) # last quote price value is nan
+ and mkt.dst.atype not in {
+ 'commodity',
+ 'forex',
+ 'crypto',
+ }
):
task_status.started((init_msgs, first_quote))
@@ -820,7 +939,7 @@ async def stream_quotes(
await trio.sleep_forever()
return # we never expect feed to come up?
- cs: Optional[trio.CancelScope] = None
+ cs: trio.CancelScope | None = None
startup: bool = True
while (
startup
@@ -860,13 +979,14 @@ async def stream_quotes(
nurse.start_soon(reset_on_feed)
async with aclosing(stream):
- if syminfo.get('no_vlm', False):
+ # if syminfo.get('no_vlm', False):
+ if not init_msg.shm_write_opts['has_vlm']:
# generally speaking these feeds don't
# include vlm data.
- atype = syminfo['asset_type']
+ atype = mkt.dst.atype
log.info(
- f'No-vlm {sym}@{atype}, skipping quote poll'
+ f'No-vlm {mkt.fqme}@{atype}, skipping quote poll'
)
else:
@@ -906,9 +1026,9 @@ async def stream_quotes(
# last = time.time()
async for ticker in stream:
quote = normalize(ticker)
- fqsn = quote['fqsn']
- # print(f'sending {fqsn}:\n{quote}')
- await send_chan.send({fqsn: quote})
+ fqme = quote['fqme']
+ # print(f'sending {fqme}:\n{quote}')
+ await send_chan.send({fqme: quote})
# ugh, clear ticks since we've consumed them
ticker.ticks = []
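
A quick note on the `Decimal(str(details.minTick))` style conversions used in
`get_mkt_info()` above: round-tripping through `str()` keeps the tick size exact
instead of inheriting binary-float noise from the raw IB value. A tiny sketch
(the 0.01 value is just an example):

    from decimal import Decimal

    min_tick: float = 0.01  # e.g. what ContractDetails.minTick might report
    assert Decimal(str(min_tick)) == Decimal('0.01')  # exact two-decimal tick
    assert Decimal(min_tick) != Decimal('0.01')       # direct float conversion carries artifacts
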
diff --git a/piker/brokers/kraken/README.rst b/piker/brokers/kraken/README.rst
index 80e56913..b85c0c3e 100644
--- a/piker/brokers/kraken/README.rst
+++ b/piker/brokers/kraken/README.rst
@@ -58,7 +58,7 @@ your ``pps.toml`` file will have position entries like,
[kraken.spot."xmreur.kraken"]
size = 4.80907954
ppu = 103.97000000
- bsuid = "XXMRZEUR"
+ bs_mktid = "XXMRZEUR"
clears = [
{ tid = "TFJBKK-SMBZS-VJ4UWS", cost = 0.8, price = 103.97, size = 4.80907954, dt = "2022-05-20T02:26:33.413397+00:00" },
]
diff --git a/piker/brokers/kraken/__init__.py b/piker/brokers/kraken/__init__.py
index cd36f4e5..cd04c950 100644
--- a/piker/brokers/kraken/__init__.py
+++ b/piker/brokers/kraken/__init__.py
@@ -34,6 +34,7 @@ from .api import (
get_client,
)
from .feed import (
+ get_mkt_info,
open_history_client,
open_symbol_search,
stream_quotes,
diff --git a/piker/brokers/kraken/api.py b/piker/brokers/kraken/api.py
index 94d6dc41..1ebdb759 100644
--- a/piker/brokers/kraken/api.py
+++ b/piker/brokers/kraken/api.py
@@ -20,10 +20,10 @@ Kraken web API wrapping.
'''
from contextlib import asynccontextmanager as acm
from datetime import datetime
+from decimal import Decimal
import itertools
from typing import (
Any,
- Optional,
Union,
)
import time
@@ -41,14 +41,17 @@ import trio
from piker import config
from piker.data.types import Struct
-from piker.data._source import Symbol
+from piker.accounting._mktinfo import (
+ Asset,
+ digits_to_dec,
+)
from piker.brokers._util import (
resproc,
SymbolNotFound,
BrokerError,
DataThrottle,
)
-from piker.pp import Transaction
+from piker.accounting import Transaction
from . import log
# //
@@ -155,12 +158,23 @@ class Pair(Struct):
short_position_limit: float = 0
long_position_limit: float = float('inf')
+ @property
+ def price_tick(self) -> Decimal:
+ return digits_to_dec(self.pair_decimals)
+
+ @property
+ def size_tick(self) -> Decimal:
+ return digits_to_dec(self.lot_decimals)
+
class Client:
- # global symbol normalization table
+ # symbol mapping from all names to the altname
_ntable: dict[str, str] = {}
- _atable: bidict[str, str] = bidict()
+
+ # 2-way map of symbol names to their "alt names" ffs XD
+ _altnames: bidict[str, str] = bidict()
+
_pairs: dict[str, Pair] = {}
def __init__(
@@ -176,11 +190,13 @@ class Client:
'User-Agent':
'krakenex/2.1.0 (+https://github.com/veox/python3-krakenex)'
})
- self.conf: dict[str, str] = config
self._name = name
self._api_key = api_key
self._secret = secret
+ self.conf: dict[str, str] = config
+ self.assets: dict[str, Asset] = {}
+
@property
def pairs(self) -> dict[str, Pair]:
if self._pairs is None:
@@ -247,20 +263,49 @@ class Client:
'Balance',
{},
)
- by_bsuid = resp['result']
+ by_bsmktid = resp['result']
+
+ # TODO: we need to pull out the "asset" decimals
+ # data and return a `decimal.Decimal` instead here!
+ # using the underlying Asset
return {
- self._atable[sym].lower(): float(bal)
- for sym, bal in by_bsuid.items()
+ self._altnames[sym].lower(): float(bal)
+ for sym, bal in by_bsmktid.items()
}
- async def get_assets(self) -> dict[str, dict]:
- resp = await self._public('Assets', {})
- return resp['result']
+ async def get_assets(self) -> dict[str, Asset]:
+ '''
+ Load and cache all asset infos and pack into
+ our native ``Asset`` struct.
- async def cache_assets(self) -> None:
- assets = self.assets = await self.get_assets()
- for bsuid, info in assets.items():
- self._atable[bsuid] = info['altname']
+ https://docs.kraken.com/rest/#tag/Market-Data/operation/getAssetInfo
+
+ return msg:
+ "asset1": {
+ "aclass": "string",
+ "altname": "string",
+ "decimals": 0,
+ "display_decimals": 0,
+ "collateral_value": 0,
+ "status": "string"
+ }
+
+ '''
+ resp = await self._public('Assets', {})
+ assets = resp['result']
+
+ for bs_mktid, info in assets.items():
+ altname = self._altnames[bs_mktid] = info['altname']
+ aclass: str = info['aclass']
+
+ self.assets[bs_mktid] = Asset(
+ name=altname.lower(),
+ atype=f'crypto_{aclass}',
+ tx_tick=digits_to_dec(info['decimals']),
+ info=info,
+ )
+
+ return self.assets
async def get_trades(
self,
@@ -323,10 +368,15 @@ class Client:
Currently only withdrawals are supported.
'''
- xfers: list[dict] = (await self.endpoint(
+ resp = await self.endpoint(
'WithdrawStatus',
{'asset': asset},
- ))['result']
+ )
+ try:
+ xfers: list[dict] = resp['result']
+ except KeyError:
+ log.exception(f'Kraken suxxx: {resp}')
+ return []
# eg. resp schema:
# 'result': [{'method': 'Bitcoin', 'aclass': 'currency', 'asset':
@@ -341,28 +391,21 @@ class Client:
# look up the normalized name and asset info
asset_key = entry['asset']
- asset_info = self.assets[asset_key]
- asset = self._atable[asset_key].lower()
+ asset = self.assets[asset_key]
+ asset_key = self._altnames[asset_key].lower()
# XXX: this is in the asset units (likely) so it isn't
# quite the same as a commissions cost necessarily..)
cost = float(entry['fee'])
- fqsn = asset + '.kraken'
- pairinfo = Symbol.from_fqsn(
- fqsn,
- info={
- 'asset_type': 'crypto',
- 'lot_tick_size': asset_info['decimals'],
- },
- )
+ fqme = asset_key + '.kraken'
- tran = Transaction(
- fqsn=fqsn,
- sym=pairinfo,
+ tx = Transaction(
+ fqme=fqme,
+ sym=asset,
tid=entry['txid'],
dt=pendulum.from_timestamp(entry['time']),
- bsuid=f'{asset}{src_asset}',
+ bs_mktid=f'{asset_key}{src_asset}',
size=-1*(
float(entry['amount'])
+
@@ -375,7 +418,7 @@ class Client:
# XXX: see note above
cost=cost,
)
- trans[tran.tid] = tran
+ trans[tx.tid] = tx
return trans
@@ -424,30 +467,44 @@ class Client:
# txid is a transaction id given by kraken
return await self.endpoint('CancelOrder', {"txid": reqid})
- async def symbol_info(
+ async def pair_info(
self,
- pair: Optional[str] = None,
+ pair_patt: str | None = None,
) -> dict[str, Pair] | Pair:
+ '''
+ Query for a tradeable asset pair (info), or all if no input
+ pattern is provided.
- if pair is not None:
- pairs = {'pair': pair}
- else:
- pairs = None # get all pairs
+ https://docs.kraken.com/rest/#tag/Market-Data/operation/getTradableAssetPairs
- resp = await self._public('AssetPairs', pairs)
+ '''
+ # get all pairs by default, or filter
+ # to whatever pattern is provided as input.
+ pairs: dict[str, str] | None = None
+ if pair_patt is not None:
+ pairs = {'pair': pair_patt}
+
+ resp = await self._public(
+ 'AssetPairs',
+ pairs,
+ )
err = resp['error']
if err:
- symbolname = pairs['pair'] if pair else None
- raise SymbolNotFound(f'{symbolname}.kraken')
+ raise SymbolNotFound(pair_patt)
- pairs = resp['result']
+ pairs: dict[str, Pair] = {
- if pair is not None:
- _, data = next(iter(pairs.items()))
- return Pair(**data)
- else:
- return {key: Pair(**data) for key, data in pairs.items()}
+ key: Pair(**data)
+ for key, data in resp['result'].items()
+ }
+ # always cache so we can possibly do faster lookup
+ self._pairs.update(pairs)
+
+ if pair_patt is not None:
+ return next(iter(pairs.items()))[1]
+
+ return pairs
async def cache_symbols(self) -> dict:
'''
@@ -460,17 +517,18 @@ class Client:
'''
if not self._pairs:
- self._pairs.update(await self.symbol_info())
+ pairs = await self.pair_info()
+ assert self._pairs == pairs
# table of all ws and rest keys to their alt-name values.
ntable: dict[str, str] = {}
- for rest_key in list(self._pairs.keys()):
+ for rest_key in list(pairs.keys()):
- pair: Pair = self._pairs[rest_key]
+ pair: Pair = pairs[rest_key]
altname = pair.altname
wsname = pair.wsname
- ntable[rest_key] = ntable[wsname] = altname
+ ntable[altname] = ntable[rest_key] = ntable[wsname] = altname
# register the pair under all monikers, a giant flat
# surjection of all possible names to each info obj.
@@ -483,7 +541,6 @@ class Client:
async def search_symbols(
self,
pattern: str,
- limit: int = None,
) -> dict[str, Any]:
'''
@@ -594,8 +651,7 @@ class Client:
the 'AssetPairs' endpoint, see methods above.
'''
- ticker = cls._ntable[ticker]
- return ticker.lower(), cls._pairs[ticker]
+ return cls._ntable[ticker].lower()
@acm
@@ -615,7 +671,7 @@ async def get_client() -> Client:
# at startup, load all symbols, and asset info in
# batch requests.
async with trio.open_nursery() as nurse:
- nurse.start_soon(client.cache_assets)
+ nurse.start_soon(client.get_assets)
await client.cache_symbols()
yield client
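
The new `Pair.price_tick`/`Pair.size_tick` properties above lean on
`digits_to_dec()` from `piker.accounting._mktinfo`, which (by assumption here)
maps a decimal-digit count to the corresponding tick size. A hypothetical
stand-in to show the intended mapping:

    from decimal import Decimal

    def digits_to_dec(ndigits: int) -> Decimal:
        # assumed behaviour: 5 -> Decimal('0.00001'), 0 -> Decimal('1')
        return Decimal('1').scaleb(-ndigits)

    # e.g. a pair reporting pair_decimals=5 and lot_decimals=8 would expose
    assert digits_to_dec(5) == Decimal('0.00001')  # price_tick
    assert digits_to_dec(8) == Decimal('1E-8')     # size_tick
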
diff --git a/piker/brokers/kraken/broker.py b/piker/brokers/kraken/broker.py
index e09dd35a..28f5d026 100644
--- a/piker/brokers/kraken/broker.py
+++ b/piker/brokers/kraken/broker.py
@@ -21,7 +21,7 @@ Order api and machinery
from collections import ChainMap, defaultdict
from contextlib import (
asynccontextmanager as acm,
- contextmanager as cm,
+ aclosing,
)
from functools import partial
from itertools import count
@@ -35,20 +35,23 @@ from typing import (
Union,
)
-from async_generator import aclosing
from bidict import bidict
import pendulum
import trio
import tractor
-from piker.pp import (
+from piker.accounting import (
Position,
PpTable,
Transaction,
+ TransactionLedger,
open_trade_ledger,
open_pps,
+ get_likely_pair,
+)
+from piker.accounting._mktinfo import (
+ MktPair,
)
-from piker.data._source import Symbol
from piker.clearing._messages import (
Order,
Status,
@@ -67,7 +70,7 @@ from .api import (
get_client,
)
from .feed import (
- get_console_log,
+ get_mkt_info,
open_autorecon_ws,
NoBsWs,
stream_messages,
@@ -367,6 +370,8 @@ def trades2pps(
acctid: str,
new_trans: dict[str, Transaction] = {},
+ write_storage: bool = True,
+
) -> tuple[
list[BrokerdPosition],
list[Transaction],
@@ -397,13 +402,20 @@ def trades2pps(
# right since `.broker` is already
# included?
account='kraken.' + acctid,
- symbol=p.symbol.front_fqsn(),
+ symbol=p.mkt.fqme,
size=p.size,
avg_price=p.ppu,
currency='',
)
position_msgs.append(msg)
+ if write_storage:
+ # TODO: ideally this blocks this task
+ # as little as possible. we need to either do
+ # these writes in another actor, or try out `trio`'s
+ # async file IO api?
+ table.write_config()
+
return position_msgs
@@ -414,14 +426,11 @@ async def trades_dialogue(
) -> AsyncIterator[dict[str, Any]]:
- # XXX: required to propagate ``tractor`` loglevel to ``piker`` logging
- get_console_log(loglevel or tractor.current_actor().loglevel)
-
async with get_client() as client:
if not client._api_key:
- raise RuntimeError(
- 'Missing Kraken API key in `brokers.toml`!?!?')
+ await ctx.started('paper')
+ return
# TODO: make ems flip to paper mode via
# some returned signal if the user only wants to use
@@ -467,40 +476,55 @@ async def trades_dialogue(
# update things correctly.
simulate_pp_update: bool = False
+ table: PpTable
+ ledger: TransactionLedger
with (
open_pps(
'kraken',
- acctid
+ acctid,
+ write_on_exit=True,
) as table,
open_trade_ledger(
'kraken',
- acctid
- ) as ledger_dict,
+ acctid,
+ ) as ledger,
):
# transaction-ify the ledger entries
- ledger_trans = norm_trade_records(ledger_dict)
+ ledger_trans = await norm_trade_records(ledger)
+
+ if not table.pps:
+ # NOTE: we can't use this since it first needs
+ # broker: str input support!
+ # table.update_from_trans(ledger.to_trans())
+ table.update_from_trans(ledger_trans)
+ table.write_config()
# TODO: eventually probably only load
# as far back as it seems is not delivered in the
# most recent 50 trades and assume that by ordering we
# already have those records in the ledger.
tids2trades = await client.get_trades()
- ledger_dict.update(tids2trades)
- api_trans = norm_trade_records(tids2trades)
+ ledger.update(tids2trades)
+ if tids2trades:
+ ledger.write_config()
+
+ api_trans = await norm_trade_records(tids2trades)
# retrieve kraken reported balances
# and do diff with ledger to determine
# what amount of trades-transactions need
# to be reloaded.
balances = await client.get_balances()
+
for dst, size in balances.items():
+
# we don't care about tracking positions
# in the user's source fiat currency.
if (
dst == src_fiat
or not any(
- dst in bsuid for bsuid in table.pps
+ dst in bs_mktid for bs_mktid in table.pps
)
):
log.warning(
@@ -508,45 +532,20 @@ async def trades_dialogue(
)
continue
- def get_likely_pair(
- dst: str,
- bsuid: str,
- src_fiat: str = src_fiat
-
- ) -> str:
- '''
- Attempt to get the likely trading pair masting
- a given destination asset `dst: str`.
-
- '''
- try:
- src_name_start = bsuid.rindex(src_fiat)
- except (
- ValueError, # substr not found
- ):
- # TODO: handle nested positions..(i.e.
- # positions where the src fiat was used to
- # buy some other dst which was furhter used
- # to buy another dst..)
- log.warning(
- f'No src fiat {src_fiat} found in {bsuid}?'
- )
- return
-
- likely_dst = bsuid[:src_name_start]
- if likely_dst == dst:
- return bsuid
-
def has_pp(
dst: str,
size: float,
- ) -> Position | bool:
+ ) -> Position | None:
src2dst: dict[str, str] = {}
- for bsuid in table.pps:
- likely_pair = get_likely_pair(dst, bsuid)
+ for bs_mktid in table.pps:
+ likely_pair = get_likely_pair(
+ src_fiat,
+ dst,
+ bs_mktid,
+ )
if likely_pair:
src2dst[src_fiat] = dst
@@ -565,7 +564,7 @@ async def trades_dialogue(
):
log.warning(
f'`kraken` account says you have a ZERO '
- f'balance for {bsuid}:{pair}\n'
+ f'balance for {bs_mktid}:{pair}\n'
f'but piker seems to think `{pp.size}`\n'
'This is likely a discrepancy in piker '
'accounting if the above number is'
@@ -574,7 +573,7 @@ async def trades_dialogue(
)
return pp
- return False
+ return None # signal no entry
pos = has_pp(dst, size)
if not pos:
@@ -601,8 +600,12 @@ async def trades_dialogue(
# in the ``pps.toml`` for the necessary pair
# yet and thus this likely pair grabber will
# likely fail.
- for bsuid in table.pps:
- likely_pair = get_likely_pair(dst, bsuid)
+ for bs_mktid in table.pps:
+ likely_pair = get_likely_pair(
+ src_fiat,
+ dst,
+ bs_mktid,
+ )
if likely_pair:
break
else:
@@ -652,6 +655,12 @@ async def trades_dialogue(
)
await ctx.started((ppmsgs, [acc_name]))
+    # TODO: ideally this blocks this task
+ # as little as possible. we need to either do
+ # these writes in another actor, or try out `trio`'s
+ # async file IO api?
+ table.write_config()
+
# Get websocket token for authenticated data stream
# Assert that a token was actually received.
resp = await client.endpoint('GetWebSocketsToken', {})
@@ -674,8 +683,6 @@ async def trades_dialogue(
aclosing(stream_messages(ws)) as stream,
trio.open_nursery() as nurse,
):
- stream = stream_messages(ws)
-
# task for processing inbound requests from ems
nurse.start_soon(
handle_order_requests,
@@ -724,8 +731,8 @@ async def handle_order_updates(
'''
Main msg handling loop for all things order management.
- This code is broken out to make the context explicit and state variables
- defined in the signature clear to the reader.
+ This code is broken out to make the context explicit and state
+ variables defined in the signature clear to the reader.
'''
async for msg in ws_stream:
@@ -818,7 +825,7 @@ async def handle_order_updates(
)
await ems_stream.send(status_msg)
- new_trans = norm_trade_records(trades)
+ new_trans = await norm_trade_records(trades)
ppmsgs = trades2pps(
table,
acctid,
@@ -827,8 +834,6 @@ async def handle_order_updates(
for pp_msg in ppmsgs:
await ems_stream.send(pp_msg)
- ledger_trans.update(new_trans)
-
# process and relay order state change events
# https://docs.kraken.com/websockets/#message-openOrders
case [
@@ -890,7 +895,7 @@ async def handle_order_updates(
ids.inverse.get(reqid) is None
):
# parse out existing live order
- fqsn = pair.replace('/', '').lower()
+ fqme = pair.replace('/', '').lower()
price = float(price)
size = float(vol)
@@ -917,7 +922,7 @@ async def handle_order_updates(
action=action,
exec_mode='live',
oid=oid,
- symbol=fqsn,
+ symbol=fqme,
account=acc_name,
price=price,
size=size,
@@ -1182,7 +1187,7 @@ async def handle_order_updates(
log.warning(f'Unhandled trades update msg: {msg}')
-def norm_trade_records(
+async def norm_trade_records(
ledger: dict[str, Any],
) -> dict[str, Transaction]:
@@ -1197,30 +1202,19 @@ def norm_trade_records(
}[record['type']]
# we normalize to kraken's `altname` always..
- bsuid, pair_info = Client.normalize_symbol(record['pair'])
- fqsn = f'{bsuid}.kraken'
-
- mktpair = Symbol.from_fqsn(
- fqsn,
- info={
- 'lot_size_digits': pair_info.lot_decimals,
- 'tick_size_digits': pair_info.pair_decimals,
- 'asset_type': 'crypto',
- },
- )
+ bs_mktid = Client.normalize_symbol(record['pair'])
+ fqme = f'{bs_mktid}.kraken'
+ mkt: MktPair = (await get_mkt_info(fqme))[0]
records[tid] = Transaction(
- fqsn=fqsn,
- sym=mktpair,
+ fqme=fqme,
+ sym=mkt,
tid=tid,
size=size,
price=float(record['price']),
cost=float(record['fee']),
dt=pendulum.from_timestamp(float(record['time'])),
- bsuid=bsuid,
-
- # XXX: there are no derivs on kraken right?
- # expiry=expiry,
+ bs_mktid=bs_mktid,
)
return records
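
The `get_likely_pair()` helper now imported from `piker.accounting` replaces the
inline closure removed above; judging from that removed version it boils down to
suffix-matching the source fiat inside the backend market id. A rough sketch of
that logic (not the shared implementation itself):

    def get_likely_pair(
        src_fiat: str,
        dst: str,
        bs_mktid: str,
    ) -> str | None:
        try:
            # find where the source fiat suffix starts in the market id
            src_name_start = bs_mktid.rindex(src_fiat)
        except ValueError:  # src fiat not present at all
            return None
        # only a match if everything before the fiat suffix is the dst asset
        if bs_mktid[:src_name_start] == dst:
            return bs_mktid
        return None

    assert get_likely_pair('ZUSD', 'XXBT', 'XXBTZUSD') == 'XXBTZUSD'
    assert get_likely_pair('ZUSD', 'XETH', 'XXBTZUSD') is None
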
diff --git a/piker/brokers/kraken/feed.py b/piker/brokers/kraken/feed.py
index b8228a55..526590fe 100644
--- a/piker/brokers/kraken/feed.py
+++ b/piker/brokers/kraken/feed.py
@@ -18,12 +18,16 @@
Real-time and historical data feed endpoints.
'''
-from contextlib import asynccontextmanager as acm
+from contextlib import (
+ asynccontextmanager as acm,
+ aclosing,
+)
from datetime import datetime
from typing import (
Any,
- Optional,
+ AsyncGenerator,
Callable,
+ Optional,
)
import time
@@ -31,18 +35,24 @@ from fuzzywuzzy import process as fuzzy
import numpy as np
import pendulum
from trio_typing import TaskStatus
-from trio_util import trio_async_generator
import tractor
import trio
-from piker._cacheables import open_cached_client
+from piker.accounting._mktinfo import (
+ Asset,
+ MktPair,
+)
+from piker._cacheables import (
+ open_cached_client,
+ async_lifo_cache,
+)
from piker.brokers._util import (
BrokerError,
DataThrottle,
DataUnavailable,
)
-from piker.log import get_console_log
from piker.data.types import Struct
+from piker.data.validate import FeedInit
from piker.data._web_bs import open_autorecon_ws, NoBsWs
from . import log
from .api import (
@@ -85,26 +95,9 @@ async def stream_messages(
though a single async generator.
'''
- too_slow_count = last_hb = 0
-
- while True:
-
- with trio.move_on_after(5) as cs:
- msg = await ws.recv_msg()
-
- # trigger reconnection if heartbeat is laggy
- if cs.cancelled_caught:
-
- too_slow_count += 1
-
- if too_slow_count > 20:
- log.warning(
- "Heartbeat is too slow, resetting ws connection")
-
- await ws._connect()
- too_slow_count = 0
- continue
+ last_hb: float = 0
+ async for msg in ws:
match msg:
case {'event': 'heartbeat'}:
now = time.time()
@@ -122,7 +115,6 @@ async def stream_messages(
yield msg
-@trio_async_generator
async def process_data_feed_msgs(
ws: NoBsWs,
):
@@ -130,63 +122,75 @@ async def process_data_feed_msgs(
Parse and pack data feed messages.
'''
- async for msg in stream_messages(ws):
- match msg:
- case {
- 'errorMessage': errmsg
- }:
- raise BrokerError(errmsg)
+ async with aclosing(stream_messages(ws)) as ws_stream:
+ async for msg in ws_stream:
+ match msg:
+ case {
+ 'errorMessage': errmsg
+ }:
+ raise BrokerError(errmsg)
- case {
- 'event': 'subscriptionStatus',
- } as sub:
- log.info(
- 'WS subscription is active:\n'
- f'{sub}'
- )
- continue
-
- case [
- chan_id,
- *payload_array,
- chan_name,
- pair
- ]:
- if 'ohlc' in chan_name:
- ohlc = OHLC(
- chan_id,
- chan_name,
- pair,
- *payload_array[0]
+ case {
+ 'event': 'subscriptionStatus',
+ } as sub:
+ log.info(
+ 'WS subscription is active:\n'
+ f'{sub}'
)
- ohlc.typecast()
- yield 'ohlc', ohlc
+ continue
- elif 'spread' in chan_name:
+ case [
+ chan_id,
+ *payload_array,
+ chan_name,
+ pair
+ ]:
+ if 'ohlc' in chan_name:
+ ohlc = OHLC(
+ chan_id,
+ chan_name,
+ pair,
+ *payload_array[0]
+ )
+ ohlc.typecast()
+ yield 'ohlc', ohlc
- bid, ask, ts, bsize, asize = map(
- float, payload_array[0])
+ elif 'spread' in chan_name:
- # TODO: really makes you think IB has a horrible API...
- quote = {
- 'symbol': pair.replace('/', ''),
- 'ticks': [
- {'type': 'bid', 'price': bid, 'size': bsize},
- {'type': 'bsize', 'price': bid, 'size': bsize},
+ bid, ask, ts, bsize, asize = map(
+ float, payload_array[0])
- {'type': 'ask', 'price': ask, 'size': asize},
- {'type': 'asize', 'price': ask, 'size': asize},
- ],
- }
- yield 'l1', quote
+ # TODO: really makes you think IB has a horrible API...
+ quote = {
+ 'symbol': pair.replace('/', ''),
+ 'ticks': [
+ {'type': 'bid', 'price': bid, 'size': bsize},
+ {'type': 'bsize', 'price': bid, 'size': bsize},
- # elif 'book' in msg[-2]:
- # chan_id, *payload_array, chan_name, pair = msg
- # print(msg)
+ {'type': 'ask', 'price': ask, 'size': asize},
+ {'type': 'asize', 'price': ask, 'size': asize},
+ ],
+ }
+ yield 'l1', quote
- case _:
- print(f'UNHANDLED MSG: {msg}')
- # yield msg
+ # elif 'book' in msg[-2]:
+ # chan_id, *payload_array, chan_name, pair = msg
+ # print(msg)
+
+ case {
+ 'connectionID': conid,
+ 'event': 'systemStatus',
+ 'status': 'online',
+ 'version': ver,
+ }:
+ log.info(
+ f'Established {ver} ws connection with id: {conid}'
+ )
+ continue
+
+ case _:
+ print(f'UNHANDLED MSG: {msg}')
+ # yield msg
def normalize(
@@ -211,9 +215,11 @@ def normalize(
@acm
async def open_history_client(
- symbol: str,
+ mkt: MktPair,
-) -> tuple[Callable, int]:
+) -> AsyncGenerator[Callable, None]:
+
+ symbol: str = mkt.bs_fqme
# TODO implement history getter for the new storage layer.
async with open_cached_client('kraken') as client:
@@ -263,6 +269,44 @@ async def open_history_client(
yield get_ohlc, {'erlangs': 1, 'rate': 1}
+@async_lifo_cache()
+async def get_mkt_info(
+ fqme: str,
+
+) -> tuple[MktPair, Pair]:
+ '''
+ Query for and return a `MktPair` and backend-native `Pair` (or
+ wtv else) info.
+
+    If more than one fqme is provided, return a ``dict`` of native
+ key-strs to `MktPair`s.
+
+ '''
+ async with open_cached_client('kraken') as client:
+
+ # uppercase since kraken bs_mktid is always upper
+ bs_fqme, _, broker = fqme.partition('.')
+ pair_str: str = bs_fqme.upper()
+ bs_mktid: str = Client.normalize_symbol(pair_str)
+ pair: Pair = await client.pair_info(pair_str)
+
+ assets = client.assets
+ dst_asset: Asset = assets[pair.base]
+ src_asset: Asset = assets[pair.quote]
+
+ mkt = MktPair(
+ dst=dst_asset,
+ src=src_asset,
+
+ price_tick=pair.price_tick,
+ size_tick=pair.size_tick,
+ bs_mktid=bs_mktid,
+
+ broker='kraken',
+ )
+ return mkt, pair
+
+
async def stream_quotes(
send_chan: trio.abc.SendChannel,
@@ -283,45 +327,20 @@ async def stream_quotes(
``pairs`` must be formatted /.
'''
- # XXX: required to propagate ``tractor`` loglevel to piker logging
- get_console_log(loglevel or tractor.current_actor().loglevel)
- ws_pairs = {}
- sym_infos = {}
+ ws_pairs: list[str] = []
+ init_msgs: list[FeedInit] = []
- async with open_cached_client('kraken') as client, send_chan as send_chan:
+ async with (
+ send_chan as send_chan,
+ ):
+ for sym_str in symbols:
+ mkt, pair = await get_mkt_info(sym_str)
+ init_msgs.append(
+ FeedInit(mkt_info=mkt)
+ )
- # keep client cached for real-time section
- for sym in symbols:
-
- # transform to upper since piker style is always lower
- sym = sym.upper()
- si: Pair = await client.symbol_info(sym)
- # try:
- # si = Pair(**sym_info) # validation
- # except TypeError:
- # fields_diff = set(sym_info) - set(Pair.__struct_fields__)
- # raise TypeError(
- # f'Missing msg fields {fields_diff}'
- # )
- syminfo = si.to_dict()
- syminfo['price_tick_size'] = 1. / 10**si.pair_decimals
- syminfo['lot_tick_size'] = 1. / 10**si.lot_decimals
- syminfo['asset_type'] = 'crypto'
- sym_infos[sym] = syminfo
- ws_pairs[sym] = si.wsname
-
- symbol = symbols[0].lower()
-
- init_msgs = {
- # pass back token, and bool, signalling if we're the writer
- # and that history has been written
- symbol: {
- 'symbol_info': sym_infos[sym],
- 'shm_write_opts': {'sum_tick_vml': False},
- 'fqsn': sym,
- },
- }
+ ws_pairs.append(pair.wsname)
@acm
async def subscribe(ws: NoBsWs):
@@ -332,7 +351,7 @@ async def stream_quotes(
# https://github.com/krakenfx/kraken-wsclient-py/blob/master/kraken_wsclient_py/kraken_wsclient_py.py#L188
ohlc_sub = {
'event': 'subscribe',
- 'pair': list(ws_pairs.values()),
+ 'pair': ws_pairs,
'subscription': {
'name': 'ohlc',
'interval': 1,
@@ -348,7 +367,7 @@ async def stream_quotes(
# trade data (aka L1)
l1_sub = {
'event': 'subscribe',
- 'pair': list(ws_pairs.values()),
+ 'pair': ws_pairs,
'subscription': {
'name': 'spread',
# 'depth': 10}
@@ -363,7 +382,7 @@ async def stream_quotes(
# unsub from all pairs on teardown
if ws.connected():
await ws.send_msg({
- 'pair': list(ws_pairs.values()),
+ 'pair': ws_pairs,
'event': 'unsubscribe',
'subscription': ['ohlc', 'spread'],
})
@@ -378,21 +397,20 @@ async def stream_quotes(
open_autorecon_ws(
'wss://ws.kraken.com/',
fixture=subscribe,
+ reset_after=20,
) as ws,
# avoid stream-gen closure from breaking trio..
# NOTE: not sure this actually works XD particularly
# if we call `ws._connect()` manally in the streaming
# async gen..
- process_data_feed_msgs(ws) as msg_gen,
+ aclosing(process_data_feed_msgs(ws)) as msg_gen,
):
# pull a first quote and deliver
typ, ohlc_last = await anext(msg_gen)
topic, quote = normalize(ohlc_last)
task_status.started((init_msgs, quote))
-
- # lol, only "closes" when they're margin squeezing clients ;P
feed_is_live.set()
# keep start of last interval for volume tracking
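
For reference, the refactored kraken `subscribe()` fixture above now drives all (un)subscription messages off the flat `ws_pairs` list of wsnames. The following standalone sketch only shows the payload construction mirrored from the hunk (`ohlc_sub`, `l1_sub` and the teardown unsub); the actual send happens over the auto-reconnecting websocket:

# sketch: kraken ws (un)subscribe payloads keyed off a flat wsname list.
def mk_sub_msgs(ws_pairs: list[str]) -> dict[str, dict]:
    return {
        'ohlc': {
            'event': 'subscribe',
            'pair': ws_pairs,
            'subscription': {'name': 'ohlc', 'interval': 1},
        },
        'spread': {
            'event': 'subscribe',
            'pair': ws_pairs,
            'subscription': {'name': 'spread'},
        },
        'unsub': {
            'event': 'unsubscribe',
            'pair': ws_pairs,
            'subscription': ['ohlc', 'spread'],
        },
    }

print(mk_sub_msgs(['XBT/USD', 'ETH/USD'])['ohlc'])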
diff --git a/piker/brokers/kucoin.py b/piker/brokers/kucoin.py
index 743a78c2..8cf06300 100755
--- a/piker/brokers/kucoin.py
+++ b/piker/brokers/kucoin.py
@@ -1,4 +1,6 @@
-# Copyright (C) Jared Goldman (in stewardship for pikers)
+# Copyright (C) (in stewardship for pikers)
+# - Jared Goldman
+# - Tyler Goodlet
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -18,34 +20,54 @@ Kucoin broker backend
'''
-from typing import Any, Callable, Literal, AsyncGenerator
-from contextlib import asynccontextmanager as acm
+from contextlib import (
+ asynccontextmanager as acm,
+ aclosing,
+)
from datetime import datetime
-import time
+from decimal import Decimal
import base64
import hmac
import hashlib
+import time
+from functools import partial
+from pprint import pformat
+from typing import (
+ Any,
+ Callable,
+ Literal,
+ AsyncGenerator,
+)
import wsproto
from uuid import uuid4
+from fuzzywuzzy import process as fuzzy
+from trio_typing import TaskStatus
import asks
+from bidict import bidict
+import numpy as np
+import pendulum
import tractor
import trio
-from trio_util import trio_async_generator
-from trio_typing import TaskStatus
-from fuzzywuzzy import process as fuzzy
-import pendulum
-import numpy as np
-from piker._cacheables import open_cached_client
+from piker.accounting._mktinfo import (
+ Asset,
+ digits_to_dec,
+ MktPair,
+)
+from piker.data.validate import FeedInit
+from piker import config
+from piker._cacheables import (
+ open_cached_client,
+ async_lifo_cache,
+)
from piker.log import get_logger
-from ._util import DataUnavailable
-from piker.pp import config
-from ..data.types import Struct
-from ..data._web_bs import (
+from piker.data.types import Struct
+from piker.data._web_bs import (
open_autorecon_ws,
NoBsWs,
)
+from ._util import DataUnavailable
log = get_logger(__name__)
@@ -67,11 +89,20 @@ class KucoinMktPair(Struct, frozen=True):
https://docs.kucoin.com/#get-symbols-list
'''
-
baseCurrency: str
baseIncrement: float
+
+ @property
+ def price_tick(self) -> Decimal:
+ return Decimal(str(self.baseIncrement))
+
baseMaxSize: float
baseMinSize: float
+
+ @property
+ def size_tick(self) -> Decimal:
+ return Decimal(str(self.baseMinSize))
+
enableTrading: bool
feeCurrency: str
isMarginEnabled: bool
@@ -84,7 +115,7 @@ class KucoinMktPair(Struct, frozen=True):
quoteIncrement: float
quoteMaxSize: float
quoteMinSize: float
- symbol: str
+ symbol: str # our bs_mktid, kucoin's internal id
class AccountTrade(Struct, frozen=True):
@@ -93,7 +124,6 @@ class AccountTrade(Struct, frozen=True):
https://docs.kucoin.com/#get-account-ledgers
'''
-
id: str
currency: str
amount: float
@@ -111,7 +141,6 @@ class AccountResponse(Struct, frozen=True):
https://docs.kucoin.com/#get-account-ledgers
'''
-
currentPage: int
pageSize: int
totalNum: int
@@ -125,7 +154,6 @@ class KucoinTrade(Struct, frozen=True):
https://docs.kucoin.com/#symbol-ticker
'''
-
bestAsk: float
bestAskSize: float
bestBid: float
@@ -148,16 +176,24 @@ class KucoinL2(Struct, frozen=True):
timestamp: float
-class KucoinMsg(Struct, frozen=True):
+class Currency(Struct, frozen=True):
'''
- Generic outer-wrapper for any Kucoin ws msg
+ Currency (asset) info:
+ https://docs.kucoin.com/#get-currencies
'''
-
- type: str
- topic: str
- subject: str
- data: list[KucoinTrade | KucoinL2]
+ currency: str
+ name: str
+ fullName: str
+ precision: int
+ confirms: int
+ contractAddress: str
+ withdrawalMinSize: str
+ withdrawalMinFee: str
+ isWithdrawEnabled: bool
+ isDepositEnabled: bool
+ isMarginEnabled: bool
+ isDebitEnabled: bool
class BrokerConfig(Struct, frozen=True):
@@ -180,15 +216,18 @@ def get_config() -> BrokerConfig | None:
class Client:
def __init__(self) -> None:
- self._pairs: dict[str, KucoinMktPair] = {}
- self._bars: list[list[float]] = []
self._config: BrokerConfig | None = get_config()
+ self._pairs: dict[str, KucoinMktPair] = {}
+ self._fqmes2mktids: bidict[str, str] = bidict()
+ self._bars: list[list[float]] = []
+ self._currencies: dict[str, Currency] = {}
def _gen_auth_req_headers(
self,
action: Literal['POST', 'GET'],
endpoint: str,
- api_v: str = 'v2',
+ api: str = 'v2',
+
) -> dict[str, str | bytes]:
'''
Generate authenticated request headers
@@ -202,7 +241,7 @@ class Client:
str_to_sign = (
str(int(time.time() * 1000))
- + action + f'/api/{api_v}{endpoint}'
+ + action + f'/api/{api}/{endpoint.lstrip("/")}'
)
signature = base64.b64encode(
@@ -234,7 +273,7 @@ class Client:
self,
action: Literal['POST', 'GET'],
endpoint: str,
- api_v: str = 'v2',
+ api: str = 'v2',
headers: dict = {},
) -> Any:
'''
@@ -243,19 +282,24 @@ class Client:
'''
if self._config:
headers = self._gen_auth_req_headers(
- action, endpoint, api_v)
+ action,
+ endpoint,
+ api,
+ )
- api_url = f'https://api.kucoin.com/api/{api_v}{endpoint}'
+ api_url = f'https://api.kucoin.com/api/{api}/{endpoint}'
res = await asks.request(action, api_url, headers=headers)
- if 'data' in res.json():
- return res.json()['data']
+ json = res.json()
+ if 'data' in json:
+ return json['data']
else:
log.error(
- f'Error making request to {api_url} -> {res.json()["msg"]}'
+ f'Error making request to {api_url} ->\n'
+ f'{pformat(res)}'
)
- return res.json()['msg']
+ return json['msg']
async def _get_ws_token(
self,
@@ -271,7 +315,9 @@ class Client:
token_type = 'private' if private else 'public'
try:
data: dict[str, Any] | None = await self._request(
- 'POST', f'/bullet-{token_type}', 'v1'
+ 'POST',
+ endpoint=f'bullet-{token_type}',
+ api='v1'
)
except Exception as e:
log.error(
@@ -288,27 +334,72 @@ class Client:
f'{data.json()["msg"]}'
)
+ async def get_currencies(
+ self,
+ update: bool = False,
+ ) -> dict[str, Currency]:
+ '''
+ Retrieve all "currency" info:
+ https://docs.kucoin.com/#get-currencies
+
+        We use this for creating piker-internal ``Asset``s.
+
+ '''
+ if (
+ not self._currencies
+ or update
+ ):
+ currencies: dict[str, Currency] = {}
+ entries: list[dict] = await self._request(
+ 'GET',
+ api='v1',
+ endpoint='currencies',
+ )
+ for entry in entries:
+ curr = Currency(**entry).copy()
+ currencies[curr.name] = curr
+
+ self._currencies.update(currencies)
+
+ return self._currencies
+
async def _get_pairs(
self,
- ) -> dict[str, KucoinMktPair]:
- entries = await self._request('GET', '/symbols')
- syms = {
- kucoin_sym_to_fqsn(item['name']): KucoinMktPair(**item)
- for item in entries
- }
+ ) -> tuple[
+ dict[str, KucoinMktPair],
+ bidict[str, KucoinMktPair],
+ ]:
+ entries = await self._request('GET', 'symbols')
+ log.info(f' {len(entries)} Kucoin market pairs fetched')
- log.info(f' {len(syms)} Kucoin market pairs fetched')
- return syms
+ pairs: dict[str, KucoinMktPair] = {}
+ fqmes2mktids: bidict[str, str] = bidict()
+ for item in entries:
+ pair = pairs[item['name']] = KucoinMktPair(**item)
+ fqmes2mktids[
+ item['name'].lower().replace('-', '')
+ ] = pair.name
+
+ return pairs, fqmes2mktids
async def cache_pairs(
self,
+ update: bool = False,
+
) -> dict[str, KucoinMktPair]:
'''
- Get cached pairs and convert keyed symbols into fqsns if ya want
+        Request all market pairs and store them in a local cache.
+
+ Also create a table of piker style fqme -> kucoin symbols.
'''
- if not self._pairs:
- self._pairs = await self._get_pairs()
+ if (
+ not self._pairs
+ or update
+ ):
+ pairs, fqmes = await self._get_pairs()
+ self._pairs.update(pairs)
+ self._fqmes2mktids.update(fqmes)
return self._pairs
@@ -316,7 +407,12 @@ class Client:
self,
pattern: str,
limit: int = 30,
+
) -> dict[str, KucoinMktPair]:
+ '''
+ Use fuzzy search to match against all market names.
+
+ '''
data = await self.cache_pairs()
matches = fuzzy.extractBests(
@@ -327,19 +423,23 @@ class Client:
async def last_trades(self, sym: str) -> list[AccountTrade]:
trades = await self._request(
- 'GET', f'/accounts/ledgers?currency={sym}', 'v1'
+ 'GET',
+ endpoint=f'accounts/ledgers?currency={sym}',
+ api='v1'
)
trades = AccountResponse(**trades)
return trades.items
async def _get_bars(
self,
- fqsn: str,
+ fqme: str,
+
start_dt: datetime | None = None,
end_dt: datetime | None = None,
limit: int = 1000,
as_np: bool = True,
type: str = '1min',
+
) -> np.ndarray:
'''
Get OHLC data and convert to numpy array for perffff:
@@ -381,10 +481,10 @@ class Client:
start_dt = int(start_dt.timestamp())
end_dt = int(end_dt.timestamp())
- kucoin_sym = fqsn_to_kucoin_sym(fqsn, self._pairs)
+ kucoin_sym = self._fqmes2mktids[fqme]
url = (
- f'/market/candles?type={type}'
+ f'market/candles?type={type}'
f'&symbol={kucoin_sym}'
f'&startAt={start_dt}'
f'&endAt={end_dt}'
@@ -394,7 +494,7 @@ class Client:
data: list[list[str]] | dict = await self._request(
'GET',
url,
- api_v='v1',
+ api='v1',
)
if not isinstance(data, list):
@@ -439,19 +539,22 @@ class Client:
return array
-def fqsn_to_kucoin_sym(fqsn: str, pairs: dict[str, KucoinMktPair]) -> str:
- pair_data = pairs[fqsn]
+def fqme_to_kucoin_sym(
+ fqme: str,
+ pairs: dict[str, KucoinMktPair],
+
+) -> str:
+ pair_data = pairs[fqme]
return pair_data.baseCurrency + '-' + pair_data.quoteCurrency
-def kucoin_sym_to_fqsn(sym: str) -> str:
- return sym.lower().replace('-', '')
-
-
@acm
async def get_client() -> AsyncGenerator[Client, None]:
client = Client()
- await client.cache_pairs()
+
+ async with trio.open_nursery() as n:
+ n.start_soon(client.cache_pairs)
+ await client.get_currencies()
yield client
@@ -497,195 +600,268 @@ async def open_ping_task(
n.cancel_scope.cancel()
+@async_lifo_cache()
+async def get_mkt_info(
+ fqme: str,
+
+) -> tuple[MktPair, KucoinMktPair]:
+ '''
+ Query for and return a `MktPair` and `KucoinMktPair`.
+
+ '''
+ async with open_cached_client('kucoin') as client:
+ # split off any fqme broker part
+ bs_fqme, _, broker = fqme.partition('.')
+
+ pairs: dict[str, KucoinMktPair] = await client.cache_pairs()
+
+ try:
+ # likely search result key which is already in native mkt symbol form
+ pair: KucoinMktPair = pairs[bs_fqme]
+ bs_mktid: str = bs_fqme
+
+ except KeyError:
+
+ # likely a piker-style fqme from API request or CLI
+ bs_mktid: str = client._fqmes2mktids[bs_fqme]
+ pair: KucoinMktPair = pairs[bs_mktid]
+
+ # symbology sanity
+ assert bs_mktid == pair.symbol
+
+ assets: dict[str, Currency] = client._currencies
+
+ # TODO: maybe just do this processing in
+ # a .get_assets() method (see kraken)?
+ src: Currency = assets[pair.quoteCurrency]
+ src_asset = Asset(
+ name=src.name,
+ atype='crypto_currency',
+ tx_tick=digits_to_dec(src.precision),
+ info=src.to_dict(),
+ )
+ dst: Currency = assets[pair.baseCurrency]
+ dst_asset = Asset(
+ name=dst.name,
+ atype='crypto_currency',
+ tx_tick=digits_to_dec(dst.precision),
+ info=dst.to_dict(),
+ )
+ mkt = MktPair(
+ dst=dst_asset,
+ src=src_asset,
+
+ price_tick=pair.price_tick,
+ size_tick=pair.size_tick,
+ bs_mktid=bs_mktid,
+
+ broker='kucoin',
+ )
+ return mkt, pair
+
+
async def stream_quotes(
send_chan: trio.abc.SendChannel,
symbols: list[str],
feed_is_live: trio.Event,
- loglevel: str = '',
- # startup sync
- task_status: TaskStatus[tuple[dict, dict]
- ] = trio.TASK_STATUS_IGNORED,
+
+ task_status: TaskStatus[
+ tuple[dict, dict]
+ ] = trio.TASK_STATUS_IGNORED,
+
) -> None:
'''
Required piker api to stream real-time data.
Where the rubber hits the road baby
'''
+ init_msgs: list[FeedInit] = []
+
async with open_cached_client('kucoin') as client:
+
+ log.info(f'Starting up quote stream(s) for {symbols}')
+ for sym_str in symbols:
+ mkt, pair = await get_mkt_info(sym_str)
+ init_msgs.append(
+ FeedInit(mkt_info=mkt)
+ )
+
+ ws: NoBsWs
token, ping_interval = await client._get_ws_token()
connect_id = str(uuid4())
- pairs = await client.cache_pairs()
- ws_url = (
- f'wss://ws-api-spot.kucoin.com/?'
- f'token={token}&[connectId={connect_id}]'
- )
-
- # open ping task
async with (
- open_autorecon_ws(ws_url) as ws,
+ open_autorecon_ws(
+ (
+ f'wss://ws-api-spot.kucoin.com/?'
+ f'token={token}&[connectId={connect_id}]'
+ ),
+ fixture=partial(
+ subscribe,
+ connect_id=connect_id,
+ bs_mktid=pair.symbol,
+ ),
+ ) as ws,
open_ping_task(ws, ping_interval, connect_id),
+ aclosing(stream_messages(ws, sym_str)) as msg_gen,
):
- log.info('Starting up quote stream')
- # loop through symbols and sub to feedz
- for sym in symbols:
- pair: KucoinMktPair = pairs[sym]
- kucoin_sym = pair.symbol
+ typ, quote = await anext(msg_gen)
- init_msgs = {
- # pass back token, and bool, signalling if we're the writer
- # and that history has been written
- sym: {
- 'symbol_info': {
- 'asset_type': 'crypto',
- 'price_tick_size': float(pair.baseIncrement),
- 'lot_tick_size': float(pair.baseMinSize),
- },
- 'shm_write_opts': {'sum_tick_vml': False},
- 'fqsn': sym,
- }
- }
+ while typ != 'trade':
+ # take care to not unblock here until we get a real
+ # trade quote
+ typ, quote = await anext(msg_gen)
- async with (
- subscribe(ws, connect_id, kucoin_sym),
- stream_messages(ws, sym) as msg_gen,
- ):
- typ, quote = await anext(msg_gen)
- while typ != 'trade':
- # take care to not unblock here until we get a real
- # trade quote
- typ, quote = await anext(msg_gen)
+ task_status.started((init_msgs, quote))
+ feed_is_live.set()
- task_status.started((init_msgs, quote))
- feed_is_live.set()
-
- async for typ, msg in msg_gen:
- await send_chan.send({sym: msg})
+ async for typ, msg in msg_gen:
+ await send_chan.send({sym_str: msg})
@acm
-async def subscribe(ws: wsproto.WSConnection, connect_id, sym) -> AsyncGenerator[None, None]:
- # level 2 sub
- await ws.send_msg(
- {
- 'id': connect_id,
- 'type': 'subscribe',
- 'topic': f'/spotMarket/level2Depth5:{sym}',
- 'privateChannel': False,
- 'response': True,
- }
- )
+async def subscribe(
+ ws: NoBsWs,
+ connect_id,
+ bs_mktid,
- # watch trades
- await ws.send_msg(
- {
- 'id': connect_id,
- 'type': 'subscribe',
- 'topic': f'/market/ticker:{sym}',
- 'privateChannel': False,
- 'response': True,
- }
- )
+    # subs are filled in with `bs_mktid` from above
+ topics: list[str] = [
+ '/market/ticker:{bs_mktid}', # clearing events
+ '/spotMarket/level2Depth5:{bs_mktid}', # level 2
+ ],
- yield
+) -> AsyncGenerator[None, None]:
- # unsub
- if ws.connected():
- log.info(f'Unsubscribing to {sym} feed')
+ eps: list[str] = []
+ for topic in topics:
+ ep: str = topic.format(bs_mktid=bs_mktid)
+ eps.append(ep)
await ws.send_msg(
{
'id': connect_id,
- 'type': 'unsubscribe',
- 'topic': f'/market/ticker:{sym}',
+ 'type': 'subscribe',
+ 'topic': ep,
'privateChannel': False,
'response': True,
}
)
+ welcome_msg = await ws.recv_msg()
+ log.info(f'WS welcome: {welcome_msg}')
+
+ for _ in topics:
+ ack_msg = await ws.recv_msg()
+ log.info(f'Sub ACK: {ack_msg}')
+
+ yield
+
+ # unsub
+ if ws.connected():
+        log.info(f'Unsubscribing from {bs_mktid} feed')
+ for ep in eps:
+ await ws.send_msg(
+ {
+ 'id': connect_id,
+ 'type': 'unsubscribe',
+ 'topic': ep,
+ 'privateChannel': False,
+ 'response': True,
+ }
+ )
+
-@trio_async_generator
async def stream_messages(
- ws: NoBsWs, sym: str
+ ws: NoBsWs,
+ sym: str,
+
) -> AsyncGenerator[tuple[str, dict], None]:
- timeouts = 0
- last_trade_ts = 0
+ '''
+ Core (live) feed msg handler: relay market events
+ to the piker-ized tick-stream format.
- while True:
- with trio.move_on_after(3) as cs:
- msg = await ws.recv_msg()
- if cs.cancelled_caught:
- timeouts += 1
- if timeouts > 2:
- log.error(
- 'kucoin feed is sh**ing the bed... rebooting...')
- await ws._connect()
+ '''
+ last_trade_ts: float = 0
- continue
- if msg.get('subject'):
- msg = KucoinMsg(**msg)
- match msg.subject:
- case 'trade.ticker':
- trade_data = KucoinTrade(**msg.data)
+ dict_msg: dict[str, Any]
+ async for dict_msg in ws:
+ match dict_msg:
+ case {
+ 'subject': 'trade.ticker',
+ 'data': trade_data_dict,
+ }:
+ trade_data = KucoinTrade(**trade_data_dict)
- # XXX: Filter for duplicate messages as ws feed will
- # send duplicate market state
- # https://docs.kucoin.com/#level2-5-best-ask-bid-orders
- if trade_data.time == last_trade_ts:
- continue
+ # XXX: Filter out duplicate messages as ws feed will
+ # send duplicate market state
+ # https://docs.kucoin.com/#level2-5-best-ask-bid-orders
+ if trade_data.time == last_trade_ts:
+ continue
- last_trade_ts = trade_data.time
+ last_trade_ts = trade_data.time
- yield 'trade', {
- 'symbol': sym,
- 'last': trade_data.price,
- 'brokerd_ts': last_trade_ts,
- 'ticks': [
- {
- 'type': 'trade',
- 'price': float(trade_data.price),
- 'size': float(trade_data.size),
- 'broker_ts': last_trade_ts,
- }
- ],
- }
+ yield 'trade', {
+ 'symbol': sym,
+ 'last': trade_data.price,
+ 'brokerd_ts': last_trade_ts,
+ 'ticks': [
+ {
+ 'type': 'trade',
+ 'price': float(trade_data.price),
+ 'size': float(trade_data.size),
+ 'broker_ts': last_trade_ts,
+ }
+ ],
+ }
- case 'level2':
- l2_data = KucoinL2(**msg.data)
- first_ask = l2_data.asks[0]
- first_bid = l2_data.bids[0]
- yield 'l1', {
- 'symbol': sym,
- 'ticks': [
- {
- 'type': 'bid',
- 'price': float(first_bid[0]),
- 'size': float(first_bid[1]),
- },
- {
- 'type': 'bsize',
- 'price': float(first_bid[0]),
- 'size': float(first_bid[1]),
- },
- {
- 'type': 'ask',
- 'price': float(first_ask[0]),
- 'size': float(first_ask[1]),
- },
- {
- 'type': 'asize',
- 'price': float(first_ask[0]),
- 'size': float(first_ask[1]),
- },
- ],
- }
+ case {
+ 'subject': 'level2',
+ 'data': trade_data_dict,
+ }:
+ l2_data = KucoinL2(**trade_data_dict)
+ first_ask = l2_data.asks[0]
+ first_bid = l2_data.bids[0]
+ yield 'l1', {
+ 'symbol': sym,
+ 'ticks': [
+ {
+ 'type': 'bid',
+ 'price': float(first_bid[0]),
+ 'size': float(first_bid[1]),
+ },
+ {
+ 'type': 'bsize',
+ 'price': float(first_bid[0]),
+ 'size': float(first_bid[1]),
+ },
+ {
+ 'type': 'ask',
+ 'price': float(first_ask[0]),
+ 'size': float(first_ask[1]),
+ },
+ {
+ 'type': 'asize',
+ 'price': float(first_ask[0]),
+ 'size': float(first_ask[1]),
+ },
+ ],
+ }
- case _:
- log.warn(f'Unhandled message: {msg}')
+ case {'type': 'pong'}:
+ # resp to ping task req
+ continue
+
+ case _:
+ log.warn(f'Unhandled message: {dict_msg}')
@acm
async def open_history_client(
- symbol: str,
+ mkt: MktPair,
+
) -> AsyncGenerator[Callable, None]:
+
+ symbol: str = mkt.bs_fqme
+
async with open_cached_client('kucoin') as client:
log.info('Attempting to open kucoin history client')
@@ -709,6 +885,11 @@ async def open_history_client(
times = array['time']
+ if not len(times):
+ raise DataUnavailable(
+ f'No more history before {start_dt}?'
+ )
+
if end_dt is None:
inow = round(time.time())
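
The kucoin refactor above keys the pairs cache by kucoin's native symbol (e.g. 'BTC-USDT') and keeps a `bidict` mapping piker-style fqme keys back to those ids; `get_mkt_info()` then falls back from a native-form key to an fqme lookup. A trimmed-down sketch of that table and lookup, where the pair entries are stand-ins for the real symbol metadata:

# sketch: the fqme <-> kucoin-native symbol table built by
# `Client._get_pairs()` and the fallback lookup used in `get_mkt_info()`.
from bidict import bidict

entries = [{'name': 'BTC-USDT'}, {'name': 'ETH-USDT'}]  # stand-in pair meta

fqmes2mktids: bidict[str, str] = bidict()
for item in entries:
    fqmes2mktids[item['name'].lower().replace('-', '')] = item['name']

def to_bs_mktid(bs_fqme: str) -> str:
    # search results may already be in native form ('BTC-USDT'),
    # otherwise treat the key as a piker-style fqme ('btcusdt')
    if bs_fqme in fqmes2mktids.inverse:
        return bs_fqme
    return fqmes2mktids[bs_fqme]

assert to_bs_mktid('btcusdt') == 'BTC-USDT'
assert to_bs_mktid('ETH-USDT') == 'ETH-USDT'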
diff --git a/piker/brokers/questrade.py b/piker/brokers/questrade.py
index a3b5cfe0..1d447b23 100644
--- a/piker/brokers/questrade.py
+++ b/piker/brokers/questrade.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
+# Copyright (C) 2018-present Tyler Goodlet (in stewardship of pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -43,10 +43,13 @@ from ..calc import humanize, percent_change
from .._cacheables import open_cached_client, async_lifo_cache
from .. import config
from ._util import resproc, BrokerError, SymbolNotFound
-from ..log import get_logger, colorize_json, get_console_log
-
-
-log = get_logger(__name__)
+from ..log import (
+ colorize_json,
+)
+from ._util import (
+ log,
+ get_console_log,
+)
_use_practice_account = False
_refresh_token_ep = 'https://{}login.questrade.com/oauth2/'
diff --git a/piker/brokers/robinhood.py b/piker/brokers/robinhood.py
index 71b21055..8fc5739f 100644
--- a/piker/brokers/robinhood.py
+++ b/piker/brokers/robinhood.py
@@ -27,12 +27,13 @@ from typing import List
from async_generator import asynccontextmanager
import asks
-from ..log import get_logger
-from ._util import resproc, BrokerError
+from ._util import (
+ resproc,
+ BrokerError,
+ log,
+)
from ..calc import percent_change
-log = get_logger(__name__)
-
_service_ep = 'https://api.robinhood.com'
@@ -65,8 +66,10 @@ class Client:
self.api = _API(self._sess)
def _zip_in_order(self, symbols: [str], quotes: List[dict]):
- return {quote.get('symbol', sym) if quote else sym: quote
- for sym, quote in zip(symbols, results_dict)}
+ return {
+ quote.get('symbol', sym) if quote else sym: quote
+ for sym, quote in zip(symbols, quotes)
+ }
async def quote(self, symbols: [str]):
"""Retrieve quotes for a list of ``symbols``.
diff --git a/piker/clearing/__init__.py b/piker/clearing/__init__.py
index 06a9212e..b2cc5fa7 100644
--- a/piker/clearing/__init__.py
+++ b/piker/clearing/__init__.py
@@ -18,9 +18,17 @@
Market machinery for order executions, book, management.
"""
-from ._client import open_ems
+from ..log import get_logger
+from ._client import (
+ open_ems,
+ OrderClient,
+)
__all__ = [
'open_ems',
+ 'OrderClient',
+
]
+
+log = get_logger(__name__)
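
Several hunks in this patch (questrade, robinhood, `clearing/__init__.py`, `_ems.py`, `_client.py`, `_paper_engine.py`) drop per-module `get_logger(__name__)` calls in favour of one sub-system logger importable from a `_util` module. A minimal sketch of that pattern follows; the module layout and logger name are illustrative only:

# sketch: a sub-system `_util.py` builds one shared logger which sibling
# modules import instead of each calling `get_logger(__name__)`.
import logging

def get_logger(name: str) -> logging.Logger:
    # stand-in for `piker.log.get_logger()`
    logger = logging.getLogger(name)
    logger.addHandler(logging.NullHandler())
    return logger

# piker/clearing/_util.py
log = get_logger('piker.clearing')

# piker/clearing/_ems.py, _client.py, _paper_engine.py:
#   from ._util import log  # sub-sys logger
log.warning('one logger shared across the clearing sub-system')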
diff --git a/piker/clearing/_client.py b/piker/clearing/_client.py
index 7d03406a..65a21fef 100644
--- a/piker/clearing/_client.py
+++ b/piker/clearing/_client.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -27,68 +27,104 @@ import trio
import tractor
from tractor.trionics import broadcast_receiver
-from ..log import get_logger
+from ._util import (
+ log, # sub-sys logger
+)
from ..data.types import Struct
from ..service import maybe_open_emsd
from ._messages import (
Order,
Cancel,
+ BrokerdPosition,
)
from ..brokers import get_brokermod
if TYPE_CHECKING:
from ._messages import (
- BrokerdPosition,
Status,
)
-log = get_logger(__name__)
+class OrderClient(Struct):
+ '''
+ EMS-client-side order book ctl and tracking.
-
-class OrderBook(Struct):
- '''EMS-client-side order book ctl and tracking.
-
- A style similar to "model-view" is used here where this api is
- provided as a supervised control for an EMS actor which does all the
- hard/fast work of talking to brokers/exchanges to conduct
- executions.
-
- Currently, this is mostly for keeping local state to match the EMS
- and use received events to trigger graphics updates.
+ (A)sync API for submitting orders and alerts to the `emsd` service;
+ this is the main control for execution management from client code.
'''
+ # IPC stream to `emsd` actor
+ _ems_stream: tractor.MsgStream
+
# mem channels used to relay order requests to the EMS daemon
- _to_ems: trio.abc.SendChannel
- _from_order_book: trio.abc.ReceiveChannel
+ _to_relay_task: trio.abc.SendChannel
+ _from_sync_order_client: trio.abc.ReceiveChannel
+
+ # history table
_sent_orders: dict[str, Order] = {}
- def send(
+ def send_nowait(
self,
msg: Order | dict,
- ) -> dict:
+ ) -> dict | Order:
+ '''
+ Sync version of ``.send()``.
+
+ '''
self._sent_orders[msg.oid] = msg
- self._to_ems.send_nowait(msg)
+ self._to_relay_task.send_nowait(msg)
return msg
- def send_update(
+ async def send(
self,
+ msg: Order | dict,
+ ) -> dict | Order:
+ '''
+ Send a new order msg async to the `emsd` service.
+
+ '''
+ self._sent_orders[msg.oid] = msg
+ await self._ems_stream.send(msg)
+ return msg
+
+ def update_nowait(
+ self,
uuid: str,
**data: dict,
) -> dict:
+ '''
+ Sync version of ``.update()``.
+
+ '''
cmd = self._sent_orders[uuid]
msg = cmd.copy(update=data)
self._sent_orders[uuid] = msg
- self._to_ems.send_nowait(msg)
- return cmd
+ self._to_relay_task.send_nowait(msg)
+ return msg
- def cancel(self, uuid: str) -> bool:
- """Cancel an order (or alert) in the EMS.
+ async def update(
+ self,
+ uuid: str,
+ **data: dict,
+ ) -> dict:
+ '''
+ Update an existing order dialog with a msg updated from
+ ``update`` kwargs.
- """
+ '''
+ cmd = self._sent_orders[uuid]
+ msg = cmd.copy(update=data)
+ self._sent_orders[uuid] = msg
+ await self._ems_stream.send(msg)
+ return msg
+
+ def _mk_cancel_msg(
+ self,
+ uuid: str,
+ ) -> Cancel:
cmd = self._sent_orders.get(uuid)
if not cmd:
log.error(
@@ -96,77 +132,75 @@ class OrderBook(Struct):
f'Maybe there is a stale entry or line?\n'
f'You should report this as a bug!'
)
- msg = Cancel(
+ fqme = str(cmd.symbol)
+ return Cancel(
oid=uuid,
- symbol=cmd.symbol,
- )
- self._to_ems.send_nowait(msg)
-
-
-_orders: OrderBook = None
-
-
-def get_orders(
- emsd_uid: tuple[str, str] = None
-) -> OrderBook:
- """"
- OrderBook singleton factory per actor.
-
- """
- if emsd_uid is not None:
- # TODO: read in target emsd's active book on startup
- pass
-
- global _orders
-
- if _orders is None:
- size = 100
- tx, rx = trio.open_memory_channel(size)
- brx = broadcast_receiver(rx, size)
-
- # setup local ui event streaming channels for request/resp
- # streamging with EMS daemon
- _orders = OrderBook(
- _to_ems=tx,
- _from_order_book=brx,
+ symbol=fqme,
)
- return _orders
+ def cancel_nowait(
+ self,
+ uuid: str,
+
+ ) -> None:
+ '''
+ Sync version of ``.cancel()``.
+
+ '''
+ self._to_relay_task.send_nowait(
+ self._mk_cancel_msg(uuid)
+ )
+
+ async def cancel(
+ self,
+ uuid: str,
+
+ ) -> bool:
+ '''
+        Cancel an already existing order (or alert) dialog.
+
+ '''
+ await self._ems_stream.send(
+ self._mk_cancel_msg(uuid)
+ )
-# TODO: we can get rid of this relay loop once we move
-# order_mode inputs to async code!
-async def relay_order_cmds_from_sync_code(
+async def relay_orders_from_sync_code(
+
+ client: OrderClient,
symbol_key: str,
to_ems_stream: tractor.MsgStream,
) -> None:
- """
- Order streaming task: deliver orders transmitted from UI
- to downstream consumers.
+ '''
+ Order submission relay task: deliver orders sent from synchronous (UI)
+ code to the EMS via ``OrderClient._from_sync_order_client``.
This is run in the UI actor (usually the one running Qt but could be
any other client service code). This process simply delivers order
- messages to the above ``_to_ems`` send channel (from sync code using
+ messages to the above ``_to_relay_task`` send channel (from sync code using
``.send_nowait()``), these values are pulled from the channel here
and relayed to any consumer(s) that called this function using
a ``tractor`` portal.
This effectively makes order messages look like they're being
"pushed" from the parent to the EMS where local sync code is likely
- doing the pushing from some UI.
+ doing the pushing from some non-async UI handler.
- """
- book = get_orders()
- async with book._from_order_book.subscribe() as orders_stream:
- async for cmd in orders_stream:
+ '''
+ async with (
+ client._from_sync_order_client.subscribe() as sync_order_cmds
+ ):
+ async for cmd in sync_order_cmds:
sym = cmd.symbol
- msg = pformat(cmd)
+ msg = pformat(cmd.to_dict())
+
if sym == symbol_key:
log.info(f'Send order cmd:\n{msg}')
# send msg over IPC / wire
await to_ems_stream.send(cmd)
+
else:
log.warning(
f'Ignoring unmatched order cmd for {sym} != {symbol_key}:'
@@ -176,62 +210,39 @@ async def relay_order_cmds_from_sync_code(
@acm
async def open_ems(
- fqsn: str,
+ fqme: str,
mode: str = 'live',
loglevel: str = 'error',
) -> tuple[
- OrderBook,
+ OrderClient,
tractor.MsgStream,
dict[
# brokername, acctid
tuple[str, str],
- list[BrokerdPosition],
+ dict[str, BrokerdPosition],
],
list[str],
dict[str, Status],
]:
'''
- Spawn an EMS daemon and begin sending orders and receiving
- alerts.
+ (Maybe) spawn an EMS-daemon (emsd), deliver an `OrderClient` for
+ requesting orders/alerts and a `trades_stream` which delivers all
+ response-msgs.
- This EMS tries to reduce most broker's terrible order entry apis to
- a very simple protocol built on a few easy to grok and/or
- "rantsy" premises:
-
- - most users will prefer "dark mode" where orders are not submitted
- to a broker until and execution condition is triggered
- (aka client-side "hidden orders")
-
- - Brokers over-complicate their apis and generally speaking hire
- poor designers to create them. We're better off using creating a super
- minimal, schema-simple, request-event-stream protocol to unify all the
- existing piles of shit (and shocker, it'll probably just end up
- looking like a decent crypto exchange's api)
-
- - all order types can be implemented with client-side limit orders
-
- - we aren't reinventing a wheel in this case since none of these
- brokers are exposing FIX protocol; it is they doing the re-invention.
-
-
- TODO: make some fancy diagrams using mermaid.io
-
- the possible set of responses from the stream is currently:
- - 'dark_submitted', 'broker_submitted'
- - 'dark_cancelled', 'broker_cancelled'
- - 'dark_executed', 'broker_executed'
- - 'broker_filled'
+ This is a "client side" entrypoint which may spawn the `emsd` service
+    if it can't be discovered; generally speaking it is the lowest-level
+    broker-control client API.
'''
- # wait for service to connect back to us signalling
- # ready for order commands
- book = get_orders()
+ # TODO: prolly hand in the `MktPair` instance directly here as well!
+ from piker.accounting import unpack_fqme
+ broker, mktep, venue, suffix = unpack_fqme(fqme)
- from ..data._source import unpack_fqsn
- broker, symbol, suffix = unpack_fqsn(fqsn)
-
- async with maybe_open_emsd(broker) as portal:
+ async with maybe_open_emsd(
+ broker,
+ loglevel=loglevel,
+ ) as portal:
mod = get_brokermod(broker)
if (
@@ -244,9 +255,8 @@ async def open_ems(
async with (
# connect to emsd
portal.open_context(
-
_emsd_main,
- fqsn=fqsn,
+ fqme=fqme,
exec_mode=mode,
loglevel=loglevel,
@@ -262,18 +272,36 @@ async def open_ems(
# open 2-way trade command stream
ctx.open_stream() as trades_stream,
):
+ size: int = 100 # what should this be?
+ tx, rx = trio.open_memory_channel(size)
+ brx = broadcast_receiver(rx, size)
+
+ # setup local ui event streaming channels for request/resp
+            # streaming with EMS daemon
+ client = OrderClient(
+ _ems_stream=trades_stream,
+ _to_relay_task=tx,
+ _from_sync_order_client=brx,
+ )
+
+ client._ems_stream = trades_stream
+
# start sync code order msg delivery task
async with trio.open_nursery() as n:
n.start_soon(
- relay_order_cmds_from_sync_code,
- fqsn,
+ relay_orders_from_sync_code,
+ client,
+ fqme,
trades_stream
)
yield (
- book,
+ client,
trades_stream,
positions,
accounts,
dialogs,
)
+
+ # stop the sync-msg-relay task on exit.
+ n.cancel_scope.cancel()
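
Taken together, the `_client.py` changes above replace the module-global `OrderBook` with an `OrderClient` carrying both sync (`*_nowait`) and async methods bound to the `emsd` stream. The sketch below drives one order dialog through that API; the `Order` field set and `exec_mode='paper'` value are assumptions based only on the fields visible in this diff:

# sketch (assumed field/signature details as noted above).
import uuid
from piker.clearing import open_ems
from piker.clearing._messages import Order

async def submit_update_cancel(fqme: str = 'xbtusdt.kraken') -> None:
    async with open_ems(fqme, mode='paper') as (
        client, trades_stream, positions, accounts, dialogs,
    ):
        oid = str(uuid.uuid4())
        await client.send(Order(
            oid=oid,
            symbol=fqme,
            account='paper',
            action='buy',
            exec_mode='paper',
            price=10_000.0,
            size=0.01,
        ))
        await client.update(oid, price=10_050.0)  # amend the live dialog
        await client.cancel(oid)                  # then tear it down

# run under trio, e.g.: trio.run(submit_update_cancel)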
diff --git a/piker/clearing/_ems.py b/piker/clearing/_ems.py
index 477da310..e41ddbf1 100644
--- a/piker/clearing/_ems.py
+++ b/piker/clearing/_ems.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -41,11 +41,13 @@ import trio
from trio_typing import TaskStatus
import tractor
-from ..log import get_logger
+from ._util import (
+ log, # sub-sys logger
+ get_console_log,
+)
from ..data._normalize import iterticks
-from ..data._source import (
- unpack_fqsn,
- mk_fqsn,
+from ..accounting._mktinfo import (
+ unpack_fqme,
float_digits,
)
from ..data.feed import (
@@ -69,9 +71,6 @@ from ._messages import (
)
-log = get_logger(__name__)
-
-
# TODO: numba all of this
def mk_check(
@@ -157,7 +156,7 @@ async def clear_dark_triggers(
brokerd_orders_stream: tractor.MsgStream,
quote_stream: tractor.ReceiveMsgStream, # noqa
broker: str,
- fqsn: str,
+ fqme: str,
book: DarkBook,
@@ -232,7 +231,7 @@ async def clear_dark_triggers(
account=account,
size=size,
):
- bfqsn: str = symbol.replace(f'.{broker}', '')
+ bfqme: str = symbol.replace(f'.{broker}', '')
submit_price = price + abs_diff_away
resp = 'triggered' # hidden on client-side
@@ -245,7 +244,7 @@ async def clear_dark_triggers(
oid=oid,
account=account,
time_ns=time.time_ns(),
- symbol=bfqsn,
+ symbol=bfqme,
price=submit_price,
size=size,
)
@@ -288,14 +287,14 @@ async def clear_dark_triggers(
# send response to client-side
await router.client_broadcast(
- fqsn,
+ fqme,
status,
)
else: # condition scan loop complete
log.debug(f'execs are {execs}')
if execs:
- book.triggers[fqsn] = execs
+ book.triggers[fqme] = execs
# print(f'execs scan took: {time.time() - start}')
@@ -316,9 +315,6 @@ class TradesRelay(Struct):
# allowed account names
accounts: tuple[str]
- # count of connected ems clients for this ``brokerd``
- consumers: int = 0
-
class Router(Struct):
'''
@@ -334,9 +330,12 @@ class Router(Struct):
# broker to book map
books: dict[str, DarkBook] = {}
+    # NOTE: disabled for now since the (stupid) "dunst" notifications get annoying
+ notify_on_order_loads: bool = False
+
# sets of clients mapped from subscription keys
subscribers: defaultdict[
- str, # sub key, default fqsn
+ str, # sub key, default fqme
set[tractor.MsgStream], # unique client streams
] = defaultdict(set)
@@ -387,7 +386,7 @@ class Router(Struct):
brokermod: ModuleType,
portal: tractor.Portal,
exec_mode: str,
- symbol: str,
+ fqme: str,
loglevel: str,
) -> None:
@@ -408,11 +407,12 @@ class Router(Struct):
yield relay
return
- trades_endpoint = getattr(brokermod, 'trades_dialogue', None)
- if (
- trades_endpoint is None
- or exec_mode == 'paper'
- ):
+ def mk_paper_ep():
+ nonlocal brokermod, exec_mode
+
+ # for logging purposes
+ brokermod = paper
+
# for paper mode we need to mock this trades response feed
# so we load bidir stream to a new sub-actor running
# a paper-simulator clearing engine.
@@ -424,26 +424,53 @@ class Router(Struct):
# load the paper trading engine as a subactor of this emsd
# actor to simulate the real IPC load it'll have when also
# pulling data from feeds
- open_trades_endpoint = paper.open_paperboi(
- fqsn='.'.join([symbol, broker]),
+ return paper.open_paperboi(
+ fqme=fqme,
loglevel=loglevel,
)
- else:
+ trades_endpoint = getattr(brokermod, 'trades_dialogue', None)
+ if (
+ trades_endpoint is not None
+ or exec_mode != 'paper'
+ ):
# open live brokerd trades endpoint
open_trades_endpoint = portal.open_context(
trades_endpoint,
loglevel=loglevel,
)
- # open trades-dialog endpoint with backend broker
+ else:
+ exec_mode: str = 'paper'
+
+ @acm
+ async def maybe_open_paper_ep():
+ if exec_mode == 'paper':
+ async with mk_paper_ep() as msg:
+ yield msg
+ return
+
+ # open trades-dialog endpoint with backend broker
+ async with open_trades_endpoint as msg:
+ ctx, first = msg
+
+ # runtime indication that the backend can't support live
+ # order ctrl yet, so boot the paperboi B0
+ if first == 'paper':
+ async with mk_paper_ep() as msg:
+ yield msg
+ return
+ else:
+ # working live ep case B)
+ yield msg
+ return
+
positions: list[BrokerdPosition]
accounts: tuple[str]
-
async with (
- open_trades_endpoint as (
+ maybe_open_paper_ep() as (
brokerd_ctx,
- (positions, accounts,),
+ (positions, accounts),
),
brokerd_ctx.open_stream() as brokerd_trades_stream,
):
@@ -466,30 +493,31 @@ class Router(Struct):
# client set.
# locally cache and track positions per account with
- # a table of (brokername, acctid) -> `BrokerdPosition`
- # msgs.
- pps = {}
- for msg in positions:
- log.info(f'loading pp: {msg}')
-
- account = msg['account']
-
- # TODO: better value error for this which
- # dumps the account and message and states the
- # mismatch..
- assert account in accounts
-
- pps.setdefault(
- (broker, account),
- [],
- ).append(msg)
-
+ # a nested table of msgs:
+ # tuple(brokername, acctid) ->
+ # (fqme: str ->
+ # `BrokerdPosition`)
relay = TradesRelay(
brokerd_stream=brokerd_trades_stream,
- positions=pps,
+ positions={},
accounts=accounts,
- consumers=1,
)
+ for msg in positions:
+
+ msg = BrokerdPosition(**msg)
+ log.info(
+ f'loading pp for {brokermod.__name__}:\n'
+ f'{pformat(msg.to_dict())}',
+ )
+
+ # TODO: state any mismatch here?
+ account = msg.account
+ assert account in accounts
+
+ relay.positions.setdefault(
+ (broker, account),
+ {},
+ )[msg.symbol] = msg
self.relays[broker] = relay
@@ -507,7 +535,7 @@ class Router(Struct):
async def open_trade_relays(
self,
- fqsn: str,
+ fqme: str,
exec_mode: str,
loglevel: str,
@@ -517,35 +545,33 @@ class Router(Struct):
) -> tuple[TradesRelay, Feed]:
'''
- Open and yield ``brokerd`` trades dialogue context-stream if
- none already exists.
+ Maybe open a live feed to the target fqme, start `brokerd` order
+ msg relay and dark clearing tasks to run in the background
+ indefinitely.
'''
- from ..data._source import unpack_fqsn
- broker, symbol, suffix = unpack_fqsn(fqsn)
-
async with (
maybe_open_feed(
- [fqsn],
+ [fqme],
loglevel=loglevel,
) as feed,
):
- brokername, _, _ = unpack_fqsn(fqsn)
+ brokername, _, _, _ = unpack_fqme(fqme)
brokermod = feed.mods[brokername]
broker = brokermod.name
portal = feed.portals[brokermod]
# XXX: this should be initial price quote from target provider
- flume = feed.flumes[fqsn]
+ flume = feed.flumes[fqme]
first_quote: dict = flume.first_quote
book: DarkBook = self.get_dark_book(broker)
- book.lasts[fqsn]: float = first_quote['last']
+ book.lasts[fqme]: float = float(first_quote['last'])
async with self.maybe_open_brokerd_dialog(
brokermod=brokermod,
portal=portal,
exec_mode=exec_mode,
- symbol=symbol,
+ fqme=fqme,
loglevel=loglevel,
) as relay:
@@ -558,7 +584,7 @@ class Router(Struct):
relay.brokerd_stream,
flume.stream,
broker,
- fqsn, # form: ...
+ fqme, # form: ...
book
)
@@ -619,6 +645,7 @@ class Router(Struct):
if (
not sent_some
+ and self.notify_on_order_loads
and notify_on_headless
):
log.info(
@@ -638,11 +665,14 @@ _router: Router = None
@tractor.context
async def _setup_persistent_emsd(
-
ctx: tractor.Context,
+ loglevel: str | None = None,
) -> None:
+ if loglevel:
+ get_console_log(loglevel)
+
global _router
# open a root "service nursery" for the ``emsd`` actor
@@ -692,16 +722,15 @@ async def translate_and_relay_brokerd_events(
async for brokerd_msg in brokerd_trades_stream:
fmsg = pformat(brokerd_msg)
log.info(
- f'Received broker trade event:\n'
+ f'Rx brokerd trade msg:\n'
f'{fmsg}'
)
- status_msg: Optional[Status] = None
+ status_msg: Status | None = None
match brokerd_msg:
# BrokerdPosition
case {
'name': 'position',
- 'symbol': sym,
'broker': broker,
}:
pos_msg = BrokerdPosition(**brokerd_msg)
@@ -712,9 +741,9 @@ async def translate_and_relay_brokerd_events(
relay.positions.setdefault(
# NOTE: translate to a FQSN!
- (broker, sym),
- []
- ).append(pos_msg)
+ (broker, pos_msg.account),
+ {}
+ )[pos_msg.symbol] = pos_msg
# fan-out-relay position msgs immediately by
# broadcasting updates on all client streams
@@ -781,12 +810,11 @@ async def translate_and_relay_brokerd_events(
# no msg to client necessary
continue
- # BrokerdOrderError
+ # BrokerdError
case {
'name': 'error',
'oid': oid, # ems order-dialog id
'reqid': reqid, # brokerd generated order-request id
- 'symbol': sym,
}:
status_msg = book._active.get(oid)
msg = BrokerdError(**brokerd_msg)
@@ -947,9 +975,9 @@ async def translate_and_relay_brokerd_events(
# may end up with collisions?
status_msg = Status(**brokerd_msg)
- # NOTE: be sure to pack an fqsn for the client side!
+ # NOTE: be sure to pack an fqme for the client side!
order = Order(**status_msg.req)
- order.symbol = mk_fqsn(broker, order.symbol)
+ order.symbol = f'{order.symbol}.{broker}'
assert order.price and order.size
status_msg.req = order
@@ -1024,7 +1052,7 @@ async def process_client_order_cmds(
client_order_stream: tractor.MsgStream,
brokerd_order_stream: tractor.MsgStream,
- fqsn: str,
+ fqme: str,
flume: Flume,
dark_book: DarkBook,
router: Router,
@@ -1051,11 +1079,11 @@ async def process_client_order_cmds(
# backend can be routed and relayed to subscribed clients.
subs = router.dialogs[oid]
- # add all subscribed clients for this fqsn (should eventually be
+ # add all subscribed clients for this fqme (should eventually be
# a more generalize subscription system) to received order msg
# updates (and thus show stuff in the UI).
subs.add(client_order_stream)
- subs.update(router.subscribers[fqsn])
+ subs.update(router.subscribers[fqme])
reqid = dark_book._ems2brokerd_ids.inverse.get(oid)
@@ -1113,7 +1141,7 @@ async def process_client_order_cmds(
and status.resp == 'dark_open'
):
# remove from dark book clearing
- entry = dark_book.triggers[fqsn].pop(oid, None)
+ entry = dark_book.triggers[fqme].pop(oid, None)
if entry:
(
pred,
@@ -1129,7 +1157,7 @@ async def process_client_order_cmds(
status.req = cmd
await router.client_broadcast(
- fqsn,
+ fqme,
status,
)
@@ -1139,7 +1167,7 @@ async def process_client_order_cmds(
dark_book._active.pop(oid)
else:
- log.exception(f'No dark order for {fqsn}?')
+ log.exception(f'No dark order for {fqme}?')
# TODO: eventually we should be receiving
# this struct on the wire unpacked in a scoped protocol
@@ -1148,7 +1176,7 @@ async def process_client_order_cmds(
# LIVE order REQUEST
case {
'oid': oid,
- 'symbol': fqsn,
+ 'symbol': fqme,
'price': trigger_price,
'size': size,
'action': ('buy' | 'sell') as action,
@@ -1161,7 +1189,7 @@ async def process_client_order_cmds(
# remove the broker part before creating a message
# to send to the specific broker since they probably
# aren't expectig their own name, but should they?
- sym = fqsn.replace(f'.{broker}', '')
+ sym = fqme.replace(f'.{broker}', '')
if status is not None:
# if we already had a broker order id then
@@ -1218,7 +1246,7 @@ async def process_client_order_cmds(
# DARK-order / alert REQUEST
case {
'oid': oid,
- 'symbol': fqsn,
+ 'symbol': fqme,
'price': trigger_price,
'size': size,
'exec_mode': exec_mode,
@@ -1240,7 +1268,7 @@ async def process_client_order_cmds(
# price received from the feed, instead of being
# like every other shitty tina platform that makes
# the user choose the predicate operator.
- last = dark_book.lasts[fqsn]
+ last = dark_book.lasts[fqme]
# sometimes the real-time feed hasn't come up
# so just pull from the latest history.
@@ -1249,8 +1277,13 @@ async def process_client_order_cmds(
pred = mk_check(trigger_price, last, action)
+ # NOTE: for dark orders currently we submit
+ # the triggered live order at a price 5 ticks
+ # above/below the L1 prices.
+ # TODO: make this configurable from our top level
+            # config, prolly in a `.clearing` section?
spread_slap: float = 5
- min_tick = flume.symbol.tick_size
+ min_tick = float(flume.mkt.size_tick)
min_tick_digits = float_digits(min_tick)
if action == 'buy':
@@ -1282,7 +1315,7 @@ async def process_client_order_cmds(
# NOTE: this may result in an override of an existing
# dark book entry if the order id already exists
dark_book.triggers.setdefault(
- fqsn, {}
+ fqme, {}
)[oid] = (
pred,
tickfilter,
@@ -1307,7 +1340,7 @@ async def process_client_order_cmds(
# broadcast status to all subscribed clients
await router.client_broadcast(
- fqsn,
+ fqme,
status,
)
@@ -1318,35 +1351,36 @@ async def process_client_order_cmds(
@acm
async def maybe_open_trade_relays(
router: Router,
- fqsn: str,
+ fqme: str,
exec_mode: str, # ('paper', 'live')
loglevel: str = 'info',
) -> tuple:
- def cache_on_fqsn_unless_paper(
+ def cache_on_fqme_unless_paper(
router: Router,
- fqsn: str,
+ fqme: str,
exec_mode: str, # ('paper', 'live')
loglevel: str = 'info',
) -> Hashable:
if exec_mode == 'paper':
- return f'paper_{fqsn}'
+ return f'paper_{fqme}'
else:
- return fqsn
+ return fqme
# XXX: closure to enable below use of
# ``tractor.trionics.maybe_open_context()``
@acm
async def cached_mngr(
router: Router,
- fqsn: str,
+ fqme: str,
exec_mode: str, # ('paper', 'live')
loglevel: str = 'info',
):
+
relay, feed, client_ready = await _router.nursery.start(
_router.open_trade_relays,
- fqsn,
+ fqme,
exec_mode,
loglevel,
)
@@ -1356,24 +1390,28 @@ async def maybe_open_trade_relays(
acm_func=cached_mngr,
kwargs={
'router': _router,
- 'fqsn': fqsn,
+ 'fqme': fqme,
'exec_mode': exec_mode,
'loglevel': loglevel,
},
- key=cache_on_fqsn_unless_paper,
+ key=cache_on_fqme_unless_paper,
) as (
cache_hit,
(relay, feed, client_ready)
):
+ if cache_hit:
+ log.info(f'Reusing existing trades relay for {fqme}:\n'
+ f'{relay}\n')
+
yield relay, feed, client_ready
@tractor.context
async def _emsd_main(
ctx: tractor.Context,
- fqsn: str,
+ fqme: str,
exec_mode: str, # ('paper', 'live')
- loglevel: str = 'info',
+ loglevel: str | None = None,
) -> tuple[
dict[
@@ -1428,7 +1466,7 @@ async def _emsd_main(
global _router
assert _router
- broker, symbol, suffix = unpack_fqsn(fqsn)
+ broker, _, _, _ = unpack_fqme(fqme)
# TODO: would be nice if in tractor we can require either a ctx arg,
# or a named arg with ctx in it and a type annotation of
@@ -1445,7 +1483,7 @@ async def _emsd_main(
# few duplicate streams as necessary per ems actor.
async with maybe_open_trade_relays(
_router,
- fqsn,
+ fqme,
exec_mode,
loglevel,
) as (relay, feed, client_ready):
@@ -1468,28 +1506,28 @@ async def _emsd_main(
# register the client side before starting the
# brokerd-side relay task to ensure the client is
# delivered all exisiting open orders on startup.
- # TODO: instead of by fqsn we need a subscription
+ # TODO: instead of by fqme we need a subscription
# system/schema here to limit what each new client is
# allowed to see in terms of broadcasted order flow
# updates per dialog.
- _router.subscribers[fqsn].add(client_stream)
+ _router.subscribers[fqme].add(client_stream)
client_ready.set()
# start inbound (from attached client) order request processing
# main entrypoint, run here until cancelled.
try:
- flume = feed.flumes[fqsn]
+ flume = feed.flumes[fqme]
await process_client_order_cmds(
client_stream,
brokerd_stream,
- fqsn,
+ fqme,
flume,
dark_book,
_router,
)
finally:
# try to remove client from subscription registry
- _router.subscribers[fqsn].remove(client_stream)
+ _router.subscribers[fqme].remove(client_stream)
for oid, client_streams in _router.dialogs.items():
client_streams.discard(client_stream)
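
The dark-clearing hunks above note that a triggered dark order is submitted a few ticks through the trigger price (`spread_slap` ticks of `flume.mkt.size_tick`). The exact in-tree arithmetic isn't shown in this diff, so the following is only an illustrative sketch of the idea, with a rough `float_digits()`-style rounding helper inlined:

# illustrative only: submit a triggered dark order `spread_slap` ticks
# through the trigger price, rounded to the instrument's tick precision.
import math

def dark_submit_price(
    trigger_price: float,
    size_tick: float,
    action: str,           # 'buy' | 'sell'
    spread_slap: int = 5,
) -> float:
    digits = max(0, -int(math.floor(math.log10(size_tick))))  # ~ float_digits()
    away = spread_slap * size_tick
    price = trigger_price + away if action == 'buy' else trigger_price - away
    return round(price, digits)

assert dark_submit_price(30_000.0, 0.1, 'buy') == 30_000.5
assert dark_submit_price(30_000.0, 0.1, 'sell') == 29_999.5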
diff --git a/piker/clearing/_messages.py b/piker/clearing/_messages.py
index c7693b9f..126cd347 100644
--- a/piker/clearing/_messages.py
+++ b/piker/clearing/_messages.py
@@ -29,7 +29,6 @@ from typing import (
from msgspec import field
-from ..data._source import Symbol
from ..data.types import Struct
@@ -94,7 +93,8 @@ class Order(Struct):
# internal ``emdsd`` unique "order id"
oid: str # uuid4
- symbol: str | Symbol
+ # TODO: figure out how to optionally typecast this to `MktPair`?
+ symbol: str # | MktPair
account: str # should we set a default as '' ?
price: float
@@ -191,7 +191,7 @@ class BrokerdOrder(Struct):
account: str
time_ns: int
- symbol: str # fqsn
+ symbol: str # fqme
price: float
size: float
@@ -300,10 +300,10 @@ class BrokerdError(Struct):
class BrokerdPosition(Struct):
- '''Position update event from brokerd.
+ '''
+ Position update event from brokerd.
'''
-
broker: str
account: str
symbol: str
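
Relatedly, `Order.symbol` is now a plain fqme string (the `MktPair` typecast remains a TODO) and the ems re-packs broker-native symbols by suffixing the broker name before broadcasting to clients. A simplified sketch of that round trip; the real `piker.accounting.unpack_fqme()` also splits out venue/suffix parts not shown here:

# simplified stand-ins for the fqme pack/unpack helpers.
def pack_fqme(bs_symbol: str, broker: str) -> str:
    return f'{bs_symbol}.{broker}'

def unpack_broker(fqme: str) -> tuple[str, str]:
    symbol, _, broker = fqme.rpartition('.')
    return broker, symbol

assert pack_fqme('xbtusdt', 'kraken') == 'xbtusdt.kraken'
assert unpack_broker('xbtusdt.kraken') == ('kraken', 'xbtusdt')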
diff --git a/piker/clearing/_paper_engine.py b/piker/clearing/_paper_engine.py
index 7a093ad4..44171dbc 100644
--- a/piker/clearing/_paper_engine.py
+++ b/piker/clearing/_paper_engine.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -14,19 +14,20 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-"""
-Fake trading for forward testing.
+'''
+Fake trading: a full forward testing simulation engine.
-"""
+We can real-time emulate any mkt conditions you want bruddr B)
+Just slide us the model que quieres..
+
+'''
from collections import defaultdict
-from contextlib import asynccontextmanager
+from contextlib import asynccontextmanager as acm
from datetime import datetime
from operator import itemgetter
import itertools
import time
from typing import (
- Any,
- Optional,
Callable,
)
import uuid
@@ -36,18 +37,25 @@ import pendulum
import trio
import tractor
+from ..brokers import get_brokermod
from .. import data
from ..data.types import Struct
-from ..data._source import Symbol
-from ..pp import (
+from ..accounting._mktinfo import (
+ MktPair,
+)
+from ..accounting import (
Position,
+ PpTable,
Transaction,
+ TransactionLedger,
open_trade_ledger,
open_pps,
)
from ..data._normalize import iterticks
-from ..data._source import unpack_fqsn
-from ..log import get_logger
+from ..accounting import unpack_fqme
+from ._util import (
+ log, # sub-sys logger
+)
from ._messages import (
BrokerdCancel,
BrokerdOrder,
@@ -58,10 +66,6 @@ from ._messages import (
BrokerdError,
)
-from ..config import load
-
-log = get_logger(__name__)
-
class PaperBoi(Struct):
'''
@@ -75,14 +79,15 @@ class PaperBoi(Struct):
ems_trades_stream: tractor.MsgStream
+ ppt: PpTable
+ ledger: TransactionLedger
+
# map of paper "live" orders which be used
# to simulate fills based on paper engine settings
_buys: defaultdict[str, bidict]
_sells: defaultdict[str, bidict]
_reqids: bidict
- _positions: dict[str, Position]
- _trade_ledger: dict[str, Any]
- _syms: dict[str, Symbol] = {}
+ _mkts: dict[str, MktPair] = {}
# init edge case L1 spread
last_ask: tuple[float, float] = (float('inf'), 0) # price, size
@@ -95,7 +100,7 @@ class PaperBoi(Struct):
price: float,
action: str,
size: float,
- reqid: Optional[str],
+ reqid: str | None,
) -> int:
'''
@@ -121,7 +126,10 @@ class PaperBoi(Struct):
# in the broker trades event processing loop
await trio.sleep(0.05)
- if action == 'sell':
+ if (
+ action == 'sell'
+ and size > 0
+ ):
size = -size
msg = BrokerdStatus(
@@ -197,7 +205,7 @@ class PaperBoi(Struct):
async def fake_fill(
self,
- fqsn: str,
+ fqme: str,
price: float,
size: float,
action: str, # one of {'buy', 'sell'}
@@ -250,43 +258,46 @@ class PaperBoi(Struct):
)
await self.ems_trades_stream.send(msg)
- # lookup any existing position
- key = fqsn.rstrip(f'.{self.broker}')
+ # NOTE: for paper we set the "bs_mktid" as just the fqme since
+ # we don't actually have any unique backend symbol ourselves
+        # other than this thing, our fqme address.
+ bs_mktid: str = fqme
t = Transaction(
- fqsn=fqsn,
- sym=self._syms[fqsn],
+ fqme=fqme,
+ sym=self._mkts[fqme],
tid=oid,
size=size,
price=price,
cost=0, # TODO: cost model
dt=pendulum.from_timestamp(fill_time_s),
- bsuid=key,
+ bs_mktid=bs_mktid,
)
- with (
- open_trade_ledger(self.broker, 'paper') as ledger,
- open_pps(self.broker, 'paper', write_on_exit=True) as table
- ):
- tx = t.to_dict()
- tx.pop('sym')
- ledger.update({oid: tx})
- # Write to pps toml right now
- table.update_from_trans({oid: t})
+ # update in-mem ledger and pos table
+ self.ledger.update_from_t(t)
+ self.ppt.update_from_trans({oid: t})
- pp = table.pps[key]
- pp_msg = BrokerdPosition(
- broker=self.broker,
- account='paper',
- symbol=fqsn,
- # TODO: we need to look up the asset currency from
- # broker info. i guess for crypto this can be
- # inferred from the pair?
- currency=key,
- size=pp.size,
- avg_price=pp.ppu,
- )
+ # transmit pp msg to ems
+ pp = self.ppt.pps[bs_mktid]
+ pp_msg = BrokerdPosition(
+ broker=self.broker,
+ account='paper',
+ symbol=fqme,
- await self.ems_trades_stream.send(pp_msg)
+ size=pp.size,
+ avg_price=pp.ppu,
+
+ # TODO: we need to look up the asset currency from
+ # broker info. i guess for crypto this can be
+ # inferred from the pair?
+ # currency=bs_mktid,
+ )
+ # write all updates to filesys immediately
+ # (adds latency but that works for simulation anyway)
+ self.ledger.write_config()
+ self.ppt.write_config()
+
+ await self.ems_trades_stream.send(pp_msg)
async def simulate_fills(
@@ -421,7 +432,7 @@ async def simulate_fills(
# clearing price would have filled entirely
await client.fake_fill(
- fqsn=sym,
+ fqme=sym,
# todo slippage to determine fill price
price=tick_price,
size=size,
@@ -469,6 +480,7 @@ async def handle_order_requests(
BrokerdOrderAck(
oid=order.oid,
reqid=reqid,
+ account='paper'
)
)
@@ -512,7 +524,6 @@ _sells: defaultdict[
tuple[float, float, str, str], # order info
]
] = defaultdict(bidict)
-_positions: dict[str, Position] = {}
@tractor.context
@@ -520,33 +531,86 @@ async def trades_dialogue(
ctx: tractor.Context,
broker: str,
- fqsn: str,
- loglevel: str = None,
+ fqme: str | None = None, # if empty, we only boot broker mode
+ loglevel: str = 'warning',
) -> None:
tractor.log.get_console_log(loglevel)
- async with (
- data.open_feed(
- [fqsn],
- loglevel=loglevel,
- ) as feed,
+ ppt: PpTable
+ ledger: TransactionLedger
+ with (
+ open_pps(
+ broker,
+ 'paper',
+ write_on_exit=True,
+ ) as ppt,
+ open_trade_ledger(
+ broker,
+ 'paper',
+ ) as ledger
):
+    # NOTE: retrieve market(pair) info from the backend broker
+ # since ledger entries (in their backend native format) often
+ # don't contain necessary market info per trade record entry..
+ # - if no fqme was passed in, we presume we're running in
+ # "ledger-sync-only mode" and thus we load mkt info for
+    # each symbol found in the ledger into the ppt table manually.
- with open_pps(broker, 'paper') as table:
- # save pps in local state
- _positions.update(table.pps)
+ # TODO: how to process ledger info from backends?
+ # - should we be rolling our own actor-cached version of these
+ # client API refs or using portal IPC to send requests to the
+ # existing brokerd daemon?
+ # - alternatively we can possibly expect and use
+ # a `.broker.norm_trade_records()` ep?
+ brokermod = get_brokermod(broker)
+ gmi = getattr(brokermod, 'get_mkt_info', None)
+
+ # update all transactions with mkt info before
+ # loading any pps
+ mkt_by_fqme: dict[str, MktPair] = {}
+ if fqme:
+ bs_fqme, _, broker = fqme.rpartition('.')
+ mkt, _ = await brokermod.get_mkt_info(bs_fqme)
+ mkt_by_fqme[fqme] = mkt
+
+    # for each sym in the ledger load its `MktPair` info
+ for tid, txdict in ledger.data.items():
+ l_fqme: str = txdict.get('fqme') or txdict['fqsn']
+
+ if (
+ gmi
+ and l_fqme not in mkt_by_fqme
+ ):
+ mkt, pair = await brokermod.get_mkt_info(
+ l_fqme.rstrip(f'.{broker}'),
+ )
+ mkt_by_fqme[l_fqme] = mkt
+
+ # if an ``fqme: str`` input was provided we only
+ # need a ``MktPair`` for that one market, since we're
+ # running in real simulated-clearing mode, not just ledger
+ # syncing.
+ if (
+ fqme is not None
+ and fqme in mkt_by_fqme
+ ):
+ break
+
+ # update pos table from ledger history and provide a ``MktPair``
+ # lookup for internal position accounting calcs.
+ ppt.update_from_trans(ledger.to_trans(mkt_by_fqme=mkt_by_fqme))
pp_msgs: list[BrokerdPosition] = []
pos: Position
token: str # f'{symbol}.{self.broker}'
- for token, pos in _positions.items():
+ for token, pos in ppt.pps.items():
pp_msgs.append(BrokerdPosition(
broker=broker,
account='paper',
- symbol=pos.symbol.front_fqsn(),
+ symbol=pos.mkt.fqme,
size=pos.size,
avg_price=pos.ppu,
))
@@ -556,42 +620,64 @@ async def trades_dialogue(
['paper'],
))
+    # write new positions state in case the ledger was
+    # newer than that tracked in pps.toml
+ ppt.write_config()
+
+ # exit early since no fqme was passed,
+ # normally this case is just to load
+ # positions "offline".
+ if fqme is None:
+ log.warning(
+ 'Paper engine only running in position delivery mode!\n'
+ 'NO SIMULATED CLEARING LOOP IS ACTIVE!'
+ )
+ await trio.sleep_forever()
+ return
+
async with (
- ctx.open_stream() as ems_stream,
- trio.open_nursery() as n,
+ data.open_feed(
+ [fqme],
+ loglevel=loglevel,
+ ) as feed,
):
- client = PaperBoi(
- broker,
- ems_stream,
- _buys=_buys,
- _sells=_sells,
+ # sanity check all the mkt infos
+ for fqme, flume in feed.flumes.items():
+ assert mkt_by_fqme[fqme] == flume.mkt
- _reqids=_reqids,
+ async with (
+ ctx.open_stream() as ems_stream,
+ trio.open_nursery() as n,
+ ):
+ client = PaperBoi(
+ broker=broker,
+ ems_trades_stream=ems_stream,
+ ppt=ppt,
+ ledger=ledger,
- _positions=_positions,
+ _buys=_buys,
+ _sells=_sells,
+ _reqids=_reqids,
- # TODO: load postions from ledger file
- _trade_ledger={},
- _syms={
- fqsn: flume.symbol
- for fqsn, flume in feed.flumes.items()
- }
- )
+ _mkts=mkt_by_fqme,
- n.start_soon(
- handle_order_requests,
- client,
- ems_stream,
- )
+ )
- # paper engine simulator clearing task
- await simulate_fills(feed.streams[broker], client)
+ n.start_soon(
+ handle_order_requests,
+ client,
+ ems_stream,
+ )
+
+ # paper engine simulator clearing task
+ await simulate_fills(feed.streams[broker], client)
-@asynccontextmanager
+@acm
async def open_paperboi(
- fqsn: str,
- loglevel: str,
+ fqme: str | None = None,
+ broker: str | None = None,
+ loglevel: str | None = None,
) -> Callable:
'''
@@ -599,28 +685,39 @@ async def open_paperboi(
its context.
'''
- broker, symbol, expiry = unpack_fqsn(fqsn)
+ if not fqme:
+ assert broker, 'One of `broker` or `fqme` is required siss..!'
+ else:
+ broker, _, _, _ = unpack_fqme(fqme)
+
+ we_spawned: bool = False
service_name = f'paperboi.{broker}'
async with (
tractor.find_actor(service_name) as portal,
tractor.open_nursery() as tn,
):
- # only spawn if no paperboi already is up
- # (we likely don't need more then one proc for basic
- # simulated order clearing)
+ # NOTE: only spawn if no paperboi already is up since we likely
+    # don't need more than one actor for simulated order clearing
+ # per broker-backend.
if portal is None:
log.info('Starting new paper-engine actor')
portal = await tn.start_actor(
service_name,
enable_modules=[__name__]
)
+ we_spawned = True
async with portal.open_context(
trades_dialogue,
broker=broker,
- fqsn=fqsn,
+ fqme=fqme,
loglevel=loglevel,
) as (ctx, first):
yield ctx, first
+
+ # tear down connection and any spawned actor on exit
+ await ctx.cancel()
+ if we_spawned:
+ await portal.cancel_actor()
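
A quick usage sketch (not part of the patch) of the reworked ``open_paperboi()``: the module path and the ``kraken`` broker are illustrative assumptions, and ``first`` is presumed to carry the ``(position_msgs, accounts)`` pair started by ``trades_dialogue()`` above.

# hypothetical example; `piker.clearing._paper_engine` is an assumed module path
from piker.clearing._paper_engine import open_paperboi

async def show_paper_pps() -> None:
    async with open_paperboi(
        broker='kraken',   # no fqme -> position-delivery-only mode
        loglevel='info',
    ) as (ctx, first):
        position_msgs, accounts = first
        print(accounts, position_msgs)
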
diff --git a/piker/clearing/_util.py b/piker/clearing/_util.py
new file mode 100644
index 00000000..ec93512d
--- /dev/null
+++ b/piker/clearing/_util.py
@@ -0,0 +1,33 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+Sub-sys module commons.
+
+"""
+from functools import partial
+
+from ..log import (
+ get_logger,
+ get_console_log,
+)
+subsys: str = 'piker.clearing'
+
+log = get_logger(subsys)
+
+get_console_log = partial(
+ get_console_log,
+ name=subsys,
+)
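
For context, a minimal sketch (not in the patch) of how a sibling ``piker.clearing`` module is expected to consume this commons module, mirroring the ``from ._util import log`` import added to the paper engine above; the module and function names are illustrative only.

# piker/clearing/_some_module.py  (hypothetical sibling module)
from ._util import (
    log,              # logger already namespaced to 'piker.clearing'
    get_console_log,  # partial with name='piker.clearing' pre-bound
)

def announce() -> None:
    # emits under the 'piker.clearing' sub-sys logger
    log.info('clearing sub-system loaded')
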
diff --git a/piker/cli/__init__.py b/piker/cli/__init__.py
index 63b8321a..70610135 100644
--- a/piker/cli/__init__.py
+++ b/piker/cli/__init__.py
@@ -19,6 +19,7 @@ CLI commons.
'''
import os
+from contextlib import AsyncExitStack
import click
import trio
@@ -69,8 +70,8 @@ def pikerd(
Spawn the piker broker-daemon.
'''
+ from .. import service
- from ..service import open_pikerd
log = get_console_log(loglevel)
if pdb:
@@ -90,17 +91,36 @@ def pikerd(
)
async def main():
+ service_mngr: service.Services
+
async with (
- open_pikerd(
- tsdb=tsdb,
- es=es,
+ service.open_pikerd(
loglevel=loglevel,
debug_mode=pdb,
registry_addr=reg_addr,
- ), # normally delivers a ``Services`` handle
+ ) as service_mngr, # normally delivers a ``Services`` handle
trio.open_nursery() as n,
+
+ AsyncExitStack() as stack,
):
+ if tsdb:
+ dname, conf = await stack.enter_async_context(
+ service.marketstore.start_ahab_daemon(
+ service_mngr,
+ loglevel=loglevel,
+ )
+ )
+ log.info(f'TSDB `{dname}` up with conf:\n{conf}')
+
+ if es:
+ dname, conf = await stack.enter_async_context(
+ service.elastic.start_ahab_daemon(
+ service_mngr,
+ loglevel=loglevel,
+ )
+ )
+ log.info(f'DB `{dname}` up with conf:\n{conf}')
await trio.sleep_forever()
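
A stripped-down sketch of the ``AsyncExitStack`` pattern used above: each optionally-enabled service is entered on a single stack so everything entered is torn down together when ``pikerd`` exits. The ``fake_daemon`` acm and its return values are stand-ins, not piker APIs.

from contextlib import AsyncExitStack, asynccontextmanager

@asynccontextmanager
async def fake_daemon(name: str):
    # stand-in for `service.marketstore.start_ahab_daemon()` and friends
    print(f'{name} up')
    try:
        yield name, {'grpc_port': 5995}
    finally:
        print(f'{name} down')

async def run(tsdb: bool = True, es: bool = False) -> None:
    async with AsyncExitStack() as stack:
        if tsdb:
            dname, conf = await stack.enter_async_context(
                fake_daemon('marketstored')
            )
        if es:
            dname, conf = await stack.enter_async_context(
                fake_daemon('elasticsearch')
            )
        # ... run until cancelled; all entered daemons exit together here
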
diff --git a/piker/config.py b/piker/config.py
index 397342e3..e2c63ea4 100644
--- a/piker/config.py
+++ b/piker/config.py
@@ -21,14 +21,21 @@ Platform configuration (files) mgmt.
import platform
import sys
import os
-from os import path
-from os.path import dirname
import shutil
-from typing import Optional
+import time
+from typing import (
+ Callable,
+ MutableMapping,
+)
from pathlib import Path
from bidict import bidict
-import toml
+import tomlkit
+try:
+ import tomllib
+except ModuleNotFoundError:
+ import tomli as tomllib
+
from .log import get_logger
@@ -126,30 +133,33 @@ def get_app_dir(
)
-_config_dir = _click_config_dir = get_app_dir('piker')
-_parent_user = os.environ.get('SUDO_USER')
+_click_config_dir: Path = Path(get_app_dir('piker'))
+_config_dir: Path = _click_config_dir
+_parent_user: str = os.environ.get('SUDO_USER')
if _parent_user:
- non_root_user_dir = os.path.expanduser(
- f'~{_parent_user}'
+ non_root_user_dir = Path(
+ os.path.expanduser(f'~{_parent_user}')
)
- root = 'root'
+ root: str = 'root'
+ _ccds: str = str(_click_config_dir) # click config dir string
+ i_tail: int = int(_ccds.rfind(root) + len(root))
_config_dir = (
- non_root_user_dir +
- _click_config_dir[
- _click_config_dir.rfind(root) + len(root):
- ]
+ non_root_user_dir
+ /
+        Path(_ccds[i_tail+1:])  # +1 to skip the '/' after 'root'
)
+
_conf_names: set[str] = {
- 'brokers',
- 'pps',
- 'trades',
- 'watchlists',
- 'paper_trades'
+ 'conf', # god config
+ 'brokers', # sec backend deatz
+ 'watchlists', # (user defined) market lists
}
-_watchlists_data_path = os.path.join(_config_dir, 'watchlists.json')
+# TODO: probably drop all this super legacy, questrade specific,
+# config stuff XD ?
+_watchlists_data_path: Path = _config_dir / Path('watchlists.json')
_context_defaults = dict(
default_map={
# Questrade specific quote poll rates
@@ -180,7 +190,7 @@ def _conf_fn_w_ext(
def get_conf_path(
conf_name: str = 'brokers',
-) -> str:
+) -> Path:
'''
Return the top-level default config path normally under
``~/.config/piker`` on linux for a given ``conf_name``, the config
@@ -188,7 +198,6 @@ def get_conf_path(
Contains files such as:
- brokers.toml
- - pp.toml
- watchlists.toml
# maybe coming soon ;)
@@ -196,72 +205,187 @@ def get_conf_path(
- strats.toml
'''
- assert conf_name in _conf_names
+ if 'account.' not in conf_name:
+ assert str(conf_name) in _conf_names
+
fn = _conf_fn_w_ext(conf_name)
- return os.path.join(
- _config_dir,
- fn,
- )
+ return _config_dir / Path(fn)
-def repodir():
+def repodir() -> Path:
'''
- Return the abspath to the repo directory.
+ Return the abspath as ``Path`` to the git repo's root dir.
'''
- dirpath = path.abspath(
- # we're 3 levels down in **this** module file
- dirname(dirname(os.path.realpath(__file__)))
- )
- return dirpath
+ repodir: Path = Path(__file__).absolute().parent.parent
+ confdir: Path = repodir / 'config'
+
+ if not confdir.is_dir():
+ # prolly inside stupid GH actions CI..
+ repodir: Path = Path(os.environ.get('GITHUB_WORKSPACE'))
+ confdir: Path = repodir / 'config'
+
+ assert confdir.is_dir(), f'{confdir} DNE, {repodir} is likely incorrect!'
+ return repodir
def load(
- conf_name: str = 'brokers',
- path: str = None,
+ conf_name: str = 'brokers', # appended with .toml suffix
+ path: Path | None = None,
+
+ decode: Callable[
+ [str | bytes,],
+ MutableMapping,
+ ] = tomllib.loads,
+
+ touch_if_dne: bool = False,
**tomlkws,
-) -> (dict, str):
+) -> tuple[dict, Path]:
'''
Load config file by name.
+    If the desired config is not in the top-level piker-user config path then
+ pass the ``path: Path`` explicitly.
+
'''
- path = path or get_conf_path(conf_name)
-
- if not os.path.isdir(_config_dir):
- Path(_config_dir).mkdir(parents=True, exist_ok=True)
-
- if not os.path.isfile(path):
- fn = _conf_fn_w_ext(conf_name)
-
- template = os.path.join(
- repodir(),
- 'config',
- fn
+ # create the $HOME/.config/piker dir if dne
+ if not _config_dir.is_dir():
+ _config_dir.mkdir(
+ parents=True,
+ exist_ok=True,
+ )
+
+ path_provided: bool = path is not None
+ path: Path = path or get_conf_path(conf_name)
+
+ if (
+ not path.is_file()
+ and touch_if_dne
+ ):
+        # only copy in a template config if no explicit path was
+        # provided; otherwise just touch an empty file with the same name.
+ if path_provided:
+ with path.open(mode='x'):
+ pass
+
+ # try to copy in a template config to the user's dir if one
+ # exists.
+ else:
+ fn: str = _conf_fn_w_ext(conf_name)
+ template: Path = repodir() / 'config' / fn
+ if template.is_file():
+ shutil.copyfile(template, path)
+
+ elif fn and template:
+ assert template.is_file(), f'{template} is not a file!?'
+
+ assert path.is_file(), f'Config file {path} not created!?'
+
+ with path.open(mode='r') as fp:
+ config: dict = decode(
+ fp.read(),
+ **tomlkws,
)
- # try to copy in a template config to the user's directory
- # if one exists.
- if os.path.isfile(template):
- shutil.copyfile(template, path)
- else:
- # create an empty file
- with open(path, 'x'):
- pass
- else:
- with open(path, 'r'):
- pass # touch it
- config = toml.load(path, **tomlkws)
log.debug(f"Read config file {path}")
return config, path
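
A hedged usage sketch of the new ``load()`` signature: it now returns a ``(dict, Path)`` pair, accepts a ``decode`` callable (defaulting to ``tomllib.loads``) and only creates a missing file when ``touch_if_dne=True``; the key lookup below is illustrative.

from piker import config

# reads ~/.config/piker/brokers.toml, copying in the repo template first
# if the user file does not exist yet (since no explicit `path` is given).
conf, path = config.load(
    'brokers',
    touch_if_dne=True,
)
print(path, list(conf))
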
+def load_account(
+ brokername: str,
+ acctid: str,
+
+) -> tuple[dict, Path]:
+ '''
+    Load an accounting (with positions) file from
+    $PIKER_CONFIG_DIR/accounting/account.<brokername>.<acctid>.toml.
+
+ Where normally $PIKER_CONFIG_DIR = ~/.config/piker/
+    and we implicitly create an accounting subdir which should
+ normally be linked to a git repo managed by the user B)
+
+ '''
+ legacy_fn: str = f'pps.{brokername}.{acctid}.toml'
+ fn: str = f'account.{brokername}.{acctid}.toml'
+
+ dirpath: Path = _config_dir / 'accounting'
+ if not dirpath.is_dir():
+ dirpath.mkdir()
+
+ config, path = load(
+ path=dirpath / fn,
+ decode=tomlkit.parse,
+ touch_if_dne=True,
+ )
+
+ if not config:
+ legacypath = dirpath / legacy_fn
+ log.warning(
+ f'Your account file is using the legacy `pps.` prefix..\n'
+ f'Rewriting contents to new name -> {path}\n'
+ 'Please delete the old file!\n'
+ f'|-> {legacypath}\n'
+ )
+ if legacypath.is_file():
+ legacy_config, _ = load(
+ path=legacypath,
+
+ # TODO: move to tomlkit:
+ # - needs to be fixed to support bidict?
+ # https://github.com/sdispater/tomlkit/issues/289
+        # - we need to use our fork's fix to do multiline array
+ # indenting.
+ decode=tomlkit.parse,
+ )
+ config.update(legacy_config)
+
+    # XXX: override the presumably previously non-existent
+ # file with legacy's contents.
+ write(
+ config,
+ path=path,
+ fail_empty=False,
+ )
+
+ return config, path
+
+
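
Usage sketch for ``load_account()`` (broker and account ids are illustrative): the file resolves to ``~/.config/piker/accounting/account.<brokername>.<acctid>.toml`` and any legacy ``pps.``-prefixed file found there is migrated into it on first load.

from piker import config

acct_conf, acct_path = config.load_account(
    brokername='kraken',
    acctid='paper',
)
# -> ~/.config/piker/accounting/account.kraken.paper.toml
print(acct_path, dict(acct_conf))
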
+def load_ledger(
+ brokername: str,
+ acctid: str,
+
+) -> tuple[dict, Path]:
+
+ ldir: Path = _config_dir / 'accounting' / 'ledgers'
+ if not ldir.is_dir():
+ ldir.mkdir()
+
+ fname = f'trades_{brokername}_{acctid}.toml'
+ fpath: Path = ldir / fname
+
+ if not fpath.is_file():
+ log.info(
+ f'Creating new local trades ledger: {fpath}'
+ )
+ fpath.touch()
+
+ with fpath.open(mode='rb') as cf:
+ start = time.time()
+ ledger_dict = tomllib.load(cf)
+ log.debug(f'Ledger load took {time.time() - start}s')
+
+ return ledger_dict, fpath
+
+
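
And the matching sketch for ``load_ledger()`` (again with illustrative ids): ledgers land under ``~/.config/piker/accounting/ledgers/`` and an empty file is touched on first use; the ``fqme``-or-``fqsn`` key fallback mirrors the paper engine loop earlier in this patch.

from piker import config

ledger_dict, ledger_path = config.load_ledger(
    brokername='kraken',
    acctid='paper',
)
# -> ~/.config/piker/accounting/ledgers/trades_kraken_paper.toml
for tid, txdict in ledger_dict.items():
    print(tid, txdict.get('fqme') or txdict.get('fqsn'))
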
def write(
config: dict, # toml config as dict
- name: str = 'brokers',
- path: str = None,
+
+ name: str | None = None,
+ path: Path | None = None,
fail_empty: bool = True,
+
**toml_kwargs,
) -> None:
@@ -271,32 +395,37 @@ def write(
Create a ``brokers.ini`` file if one does not exist.
'''
- path = path or get_conf_path(name)
- dirname = os.path.dirname(path)
- if not os.path.isdir(dirname):
- log.debug(f"Creating config dir {_config_dir}")
- os.makedirs(dirname)
+ if name:
+ path: Path = path or get_conf_path(name)
+ dirname: Path = path.parent
+ if not dirname.is_dir():
+ log.debug(f"Creating config dir {_config_dir}")
+ dirname.mkdir()
- if not config and fail_empty:
+ if (
+ not config
+ and fail_empty
+ ):
raise ValueError(
- "Watch out you're trying to write a blank config!")
+ "Watch out you're trying to write a blank config!"
+ )
log.debug(
f"Writing config `{name}` file to:\n"
f"{path}"
)
- with open(path, 'w') as cf:
- return toml.dump(
+ with path.open(mode='w') as fp:
+ return tomlkit.dump( # preserve style on write B)
config,
- cf,
+ fp,
**toml_kwargs,
)
def load_accounts(
- providers: Optional[list[str]] = None
+ providers: list[str] | None = None
-) -> bidict[str, Optional[str]]:
+) -> bidict[str, str | None]:
conf, path = load()
accounts = bidict()
diff --git a/piker/data/__init__.py b/piker/data/__init__.py
index 5c83150e..ba6af4ca 100644
--- a/piker/data/__init__.py
+++ b/piker/data/__init__.py
@@ -25,7 +25,7 @@ sharing live streams over a network.
import tractor
import trio
-from ..log import (
+from ._util import (
get_console_log,
)
from ._normalize import iterticks
@@ -50,39 +50,3 @@ __all__ = [
'open_shm_array',
'get_shm_token',
]
-
-
-@tractor.context
-async def _setup_persistent_brokerd(
- ctx: tractor.Context,
- brokername: str,
-
-) -> None:
- '''
- Allocate a actor-wide service nursery in ``brokerd``
- such that feeds can be run in the background persistently by
- the broker backend as needed.
-
- '''
- get_console_log(tractor.current_actor().loglevel)
-
- from .feed import (
- _bus,
- get_feed_bus,
- )
- global _bus
- assert not _bus
-
- async with trio.open_nursery() as service_nursery:
- # assign a nursery to the feeds bus for spawning
- # background tasks from clients
- get_feed_bus(brokername, service_nursery)
-
- # unblock caller
- await ctx.started()
-
- # we pin this task to keep the feeds manager active until the
- # parent actor decides to tear it down
- await trio.sleep_forever()
-
-
diff --git a/piker/data/_m4.py b/piker/data/_m4.py
index 8452e022..3c23d966 100644
--- a/piker/data/_m4.py
+++ b/piker/data/_m4.py
@@ -42,10 +42,7 @@ from numba import (
# float64, optional, int64,
)
-from ..log import get_logger
-
-
-log = get_logger(__name__)
+from ._util import log
def ds_m4(
diff --git a/piker/data/_sampling.py b/piker/data/_sampling.py
index 84dce08e..20bf9b49 100644
--- a/piker/data/_sampling.py
+++ b/piker/data/_sampling.py
@@ -38,8 +38,8 @@ from tractor.trionics import (
import trio
from trio_typing import TaskStatus
-from ..log import (
- get_logger,
+from ._util import (
+ log,
get_console_log,
)
from ..service import maybe_spawn_daemon
@@ -50,8 +50,6 @@ if TYPE_CHECKING:
)
from .feed import _FeedsBus
-log = get_logger(__name__)
-
# highest frequency sample step is 1 second by default, though in
# the future we may want to support shorter periods or a dynamic style
@@ -353,7 +351,9 @@ async def register_with_sampler(
if open_index_stream:
try:
- async with ctx.open_stream() as stream:
+ async with ctx.open_stream(
+ allow_overruns=True,
+ ) as stream:
if sub_for_broadcasts:
subs.add(stream)
@@ -362,7 +362,10 @@ async def register_with_sampler(
if msg == 'broadcast_all':
await Sampler.broadcast_all()
finally:
- if sub_for_broadcasts:
+ if (
+ sub_for_broadcasts
+ and subs
+ ):
subs.remove(stream)
else:
# if no shms are passed in we just wait until cancelled
@@ -429,7 +432,7 @@ async def spawn_samplerd(
async def maybe_open_samplerd(
loglevel: str | None = None,
- **kwargs,
+ **pikerd_kwargs,
) -> tractor.Portal: # noqa
'''
@@ -442,9 +445,9 @@ async def maybe_open_samplerd(
async with maybe_spawn_daemon(
dname,
service_task_target=spawn_samplerd,
- spawn_args={'loglevel': loglevel},
+ spawn_args={},
loglevel=loglevel,
- **kwargs,
+ **pikerd_kwargs,
) as portal:
yield portal
@@ -615,10 +618,10 @@ async def sample_and_broadcast(
] = bus.get_subs(sub_key)
# NOTE: by default the broker backend doesn't append
- # it's own "name" into the fqsn schema (but maybe it
+    # its own "name" into the fqme schema (but maybe it
# should?) so we have to manually generate the correct
# key here.
- fqsn = f'{broker_symbol}.{brokername}'
+ fqme = f'{broker_symbol}.{brokername}'
lags: int = 0
# TODO: speed up this loop in an AOT compiled lang (like
@@ -637,7 +640,7 @@ async def sample_and_broadcast(
# pushes to the ``uniform_rate_send()`` below.
try:
stream.send_nowait(
- (fqsn, quote)
+ (fqme, quote)
)
except trio.WouldBlock:
overruns[sub_key] += 1
@@ -669,7 +672,7 @@ async def sample_and_broadcast(
raise trio.BrokenResourceError
else:
await stream.send(
- {fqsn: quote}
+ {fqme: quote}
)
if cs.cancelled_caught:
@@ -782,9 +785,6 @@ async def uniform_rate_send(
https://gist.github.com/njsmith/7ea44ec07e901cb78ebe1dd8dd846cb9
'''
- # try not to error-out on overruns of the subscribed client
- stream._ctx._backpressure = True
-
# TODO: compute the approx overhead latency per cycle
left_to_sleep = throttle_period = 1/rate - 0.000616
diff --git a/piker/data/_sharedmem.py b/piker/data/_sharedmem.py
index bd40ad7e..2ed1c892 100644
--- a/piker/data/_sharedmem.py
+++ b/piker/data/_sharedmem.py
@@ -32,14 +32,11 @@ import numpy as np
from numpy.lib import recfunctions as rfn
import tractor
-from ..log import get_logger
+from ._util import log
from ._source import base_iohlc_dtype
from .types import Struct
-log = get_logger(__name__)
-
-
# how much is probably dependent on lifestyle
_secs_in_day = int(60 * 60 * 24)
# we try for a buncha times, but only on a run-every-other-day kinda week.
@@ -649,7 +646,7 @@ def maybe_open_shm_array(
token = _known_tokens[key]
return attach_shm_array(token=token, **kwargs), False
except KeyError:
- log.warning(f"Could not find {key} in shms cache")
+ log.debug(f"Could not find {key} in shms cache")
if dtype:
token = _make_token(
key,
@@ -659,7 +656,7 @@ def maybe_open_shm_array(
try:
return attach_shm_array(token=token, **kwargs), False
except FileNotFoundError:
- log.warning(f"Could not attach to shm with token {token}")
+ log.debug(f"Could not attach to shm with token {token}")
# This actor does not know about memory
# associated with the provided "key".
diff --git a/piker/data/_source.py b/piker/data/_source.py
index d358cd96..d1d8be02 100644
--- a/piker/data/_source.py
+++ b/piker/data/_source.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) 2018-present Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) 2018-present Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -18,18 +18,10 @@
numpy data source coversion helpers.
"""
from __future__ import annotations
-from decimal import (
- Decimal,
- ROUND_HALF_EVEN,
-)
-from typing import Any
from bidict import bidict
import numpy as np
-from .types import Struct
-# from numba import from_dtype
-
ohlc_fields = [
('time', float),
@@ -50,6 +42,7 @@ base_ohlc_dtype = np.dtype(ohlc_fields)
# TODO: for now need to construct this manually for readonly arrays, see
# https://github.com/numba/numba/issues/4511
+# from numba import from_dtype
# numba_ohlc_dtype = from_dtype(base_ohlc_dtype)
# map time frame "keys" to seconds values
@@ -64,32 +57,6 @@ tf_in_1s = bidict({
})
-def mk_fqsn(
- provider: str,
- symbol: str,
-
-) -> str:
- '''
- Generate a "fully qualified symbol name" which is
- a reverse-hierarchical cross broker/provider symbol
-
- '''
- return '.'.join([symbol, provider]).lower()
-
-
-def float_digits(
- value: float,
-) -> int:
- '''
- Return the number of precision digits read from a float value.
-
- '''
- if value == 0:
- return 0
-
- return int(-Decimal(str(value)).as_tuple().exponent)
-
-
def ohlc_zeros(length: int) -> np.ndarray:
"""Construct an OHLC field formatted structarray.
@@ -100,220 +67,6 @@ def ohlc_zeros(length: int) -> np.ndarray:
return np.zeros(length, dtype=base_ohlc_dtype)
-def unpack_fqsn(fqsn: str) -> tuple[str, str, str]:
- '''
- Unpack a fully-qualified-symbol-name to ``tuple``.
-
- '''
- venue = ''
- suffix = ''
-
- # TODO: probably reverse the order of all this XD
- tokens = fqsn.split('.')
- if len(tokens) < 3:
- # probably crypto
- symbol, broker = tokens
- return (
- broker,
- symbol,
- '',
- )
-
- elif len(tokens) > 3:
- symbol, venue, suffix, broker = tokens
- else:
- symbol, venue, broker = tokens
- suffix = ''
-
- # head, _, broker = fqsn.rpartition('.')
- # symbol, _, suffix = head.rpartition('.')
- return (
- broker,
- '.'.join([symbol, venue]),
- suffix,
- )
-
-
-class MktPair(Struct, frozen=True):
-
- src: str # source asset name being used to buy
- src_type: str # source asset's financial type/classification name
- # ^ specifies a "class" of financial instrument
- # egs. stock, futer, option, bond etc.
-
- dst: str # destination asset name being bought
- dst_type: str # destination asset's financial type/classification name
-
- price_tick: float # minimum price increment value increment
- price_tick_digits: int # required decimal digits for above
-
- size_tick: float # minimum size (aka vlm) increment value increment
- size_tick_digits: int # required decimal digits for above
-
- venue: str | None = None # market venue provider name
- expiry: str | None = None # for derivs, expiry datetime parseable str
-
- # for derivs, info describing contract, egs.
- # strike price, call or put, swap type, exercise model, etc.
- contract_info: str | None = None
-
- @classmethod
- def from_msg(
- self,
- msg: dict[str, Any],
-
- ) -> MktPair:
- '''
- Constructor for a received msg-dict normally received over IPC.
-
- '''
- ...
-
- # fqa, fqma, .. etc. see issue:
- # https://github.com/pikers/piker/issues/467
- @property
- def fqsn(self) -> str:
- '''
- Return the fully qualified market (endpoint) name for the
- pair of transacting assets.
-
- '''
- ...
-
-
-# TODO: rework the below `Symbol` (which was originally inspired and
-# derived from stuff in quantdom) into a simpler, ipc msg ready, market
-# endpoint meta-data container type as per the drafted interace above.
-class Symbol(Struct):
- '''
- I guess this is some kinda container thing for dealing with
- all the different meta-data formats from brokers?
-
- '''
- key: str
- tick_size: float = 0.01
- lot_tick_size: float = 0.0 # "volume" precision as min step value
- tick_size_digits: int = 2
- lot_size_digits: int = 0
- suffix: str = ''
- broker_info: dict[str, dict[str, Any]] = {}
-
- @classmethod
- def from_broker_info(
- cls,
- broker: str,
- symbol: str,
- info: dict[str, Any],
- suffix: str = '',
-
- ) -> Symbol:
-
- tick_size = info.get('price_tick_size', 0.01)
- lot_size = info.get('lot_tick_size', 0.0)
-
- return Symbol(
- key=symbol,
- tick_size=tick_size,
- lot_tick_size=lot_size,
- tick_size_digits=float_digits(tick_size),
- lot_size_digits=float_digits(lot_size),
- suffix=suffix,
- broker_info={broker: info},
- )
-
- @classmethod
- def from_fqsn(
- cls,
- fqsn: str,
- info: dict[str, Any],
-
- ) -> Symbol:
- broker, key, suffix = unpack_fqsn(fqsn)
- return cls.from_broker_info(
- broker,
- key,
- info=info,
- suffix=suffix,
- )
-
- @property
- def type_key(self) -> str:
- return list(self.broker_info.values())[0]['asset_type']
-
- @property
- def brokers(self) -> list[str]:
- return list(self.broker_info.keys())
-
- def nearest_tick(self, value: float) -> float:
- '''
- Return the nearest tick value based on mininum increment.
-
- '''
- mult = 1 / self.tick_size
- return round(value * mult) / mult
-
- def front_feed(self) -> tuple[str, str]:
- '''
- Return the "current" feed key for this symbol.
-
- (i.e. the broker + symbol key in a tuple).
-
- '''
- return (
- list(self.broker_info.keys())[0],
- self.key,
- )
-
- def tokens(self) -> tuple[str]:
- broker, key = self.front_feed()
- if self.suffix:
- return (key, self.suffix, broker)
- else:
- return (key, broker)
-
- @property
- def fqsn(self) -> str:
- return '.'.join(self.tokens()).lower()
-
- def front_fqsn(self) -> str:
- '''
- fqsn = "fully qualified symbol name"
-
- Basically the idea here is for all client-ish code (aka programs/actors
- that ask the provider agnostic layers in the stack for data) should be
- able to tell which backend / venue / derivative each data feed/flow is
- from by an explicit string key of the current form:
-
- ...
-
- TODO: I have thoughts that we should actually change this to be
- more like an "attr lookup" (like how the web should have done
- urls, but marketting peeps ruined it etc. etc.):
-
- ...
-
- '''
- tokens = self.tokens()
- fqsn = '.'.join(map(str.lower, tokens))
- return fqsn
-
- def quantize_size(
- self,
- size: float,
-
- ) -> Decimal:
- '''
- Truncate input ``size: float`` using ``Decimal``
- and ``.lot_size_digits``.
-
- '''
- digits = self.lot_size_digits
- return Decimal(size).quantize(
- Decimal(f'1.{"0".ljust(digits, "0")}'),
- rounding=ROUND_HALF_EVEN
- )
-
-
def _nan_to_closest_num(array: np.ndarray):
"""Return interpolated values instead of NaN.
diff --git a/piker/data/_util.py b/piker/data/_util.py
new file mode 100644
index 00000000..8c78255f
--- /dev/null
+++ b/piker/data/_util.py
@@ -0,0 +1,34 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+Data layer module commons.
+
+'''
+from functools import partial
+
+from ..log import (
+ get_logger,
+ get_console_log,
+)
+subsys: str = 'piker.data'
+
+log = get_logger(subsys)
+
+get_console_log = partial(
+ get_console_log,
+ name=subsys,
+)
diff --git a/piker/data/_web_bs.py b/piker/data/_web_bs.py
index 21b06d68..9c2753b1 100644
--- a/piker/data/_web_bs.py
+++ b/piker/data/_web_bs.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -18,23 +18,29 @@
ToOlS fOr CoPInG wITh "tHE wEB" protocols.
"""
+from __future__ import annotations
from contextlib import (
- asynccontextmanager,
- AsyncExitStack,
+ asynccontextmanager as acm,
)
from itertools import count
+from functools import partial
from types import ModuleType
from typing import (
Any,
Optional,
Callable,
+ AsyncContextManager,
AsyncGenerator,
Iterable,
)
import json
import trio
-import trio_websocket
+from trio_typing import TaskStatus
+from trio_websocket import (
+ WebSocketConnection,
+ open_websocket_url,
+)
from wsproto.utilities import LocalProtocolError
from trio_websocket._impl import (
ConnectionClosed,
@@ -44,20 +50,23 @@ from trio_websocket._impl import (
ConnectionTimeout,
)
-from ..log import get_logger
-
+from ._util import log
from .types import Struct
-log = get_logger(__name__)
-
class NoBsWs:
'''
Make ``trio_websocket`` sockets stay up no matter the bs.
- You can provide a ``fixture`` async-context-manager which will be
- enter/exitted around each reconnect operation.
+ A shim interface that allows client code to stream from some
+    ``WebSocketConnection`` but where any connectivity bs is handled
+    automatically and entirely in the background.
+
+ NOTE: this type should never be created directly but instead is
+    provided via the ``open_autorecon_ws()`` factory below.
+
'''
+ # apparently we can QoS for all sorts of reasons..so catch em.
recon_errors = (
ConnectionClosed,
DisconnectionTimeout,
@@ -70,68 +79,42 @@ class NoBsWs:
def __init__(
self,
url: str,
- stack: AsyncExitStack,
- fixture: Optional[Callable] = None,
+ rxchan: trio.MemoryReceiveChannel,
+ msg_recv_timeout: float,
+
serializer: ModuleType = json
):
self.url = url
- self.fixture = fixture
- self._stack = stack
- self._ws: 'WebSocketConnection' = None # noqa
+ self._rx = rxchan
+ self._timeout = msg_recv_timeout
- # TODO: is there some method we can call
- # on the underlying `._ws` to get this?
- self._connected: bool = False
+ # signaling between caller and relay task which determines when
+ # socket is connected (and subscribed).
+ self._connected: trio.Event = trio.Event()
- async def _connect(
- self,
- tries: int = 1000,
- ) -> None:
+ # dynamically reset by the bg relay task
+ self._ws: WebSocketConnection | None = None
+ self._cs: trio.CancelScope | None = None
- self._connected = False
- while True:
- try:
- await self._stack.aclose()
- except self.recon_errors:
- await trio.sleep(0.5)
- else:
- break
-
- last_err = None
- for i in range(tries):
- try:
- self._ws = await self._stack.enter_async_context(
- trio_websocket.open_websocket_url(self.url)
- )
-
- if self.fixture is not None:
- # rerun user code fixture
- ret = await self._stack.enter_async_context(
- self.fixture(self)
- )
-
- assert ret is None
-
- log.info(f'Connection success: {self.url}')
-
- self._connected = True
- return self._ws
-
- except self.recon_errors as err:
- last_err = err
- log.error(
- f'{self} connection bail with '
- f'{type(err)}...retry attempt {i}'
- )
- await trio.sleep(0.5)
- self._connected = False
- continue
- else:
- log.exception('ws connection fail...')
- raise last_err
+ # interchange codec methods
+ # TODO: obviously the method API here may be different
+ # for another interchange format..
+ self._dumps: Callable = serializer.dumps
+ self._loads: Callable = serializer.loads
def connected(self) -> bool:
- return self._connected
+ return self._connected.is_set()
+
+ async def reset(self) -> None:
+ '''
+ Reset the underlying ws connection by cancelling
+ the bg relay task and waiting for it to signal
+ a new connection.
+
+ '''
+ self._connected = trio.Event()
+ self._cs.cancel()
+ await self._connected.wait()
async def send_msg(
self,
@@ -139,18 +122,15 @@ class NoBsWs:
) -> None:
while True:
try:
- return await self._ws.send_message(json.dumps(data))
+ msg: Any = self._dumps(data)
+ return await self._ws.send_message(msg)
except self.recon_errors:
- await self._connect()
+ await self.reset()
- async def recv_msg(
- self,
- ) -> Any:
- while True:
- try:
- return json.loads(await self._ws.get_message())
- except self.recon_errors:
- await self._connect()
+ async def recv_msg(self) -> Any:
+ msg: Any = await self._rx.receive()
+ data = self._loads(msg)
+ return data
def __aiter__(self):
return self
@@ -158,27 +138,218 @@ class NoBsWs:
async def __anext__(self):
return await self.recv_msg()
+ def set_recv_timeout(
+ self,
+ timeout: float,
+ ) -> None:
+ self._timeout = timeout
-@asynccontextmanager
+
+async def _reconnect_forever(
+ url: str,
+ snd: trio.MemorySendChannel,
+ nobsws: NoBsWs,
+ reset_after: int, # msg recv timeout before reset attempt
+
+ fixture: AsyncContextManager | None = None,
+ task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
+
+) -> None:
+
+ # TODO: can we just report "where" in the call stack
+ # the client code is using the ws stream?
+ # Maybe we can just drop this since it's already in the log msg
+    # prefix?
+ if fixture is not None:
+ src_mod: str = fixture.__module__
+ else:
+ src_mod: str = 'unknown'
+
+ async def proxy_msgs(
+ ws: WebSocketConnection,
+ pcs: trio.CancelScope, # parent cancel scope
+ ):
+ '''
+        Receive (under `timeout` deadline) all msgs from the underlying
+ websocket and relay them to (calling) parent task via ``trio``
+ mem chan.
+
+ '''
+ # after so many msg recv timeouts, reset the connection
+ timeouts: int = 0
+
+ while True:
+ with trio.move_on_after(
+ # can be dynamically changed by user code
+ nobsws._timeout,
+ ) as cs:
+ try:
+ msg: Any = await ws.get_message()
+ await snd.send(msg)
+ except nobsws.recon_errors:
+ log.exception(
+ f'{src_mod}\n'
+ f'{url} connection bail with:'
+ )
+ await trio.sleep(0.5)
+ pcs.cancel()
+
+                # go back to reconnect loop in parent task
+ return
+
+ if cs.cancelled_caught:
+ timeouts += 1
+ if timeouts > reset_after:
+ log.error(
+ f'{src_mod}\n'
+ 'WS feed seems down and slow af.. reconnecting\n'
+ )
+ pcs.cancel()
+
+                # go back to reconnect loop in parent task
+ return
+
+ async def open_fixture(
+ fixture: AsyncContextManager,
+ nobsws: NoBsWs,
+ task_status: TaskStatus = trio.TASK_STATUS_IGNORED,
+ ):
+ '''
+ Open user provided `@acm` and sleep until any connection
+ reset occurs.
+
+ '''
+ async with fixture(nobsws) as ret:
+ assert ret is None
+ task_status.started()
+ await trio.sleep_forever()
+
+ # last_err = None
+ nobsws._connected = trio.Event()
+ task_status.started()
+
+ while not snd._closed:
+ log.info(
+ f'{src_mod}\n'
+ f'{url} trying (RE)CONNECT'
+ )
+
+ async with trio.open_nursery() as n:
+ cs = nobsws._cs = n.cancel_scope
+ ws: WebSocketConnection
+ async with open_websocket_url(url) as ws:
+ nobsws._ws = ws
+ log.info(
+ f'{src_mod}\n'
+ f'Connection success: {url}'
+ )
+
+ # begin relay loop to forward msgs
+ n.start_soon(
+ proxy_msgs,
+ ws,
+ cs,
+ )
+
+ if fixture is not None:
+ log.info(
+ f'{src_mod}\n'
+ f'Entering fixture: {fixture}'
+ )
+
+ # TODO: should we return an explicit sub-cs
+ # from this fixture task?
+ await n.start(
+ open_fixture,
+ fixture,
+ nobsws,
+ )
+
+ # indicate to wrapper / opener that we are up and block
+ # to let tasks run **inside** the ws open block above.
+ nobsws._connected.set()
+ await trio.sleep_forever()
+
+ # ws open block end
+ # nursery block end
+ nobsws._connected = trio.Event()
+ if cs.cancelled_caught:
+ log.cancel(
+ f'{url} connection cancelled!'
+ )
+ # if wrapper cancelled us, we expect it to also
+ # have re-assigned a new event
+ assert (
+ nobsws._connected
+ and not nobsws._connected.is_set()
+ )
+
+ # -> from here, move to next reconnect attempt
+
+ else:
+ log.exception(
+ f'{src_mod}\n'
+ 'ws connection closed by client...'
+ )
+
+
+@acm
async def open_autorecon_ws(
url: str,
- # TODO: proper type cannot smh
- fixture: Optional[Callable] = None,
+ fixture: AsyncContextManager | None = None,
-) -> AsyncGenerator[tuple[...], NoBsWs]:
- """Apparently we can QoS for all sorts of reasons..so catch em.
+ # time in sec between msgs received before
+ # we presume connection might need a reset.
+ msg_recv_timeout: float = 16,
- """
- async with AsyncExitStack() as stack:
- ws = NoBsWs(url, stack, fixture=fixture)
- await ws._connect()
+ # count of the number of above timeouts before connection reset
+ reset_after: int = 3,
+
+) -> AsyncGenerator[tuple[...], NoBsWs]:
+ '''
+ An auto-reconnect websocket (wrapper API) around
+ ``trio_websocket.open_websocket_url()`` providing automatic
+    re-connection on network errors and msg-latency timeouts (and thus supports roaming).
+
+ Here we implement a re-connect websocket interface where a bg
+ nursery runs ``WebSocketConnection.receive_message()``s in a loop
+ and restarts the full http(s) handshake on catches of certain
+    connectivity errors, or some user-defined recv timeout.
+
+ You can provide a ``fixture`` async-context-manager which will be
+    entered/exited around each connection reset; eg. for (re)requesting
+ subscriptions without requiring streaming setup code to rerun.
+
+ '''
+ snd: trio.MemorySendChannel
+ rcv: trio.MemoryReceiveChannel
+ snd, rcv = trio.open_memory_channel(616)
+
+ async with trio.open_nursery() as n:
+ nobsws = NoBsWs(
+ url,
+ rcv,
+ msg_recv_timeout=msg_recv_timeout,
+ )
+ await n.start(
+ partial(
+ _reconnect_forever,
+ url,
+ snd,
+ nobsws,
+ fixture=fixture,
+ reset_after=reset_after,
+ )
+ )
+ await nobsws._connected.wait()
+ assert nobsws._cs
+ assert nobsws.connected()
try:
- yield ws
-
+ yield nobsws
finally:
- await stack.aclose()
+ n.cancel_scope.cancel()
'''
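
A hedged usage sketch of the new auto-reconnect API (the url and subscription payload are made up): the ``fixture`` acm is (re)entered by the background relay task around every (re)connect so subscriptions get replayed automatically, and the ``NoBsWs`` handle can be async-iterated for decoded msgs.

from contextlib import asynccontextmanager as acm

from piker.data._web_bs import NoBsWs, open_autorecon_ws

@acm
async def subscribe(ws: NoBsWs):
    # re-sent on every (re)connect by the relay task
    await ws.send_msg({'op': 'subscribe', 'channel': 'trades'})
    yield  # must yield None, as asserted by `open_fixture()`

async def stream_trades() -> None:
    async with open_autorecon_ws(
        'wss://example.com/realtime',  # illustrative url
        fixture=subscribe,
        msg_recv_timeout=16,           # secs between msgs before a reset
    ) as ws:
        async for msg in ws:
            print(msg)
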
@@ -195,7 +366,7 @@ class JSONRPCResult(Struct):
error: Optional[dict] = None
-@asynccontextmanager
+@acm
async def open_jsonrpc_session(
url: str,
start_id: int = 0,
diff --git a/piker/data/cli.py b/piker/data/cli.py
index 6984d9ff..59db1037 100644
--- a/piker/data/cli.py
+++ b/piker/data/cli.py
@@ -32,14 +32,11 @@ from ..service.marketstore import (
)
from ..cli import cli
from .. import watchlists as wl
-from ..log import (
- get_logger,
+from ._util import (
+ log,
)
-log = get_logger(__name__)
-
-
@cli.command()
@click.option(
'--url',
@@ -187,10 +184,10 @@ def storage(
symbol = symbols[0]
async with open_tsdb_client(symbol) as storage:
if delete:
- for fqsn in symbols:
+ for fqme in symbols:
syms = await storage.client.list_symbols()
- resp60s = await storage.delete_ts(fqsn, 60)
+ resp60s = await storage.delete_ts(fqme, 60)
msgish = resp60s.ListFields()[0][1]
if 'error' in str(msgish):
@@ -202,15 +199,15 @@ def storage(
# well, if we ever can make this work we
# probably want to dogsplain the real reason
# for the delete errurz..llululu
- if fqsn not in syms:
- log.error(f'Pair {fqsn} dne in DB')
+ if fqme not in syms:
+ log.error(f'Pair {fqme} dne in DB')
- log.error(f'Deletion error: {fqsn}\n{msgish}')
+ log.error(f'Deletion error: {fqme}\n{msgish}')
- resp1s = await storage.delete_ts(fqsn, 1)
+ resp1s = await storage.delete_ts(fqme, 1)
msgish = resp1s.ListFields()[0][1]
if 'error' in str(msgish):
- log.error(f'Deletion error: {fqsn}\n{msgish}')
+ log.error(f'Deletion error: {fqme}\n{msgish}')
trio.run(main)
diff --git a/piker/data/feed.py b/piker/data/feed.py
index 7efd5eb3..1714cf19 100644
--- a/piker/data/feed.py
+++ b/piker/data/feed.py
@@ -14,30 +14,31 @@
# You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
-"""
+'''
Data feed apis and infra.
-This module is enabled for ``brokerd`` daemons.
+This module is enabled for ``brokerd`` daemons and mostly includes
+endpoints and middleware to support our real-time, provider-agnostic,
+live market quotes layer. Historical data loading and processing is also
+initiated in parts of the feed bus startup but business logic and
+functionality is generally located in the sibling `.data.history`
+module.
-"""
+'''
from __future__ import annotations
from collections import (
defaultdict,
- Counter,
)
from contextlib import asynccontextmanager as acm
-from datetime import datetime
from functools import partial
import time
from types import ModuleType
from typing import (
Any,
AsyncContextManager,
- Callable,
Optional,
Awaitable,
Sequence,
- TYPE_CHECKING,
Union,
)
@@ -49,46 +50,35 @@ from tractor.trionics import (
maybe_open_context,
gather_contexts,
)
-import pendulum
-import numpy as np
from ..brokers import get_brokermod
from ..calc import humanize
-from ..log import (
- get_logger,
+from ._util import (
+ log,
get_console_log,
)
from ..service import (
maybe_spawn_brokerd,
- check_for_service,
)
from .flows import Flume
-from ._sharedmem import (
- maybe_open_shm_array,
- ShmArray,
- _secs_in_day,
+from .validate import (
+ FeedInit,
+ validate_backend,
+)
+from .history import (
+ manage_history,
)
from .ingest import get_ingestormod
from .types import Struct
-from ._source import (
- base_iohlc_dtype,
- Symbol,
- unpack_fqsn,
+from ..accounting import (
+ MktPair,
+ unpack_fqme,
)
from ..ui import _search
from ._sampling import (
- open_sample_stream,
sample_and_broadcast,
uniform_rate_send,
)
-from ..brokers._util import (
- DataUnavailable,
-)
-
-if TYPE_CHECKING:
- from ..service.marketstore import Storage
-
-log = get_logger(__name__)
class _FeedsBus(Struct):
@@ -227,709 +217,6 @@ def get_feed_bus(
return _bus
-def diff_history(
- array: np.ndarray,
- timeframe: int,
- start_dt: datetime,
- end_dt: datetime,
- last_tsdb_dt: datetime | None = None
-
-) -> np.ndarray:
-
- # no diffing with tsdb dt index possible..
- if last_tsdb_dt is None:
- return array
-
- time = array['time']
- return array[time > last_tsdb_dt.timestamp()]
-
-
-async def start_backfill(
- mod: ModuleType,
- bfqsn: str,
- shm: ShmArray,
- timeframe: float,
- sampler_stream: tractor.MsgStream,
- feed_is_live: trio.Event,
-
- last_tsdb_dt: Optional[datetime] = None,
- storage: Optional[Storage] = None,
- write_tsdb: bool = True,
- tsdb_is_up: bool = False,
-
- task_status: TaskStatus[tuple] = trio.TASK_STATUS_IGNORED,
-
-) -> int:
-
- hist: Callable[
- [int, datetime, datetime],
- tuple[np.ndarray, str]
- ]
- config: dict[str, int]
- async with mod.open_history_client(bfqsn) as (hist, config):
-
- # get latest query's worth of history all the way
- # back to what is recorded in the tsdb
- array, start_dt, end_dt = await hist(
- timeframe,
- end_dt=None,
- )
- times = array['time']
-
- # sample period step size in seconds
- step_size_s = (
- pendulum.from_timestamp(times[-1])
- - pendulum.from_timestamp(times[-2])
- ).seconds
-
- # if the market is open (aka we have a live feed) but the
- # history sample step index seems off we report the surrounding
- # data and drop into a bp. this case shouldn't really ever
- # happen if we're doing history retrieval correctly.
- if (
- step_size_s == 60
- and feed_is_live.is_set()
- ):
- inow = round(time.time())
- diff = inow - times[-1]
- if abs(diff) > 60:
- surr = array[-6:]
- diff_in_mins = round(diff/60., ndigits=2)
- log.warning(
- f'STEP ERROR `{bfqsn}` for period {step_size_s}s:\n'
- f'Off by `{diff}` seconds (or `{diff_in_mins}` mins)\n'
- 'Surrounding 6 time stamps:\n'
- f'{list(surr["time"])}\n'
- 'Here is surrounding 6 samples:\n'
- f'{surr}\nn'
- )
-
- # uncomment this for a hacker who wants to investigate
- # this case manually..
- # await tractor.breakpoint()
-
- # frame's worth of sample-period-steps, in seconds
- frame_size_s = len(array) * step_size_s
-
- to_push = diff_history(
- array,
- timeframe,
- start_dt,
- end_dt,
- last_tsdb_dt=last_tsdb_dt,
- )
-
- log.info(f'Pushing {to_push.size} to shm!')
- shm.push(to_push, prepend=True)
-
- # TODO: *** THIS IS A BUG ***
- # we need to only broadcast to subscribers for this fqsn..
- # otherwise all fsps get reset on every chart..
- await sampler_stream.send('broadcast_all')
-
- # signal that backfilling to tsdb's end datum is complete
- bf_done = trio.Event()
-
- # let caller unblock and deliver latest history frame
- task_status.started((
- start_dt,
- end_dt,
- bf_done,
- ))
-
- # based on the sample step size, maybe load a certain amount history
- if last_tsdb_dt is None:
- if step_size_s not in (1, 60):
- raise ValueError(
- '`piker` only needs to support 1m and 1s sampling '
- 'but ur api is trying to deliver a longer '
- f'timeframe of {step_size_s} seconds..\n'
- 'So yuh.. dun do dat brudder.'
- )
-
- # when no tsdb "last datum" is provided, we just load
- # some near-term history.
- periods = {
- 1: {'days': 1},
- 60: {'days': 14},
- }
-
- if tsdb_is_up:
- # do a decently sized backfill and load it into storage.
- periods = {
- 1: {'days': 6},
- 60: {'years': 6},
- }
-
- period_duration = periods[step_size_s]
-
- # NOTE: manually set the "latest" datetime which we intend to
- # backfill history "until" so as to adhere to the history
- # settings above when the tsdb is detected as being empty.
- last_tsdb_dt = start_dt.subtract(**period_duration)
-
- # configure async query throttling
- # rate = config.get('rate', 1)
- # XXX: legacy from ``trimeter`` code but unsupported now.
- # erlangs = config.get('erlangs', 1)
-
- # avoid duplicate history frames with a set of datetime frame
- # starts and associated counts of how many duplicates we see
- # per time stamp.
- starts: Counter[datetime] = Counter()
-
- # inline sequential loop where we simply pass the
- # last retrieved start dt to the next request as
- # it's end dt.
- while end_dt > last_tsdb_dt:
- log.debug(
- f'Requesting {step_size_s}s frame ending in {start_dt}'
- )
-
- try:
- array, next_start_dt, end_dt = await hist(
- timeframe,
- end_dt=start_dt,
- )
-
- # broker says there never was or is no more history to pull
- except DataUnavailable:
- log.warning(
- f'NO-MORE-DATA: backend {mod.name} halted history!?'
- )
-
- # ugh, what's a better way?
- # TODO: fwiw, we probably want a way to signal a throttle
- # condition (eg. with ib) so that we can halt the
- # request loop until the condition is resolved?
- return
-
- if (
- next_start_dt in starts
- and starts[next_start_dt] <= 6
- ):
- start_dt = min(starts)
- log.warning(
- f"{bfqsn}: skipping duplicate frame @ {next_start_dt}"
- )
- starts[start_dt] += 1
- continue
-
- elif starts[next_start_dt] > 6:
- log.warning(
- f'NO-MORE-DATA: backend {mod.name} before {next_start_dt}?'
- )
- return
-
- # only update new start point if not-yet-seen
- start_dt = next_start_dt
- starts[start_dt] += 1
-
- assert array['time'][0] == start_dt.timestamp()
-
- diff = end_dt - start_dt
- frame_time_diff_s = diff.seconds
- expected_frame_size_s = frame_size_s + step_size_s
-
- if frame_time_diff_s > expected_frame_size_s:
-
- # XXX: query result includes a start point prior to our
- # expected "frame size" and thus is likely some kind of
- # history gap (eg. market closed period, outage, etc.)
- # so just report it to console for now.
- log.warning(
- f'History frame ending @ {end_dt} appears to have a gap:\n'
- f'{diff} ~= {frame_time_diff_s} seconds'
- )
-
- to_push = diff_history(
- array,
- timeframe,
- start_dt,
- end_dt,
- last_tsdb_dt=last_tsdb_dt,
- )
- ln = len(to_push)
- if ln:
- log.info(f'{ln} bars for {start_dt} -> {end_dt}')
-
- else:
- log.warning(
- f'{ln} BARS TO PUSH after diff?!: {start_dt} -> {end_dt}'
- )
-
- # bail gracefully on shm allocation overrun/full condition
- try:
- shm.push(to_push, prepend=True)
- except ValueError:
- log.info(
- f'Shm buffer overrun on: {start_dt} -> {end_dt}?'
- )
- # can't push the entire frame? so
- # push only the amount that can fit..
- break
-
- log.info(
- f'Shm pushed {ln} frame:\n'
- f'{start_dt} -> {end_dt}'
- )
-
- if (
- storage is not None
- and write_tsdb
- ):
- log.info(
- f'Writing {ln} frame to storage:\n'
- f'{start_dt} -> {end_dt}'
- )
- await storage.write_ohlcv(
- f'{bfqsn}.{mod.name}', # lul..
- to_push,
- timeframe,
- )
-
- # TODO: can we only trigger this if the respective
- # history in "in view"?!?
-
- # XXX: extremely important, there can be no checkpoints
- # in the block above to avoid entering new ``frames``
- # values while we're pipelining the current ones to
- # memory...
- await sampler_stream.send('broadcast_all')
-
- # short-circuit (for now)
- bf_done.set()
-
-
-async def basic_backfill(
- bus: _FeedsBus,
- mod: ModuleType,
- bfqsn: str,
- shms: dict[int, ShmArray],
- sampler_stream: tractor.MsgStream,
- feed_is_live: trio.Event,
-
-) -> None:
-
- # do a legacy incremental backfill from the provider.
- log.info('No TSDB (marketstored) found, doing basic backfill..')
-
- # start history backfill task ``backfill_bars()`` is
- # a required backend func this must block until shm is
- # filled with first set of ohlc bars
- for timeframe, shm in shms.items():
- try:
- await bus.nursery.start(
- partial(
- start_backfill,
- mod,
- bfqsn,
- shm,
- timeframe,
- sampler_stream,
- feed_is_live,
- )
- )
- except DataUnavailable:
- # XXX: timeframe not supported for backend
- continue
-
-
-async def tsdb_backfill(
- mod: ModuleType,
- marketstore: ModuleType,
- bus: _FeedsBus,
- storage: Storage,
- fqsn: str,
- bfqsn: str,
- shms: dict[int, ShmArray],
- sampler_stream: tractor.MsgStream,
- feed_is_live: trio.Event,
-
- task_status: TaskStatus[
- tuple[ShmArray, ShmArray]
- ] = trio.TASK_STATUS_IGNORED,
-
-) -> None:
-
- # TODO: this should be used verbatim for the pure
- # shm backfiller approach below.
- dts_per_tf: dict[int, datetime] = {}
-
- # start history anal and load missing new data via backend.
- for timeframe, shm in shms.items():
- # loads a (large) frame of data from the tsdb depending
- # on the db's query size limit.
- tsdb_history, first_tsdb_dt, last_tsdb_dt = await storage.load(
- fqsn,
- timeframe=timeframe,
- )
-
- broker, symbol, expiry = unpack_fqsn(fqsn)
- try:
- (
- latest_start_dt,
- latest_end_dt,
- bf_done,
- ) = await bus.nursery.start(
- partial(
- start_backfill,
- mod,
- bfqsn,
- shm,
- timeframe,
- sampler_stream,
- feed_is_live,
-
- last_tsdb_dt=last_tsdb_dt,
- tsdb_is_up=True,
- storage=storage,
- )
- )
- except DataUnavailable:
- # XXX: timeframe not supported for backend
- dts_per_tf[timeframe] = (
- tsdb_history,
- last_tsdb_dt,
- None,
- None,
- None,
- )
- continue
-
- # tsdb_history = series.get(timeframe)
- dts_per_tf[timeframe] = (
- tsdb_history,
- last_tsdb_dt,
- latest_start_dt,
- latest_end_dt,
- bf_done,
- )
-
- # if len(hist_shm.array) < 2:
- # TODO: there's an edge case here to solve where if the last
- # frame before market close (at least on ib) was pushed and
- # there was only "1 new" row pushed from the first backfill
- # query-iteration, then the sample step sizing calcs will
- # break upstream from here since you can't diff on at least
- # 2 steps... probably should also add logic to compute from
- # the tsdb series and stash that somewhere as meta data on
- # the shm buffer?.. no se.
-
- # unblock the feed bus management task
- # assert len(shms[1].array)
- task_status.started()
-
- async def back_load_from_tsdb(
- timeframe: int,
- shm: ShmArray,
- ):
- (
- tsdb_history,
- last_tsdb_dt,
- latest_start_dt,
- latest_end_dt,
- bf_done,
- ) = dts_per_tf[timeframe]
-
- # sync to backend history task's query/load completion
- if bf_done:
- await bf_done.wait()
-
- # TODO: eventually it'd be nice to not require a shm array/buffer
- # to accomplish this.. maybe we can do some kind of tsdb direct to
- # graphics format eventually in a child-actor?
-
- # TODO: see if there's faster multi-field reads:
- # https://numpy.org/doc/stable/user/basics.rec.html#accessing-multiple-fields
- # re-index with a `time` and index field
- prepend_start = shm._first.value
- array = shm.array
- if len(array):
- shm_last_dt = pendulum.from_timestamp(shm.array[0]['time'])
- else:
- shm_last_dt = None
-
- if last_tsdb_dt:
- assert shm_last_dt >= last_tsdb_dt
-
- # do diff against start index of last frame of history and only
- # fill in an amount of datums from tsdb allows for most recent
- # to be loaded into mem *before* tsdb data.
- if (
- last_tsdb_dt
- and latest_start_dt
- ):
- backfilled_size_s = (
- latest_start_dt - last_tsdb_dt
- ).seconds
- # if the shm buffer len is not large enough to contain
- # all missing data between the most recent backend-queried frame
- # and the most recent dt-index in the db we warn that we only
- # want to load a portion of the next tsdb query to fill that
- # space.
- log.info(
- f'{backfilled_size_s} seconds worth of {timeframe}s loaded'
- )
-
- # Load TSDB history into shm buffer (for display) if there is
- # remaining buffer space.
-
- if (
- len(tsdb_history)
- ):
- # load the first (smaller) bit of history originally loaded
- # above from ``Storage.load()``.
- to_push = tsdb_history[-prepend_start:]
- shm.push(
- to_push,
-
- # insert the history pre a "days worth" of samples
- # to leave some real-time buffer space at the end.
- prepend=True,
- # update_first=False,
- # start=prepend_start,
- field_map=marketstore.ohlc_key_map,
- )
-
- tsdb_last_frame_start = tsdb_history['Epoch'][0]
-
- if timeframe == 1:
- times = shm.array['time']
- assert (times[1] - times[0]) == 1
-
- # load as much from storage into shm possible (depends on
- # user's shm size settings).
- while shm._first.value > 0:
-
- tsdb_history = await storage.read_ohlcv(
- fqsn,
- timeframe=timeframe,
- end=tsdb_last_frame_start,
- )
-
- # empty query
- if not len(tsdb_history):
- break
-
- next_start = tsdb_history['Epoch'][0]
- if next_start >= tsdb_last_frame_start:
- # no earlier data detected
- break
- else:
- tsdb_last_frame_start = next_start
-
- prepend_start = shm._first.value
- to_push = tsdb_history[-prepend_start:]
-
- # insert the history pre a "days worth" of samples
- # to leave some real-time buffer space at the end.
- shm.push(
- to_push,
- prepend=True,
- field_map=marketstore.ohlc_key_map,
- )
- log.info(f'Loaded {to_push.shape} datums from storage')
-
- # manually trigger step update to update charts/fsps
- # which need an incremental update.
- # NOTE: the way this works is super duper
- # un-intuitive right now:
- # - the broadcaster fires a msg to the fsp subsystem.
- # - fsp subsys then checks for a sample step diff and
- # possibly recomputes prepended history.
- # - the fsp then sends back to the parent actor
- # (usually a chart showing graphics for said fsp)
- # which tells the chart to conduct a manual full
- # graphics loop cycle.
- await sampler_stream.send('broadcast_all')
-
- # TODO: write new data to tsdb to be ready to for next read.
-
- # backload from db (concurrently per timeframe) once backfilling of
- # recent dat a loaded from the backend provider (see
- # ``bf_done.wait()`` call).
- async with trio.open_nursery() as nurse:
- for timeframe, shm in shms.items():
- nurse.start_soon(
- back_load_from_tsdb,
- timeframe,
- shm,
- )
-
-
-async def manage_history(
- mod: ModuleType,
- bus: _FeedsBus,
- fqsn: str,
- some_data_ready: trio.Event,
- feed_is_live: trio.Event,
- timeframe: float = 60, # in seconds
-
- task_status: TaskStatus[
- tuple[ShmArray, ShmArray]
- ] = trio.TASK_STATUS_IGNORED,
-
-) -> None:
- '''
- Load and manage historical data including the loading of any
- available series from `marketstore` as well as conducting real-time
- update of both that existing db and the allocated shared memory
- buffer.
-
- '''
-
- # TODO: is there a way to make each shm file key
- # actor-tree-discovery-addr unique so we avoid collisions
- # when doing tests which also allocate shms for certain instruments
- # that may be in use on the system by some other running daemons?
- # from tractor._state import _runtime_vars
- # port = _runtime_vars['_root_mailbox'][1]
-
- uid = tractor.current_actor().uid
- suffix = '.'.join(uid)
-
- # (maybe) allocate shm array for this broker/symbol which will
- # be used for fast near-term history capture and processing.
- hist_shm, opened = maybe_open_shm_array(
- # key=f'{fqsn}_hist_p{port}',
- key=f'{fqsn}_hist.{suffix}',
-
- # use any broker defined ohlc dtype:
- dtype=getattr(mod, '_ohlc_dtype', base_iohlc_dtype),
-
- # we expect the sub-actor to write
- readonly=False,
- )
- hist_zero_index = hist_shm.index - 1
-
- # TODO: history validation
- if not opened:
- raise RuntimeError(
- "Persistent shm for sym was already open?!"
- )
-
- rt_shm, opened = maybe_open_shm_array(
- # key=f'{fqsn}_rt_p{port}',
- key=f'{fqsn}_rt.{suffix}',
-
- # use any broker defined ohlc dtype:
- dtype=getattr(mod, '_ohlc_dtype', base_iohlc_dtype),
-
- # we expect the sub-actor to write
- readonly=False,
- size=3*_secs_in_day,
- )
-
- # (for now) set the rt (hft) shm array with space to prepend
- # only a few days worth of 1s history.
- days = 2
- start_index = days*_secs_in_day
- rt_shm._first.value = start_index
- rt_shm._last.value = start_index
- rt_zero_index = rt_shm.index - 1
-
- if not opened:
- raise RuntimeError(
- "Persistent shm for sym was already open?!"
- )
-
- # register 1s and 1m buffers with the global incrementer task
- async with open_sample_stream(
- period_s=1.,
- shms_by_period={
- 1.: rt_shm.token,
- 60.: hist_shm.token,
- },
-
- # NOTE: we want to only open a stream for doing broadcasts on
- # backfill operations, not receive the sample index-stream
- # (since there's no code in this data feed layer that needs to
- # consume it).
- open_index_stream=True,
- sub_for_broadcasts=False,
-
- ) as sample_stream:
-
- log.info('Scanning for existing `marketstored`')
- tsdb_is_up = await check_for_service('marketstored')
-
- bfqsn = fqsn.replace('.' + mod.name, '')
- open_history_client = getattr(mod, 'open_history_client', None)
- assert open_history_client
-
- if (
- tsdb_is_up
- and opened
- and open_history_client
- ):
- log.info('Found existing `marketstored`')
-
- from ..service import marketstore
- async with (
- marketstore.open_storage_client(fqsn)as storage,
- ):
- # TODO: drop returning the output that we pass in?
- await bus.nursery.start(
- tsdb_backfill,
- mod,
- marketstore,
- bus,
- storage,
- fqsn,
- bfqsn,
- {
- 1: rt_shm,
- 60: hist_shm,
- },
- sample_stream,
- feed_is_live,
- )
-
- # yield back after client connect with filled shm
- task_status.started((
- hist_zero_index,
- hist_shm,
- rt_zero_index,
- rt_shm,
- ))
-
- # indicate to caller that feed can be delivered to
- # remote requesting client since we've loaded history
- # data that can be used.
- some_data_ready.set()
-
- # history retreival loop depending on user interaction
- # and thus a small RPC-prot for remotely controllinlg
- # what data is loaded for viewing.
- await trio.sleep_forever()
-
- # load less history if no tsdb can be found
- elif (
- not tsdb_is_up
- and opened
- ):
- await basic_backfill(
- bus,
- mod,
- bfqsn,
- {
- 1: rt_shm,
- 60: hist_shm,
- },
- sample_stream,
- feed_is_live,
- )
- task_status.started((
- hist_zero_index,
- hist_shm,
- rt_zero_index,
- rt_shm,
- ))
- some_data_ready.set()
- await trio.sleep_forever()
-
-
async def allocate_persistent_feed(
bus: _FeedsBus,
sub_registered: trio.Event,
@@ -940,7 +227,7 @@ async def allocate_persistent_feed(
loglevel: str,
start_stream: bool = True,
- task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,
+ task_status: TaskStatus[FeedInit] = trio.TASK_STATUS_IGNORED,
) -> None:
'''
@@ -970,19 +257,33 @@ async def allocate_persistent_feed(
some_data_ready = trio.Event()
feed_is_live = trio.Event()
- symstr = symstr.lower()
-
# establish broker backend quote stream by calling
- # ``stream_quotes()``, which is a required broker backend endpoint.
- init_msg, first_quote = await bus.nursery.start(
+ # ``stream_quotes()``, a required broker backend endpoint.
+ init_msgs: (
+ list[FeedInit] # new
+ | dict[str, dict[str, str]] # legacy / deprecated
+ )
+
+ # TODO: probably make a struct msg type for this as well
+ # since eventually we do want to have more efficient IPC..
+ first_quote: dict[str, Any]
+ (
+ init_msgs,
+ first_quote,
+ ) = await bus.nursery.start(
partial(
mod.stream_quotes,
send_chan=send,
feed_is_live=feed_is_live,
+
+ # NOTE / TODO: eventually we may support providing more than
+ # one input here such that a datad daemon can multiplex
+ # multiple live feeds from one task, instead of getting
+ # a new request (and thus new task) for each subscription.
symbols=[symstr],
- loglevel=loglevel,
)
)
+
# TODO: this is indexed by symbol for now since we've planned (for
# some time) to expect backends to handle single
# ``.stream_quotes()`` calls with multiple symbols inputs to just
@@ -1005,21 +306,13 @@ async def allocate_persistent_feed(
# a small streaming machine around the remote feed which can then
# do the normal work of sampling and writing shm buffers
# (depending on if we want sampling done on the far end or not?)
- msg = init_msg[symstr]
-
- # the broker-specific fully qualified symbol name,
- # but ensure it is lower-cased for external use.
- bfqsn = msg['fqsn'].lower()
-
- # true fqsn including broker/provider suffix
- fqsn = '.'.join((bfqsn, brokername))
- # msg['fqsn'] = bfqsn
-
- symbol = Symbol.from_fqsn(
- fqsn=fqsn,
- info=msg['symbol_info'],
+ init: FeedInit = validate_backend(
+ mod,
+ [symstr],
+ init_msgs, # NOTE: only 1 should be delivered for now..
)
- assert symbol.type_key
+ mkt: MktPair = init.mkt_info
+ fqme: str = mkt.fqme
# HISTORY storage, run 2 tasks:
# - a history loader / maintainer
@@ -1040,30 +333,45 @@ async def allocate_persistent_feed(
manage_history,
mod,
bus,
- fqsn,
+ mkt,
some_data_ready,
feed_is_live,
)
# yield back control to starting nursery once we receive either
# some history or a real-time quote.
- log.info(f'waiting on history to load: {fqsn}')
+ log.info(f'waiting on history to load: {fqme}')
await some_data_ready.wait()
flume = Flume(
- symbol=symbol,
+
+ # TODO: we have to use this for now since currently the
+ # MktPair above doesn't render the correct output key it seems
+ # when we provide the `MktInfo` here?..?
+ mkt=mkt,
+
first_quote=first_quote,
_rt_shm_token=rt_shm.token,
_hist_shm_token=hist_shm.token,
izero_hist=izero_hist,
izero_rt=izero_rt,
+
+ # NOTE: some instruments don't have this provided,
+ # eg. commodities and forex from ib.
+ _has_vlm=init.shm_write_opts['has_vlm'],
)
- # for ambiguous names we simply apply the retreived
+ # for ambiguous names we simply register the
+ # flume for all possible name (sub) sets.
# feed to that name (for now).
- bus.feeds[symstr] = bus.feeds[bfqsn] = flume
+ bus.feeds.update({
+ symstr: flume,
+ fqme: flume,
+ mkt.bs_fqme: flume,
+ })
- task_status.started()
+ # signal the ``open_feed_bus()`` caller task to continue
+ task_status.started(init)
if not start_stream:
await trio.sleep_forever()
@@ -1072,9 +380,14 @@ async def allocate_persistent_feed(
# the backend will indicate when real-time quotes have begun.
await feed_is_live.wait()
- sum_tick_vlm: bool = init_msg.get(
- 'shm_write_opts', {}
- ).get('sum_tick_vlm', True)
+ # NOTE: if not configured otherwise, we always sum tick volume
+ # values in the OHLCV sampler.
+ sum_tick_vlm: bool = True
+ if init.shm_write_opts:
+ sum_tick_vlm: bool = init.shm_write_opts.get(
+ 'sum_tick_vlm',
+ True,
+ )
# NOTE: if no high-freq sampled data has (yet) been loaded,
# seed the buffer with a history datum - this is most handy
@@ -1095,7 +408,7 @@ async def allocate_persistent_feed(
rt_shm.array['time'][1] = ts + 1
elif hist_shm.array.size == 0:
- await tractor.breakpoint()
+ raise RuntimeError(f'History (1m) Shm for {fqme} is empty!?')
# wait the spawning parent task to register its subscriber
# send-stream entry before we start the sample loop.
@@ -1104,7 +417,7 @@ async def allocate_persistent_feed(
# start sample loop and shm incrementer task for OHLC style sampling
# at the above registered step periods.
try:
- log.info(f'Starting sampler task for {fqsn}')
+ log.info(f'Starting sampler task for {fqme}')
await sample_and_broadcast(
bus,
rt_shm,
@@ -1114,7 +427,7 @@ async def allocate_persistent_feed(
sum_tick_vlm
)
finally:
- log.warning(f'{fqsn} feed task terminated')
+ log.warning(f'{fqme} feed task terminated')
@tractor.context
@@ -1122,14 +435,14 @@ async def open_feed_bus(
ctx: tractor.Context,
brokername: str,
- symbols: list[str], # normally expected to the broker-specific fqsn
+ symbols: list[str], # normally expected to be the broker-specific fqme
loglevel: str = 'error',
tick_throttle: Optional[float] = None,
start_stream: bool = True,
) -> dict[
- str, # fqsn
+ str, # fqme
tuple[dict, dict] # pair of dicts of the initmsg and first quotes
]:
'''
@@ -1138,10 +451,6 @@ async def open_feed_bus(
symbol.
'''
- # ensure that a quote feed stream which is pushing too fast doesn't
- # cause and overrun in the client.
- ctx._backpressure = True
-
if loglevel is None:
loglevel = tractor.current_actor().loglevel
@@ -1154,7 +463,6 @@ async def open_feed_bus(
# ensure we are who we think we are
servicename = tractor.current_actor().name
assert 'brokerd' in servicename
-
assert brokername in servicename
bus = get_feed_bus(brokername)
@@ -1164,9 +472,6 @@ async def open_feed_bus(
for symbol in symbols:
- # we always use lower case keys internally
- symbol = symbol.lower()
-
# if no cached feed for this symbol has been created for this
# brokerd yet, start persistent stream and shm writer task in
# service nursery
@@ -1195,45 +500,50 @@ async def open_feed_bus(
# XXX: ``.first_quote`` may be outdated here if this is secondary
# subscriber
- flume = bus.feeds[symbol]
- sym = flume.symbol
- bfqsn = sym.key
- fqsn = sym.fqsn # true fqsn
- assert bfqsn in fqsn and brokername in fqsn
+ flume: Flume = bus.feeds[symbol]
+ mkt: MktPair = flume.mkt
+ bs_fqme: str = mkt.bs_fqme
+ fqme: str = mkt.fqme
+ assert brokername in fqme
- if sym.suffix:
- bfqsn = fqsn.removesuffix(f'.{brokername}')
- log.warning(f'{brokername} expanded symbol {symbol} -> {bfqsn}')
+ if mkt.suffix:
+ log.warning(f'{brokername} expanded symbol {symbol} -> {bs_fqme}')
# pack for ``.started()`` sync msg
- flumes[fqsn] = flume
+ flumes[fqme] = flume
- # we use the broker-specific fqsn (bfqsn) for
- # the sampler subscription since the backend isn't (yet)
- # expected to append it's own name to the fqsn, so we filter
- # on keys which *do not* include that name (e.g .ib) .
- bus._subscribers.setdefault(bfqsn, set())
+ # we use the broker-specific fqme (bs_fqme) for the
+ # sampler subscription since the backend isn't (yet) expected to
+ # append its own name to the fqme, so we filter on keys which
+ # *do not* include that name (e.g. .ib).
+ bus._subscribers.setdefault(bs_fqme, set())
# sync feed subscribers with flume handles
await ctx.started(
- {fqsn: flume.to_msg() for fqsn, flume in flumes.items()}
+ {fqme: flume.to_msg()
+ for fqme, flume in flumes.items()}
)
if not start_stream:
- log.warning(f'Not opening real-time stream for {fqsn}')
+ log.warning(f'Not opening real-time stream for {fqme}')
await trio.sleep_forever()
# real-time stream loop
async with (
- ctx.open_stream() as stream,
+ ctx.open_stream(
+ # NOTE we allow this since it's common to have the live
+ # quote feed actor's sampling task push faster than
+ # the local UI-graphics code during startup.
+ # allow_overruns=True,
+ ) as stream,
):
local_subs: dict[str, set[tuple]] = {}
- for fqsn, flume in flumes.items():
+ for fqme, flume in flumes.items():
# re-send to trigger display loop cycle (necessary especially
# when the mkt is closed and no real-time messages are
# expected).
- await stream.send({fqsn: flume.first_quote})
+ await stream.send({fqme: flume.first_quote})
# set a common msg stream for all requested symbols
assert stream
@@ -1253,7 +563,6 @@ async def open_feed_bus(
# a max ``tick_throttle`` instantaneous rate.
send, recv = trio.open_memory_channel(2**10)
- ctx._backpressure = False
cs = await bus.start_task(
uniform_rate_send,
tick_throttle,
@@ -1276,9 +585,9 @@ async def open_feed_bus(
# maybe use the current task-id to key the sub list that's
# added / removed? Or maybe we can add a general
# pause-resume by sub-key api?
- bfqsn = fqsn.removesuffix(f'.{brokername}')
- local_subs.setdefault(bfqsn, set()).add(sub)
- bus.add_subs(bfqsn, {sub})
+ bs_fqme = fqme.removesuffix(f'.{brokername}')
+ local_subs.setdefault(bs_fqme, set()).add(sub)
+ bus.add_subs(bs_fqme, {sub})
# sync caller with all subs registered state
sub_registered.set()
@@ -1291,16 +600,16 @@ async def open_feed_bus(
async for msg in stream:
if msg == 'pause':
- for bfqsn, subs in local_subs.items():
+ for bs_fqme, subs in local_subs.items():
log.info(
- f'Pausing {bfqsn} feed for {uid}')
- bus.remove_subs(bfqsn, subs)
+ f'Pausing {bs_fqme} feed for {uid}')
+ bus.remove_subs(bs_fqme, subs)
elif msg == 'resume':
- for bfqsn, subs in local_subs.items():
+ for bs_fqme, subs in local_subs.items():
log.info(
- f'Resuming {bfqsn} feed for {uid}')
- bus.add_subs(bfqsn, subs)
+ f'Resuming {bs_fqme} feed for {uid}')
+ bus.add_subs(bs_fqme, subs)
else:
raise ValueError(msg)
@@ -1314,8 +623,8 @@ async def open_feed_bus(
cs.cancel()
# drop all subs for this task from the bus
- for bfqsn, subs in local_subs.items():
- bus.remove_subs(bfqsn, subs)
+ for bs_fqme, subs in local_subs.items():
+ bus.remove_subs(bs_fqme, subs)
class Feed(Struct):
@@ -1348,7 +657,12 @@ class Feed(Struct):
brokers: Sequence[str] | None = None,
) -> trio.abc.ReceiveChannel:
+ '''
+ Open streams to multiple data providers (``brokers``) and
+ multiplex their msgs onto a common mem chan for
+ only-requires-a-single-thread style consumption.
+ '''
if brokers is None:
mods = self.mods
brokers = list(self.mods)
@@ -1385,14 +699,6 @@ class Feed(Struct):
_max_sample_rate: int = 1
- # @property
- # def portal(self) -> tractor.Portal:
- # return self._portal
-
- # @property
- # def name(self) -> str:
- # return self.mod.name
-
async def pause(self) -> None:
for stream in set(self.streams.values()):
await stream.send('pause')
@@ -1442,7 +748,7 @@ async def install_brokerd_search(
@acm
async def maybe_open_feed(
- fqsns: list[str],
+ fqmes: list[str],
loglevel: Optional[str] = None,
**kwargs,
@@ -1457,25 +763,25 @@ async def maybe_open_feed(
in a tractor broadcast receiver.
'''
- fqsn = fqsns[0]
+ fqme = fqmes[0]
async with maybe_open_context(
acm_func=open_feed,
kwargs={
- 'fqsns': fqsns,
+ 'fqmes': fqmes,
'loglevel': loglevel,
'tick_throttle': kwargs.get('tick_throttle'),
# XXX: super critical to have bool defaults here XD
- 'backpressure': kwargs.get('backpressure', True),
+ 'allow_overruns': kwargs.get('allow_overruns', True),
'start_stream': kwargs.get('start_stream', True),
},
- key=fqsn,
+ key=fqme,
) as (cache_hit, feed):
if cache_hit:
- log.info(f'Using cached feed for {fqsn}')
+ log.info(f'Using cached feed for {fqme}')
# add a new broadcast subscription for the quote stream
# if this feed is likely already in use
@@ -1496,10 +802,10 @@ async def maybe_open_feed(
@acm
async def open_feed(
- fqsns: list[str],
+ fqmes: list[str],
loglevel: str | None = None,
- backpressure: bool = True,
+ allow_overruns: bool = True,
start_stream: bool = True,
tick_throttle: float | None = None, # Hz
@@ -1511,9 +817,9 @@ async def open_feed(
providers: dict[ModuleType, list[str]] = {}
feed = Feed()
- for fqsn in fqsns:
- brokername, key, suffix = unpack_fqsn(fqsn)
- bfqsn = fqsn.replace('.' + brokername, '')
+ for fqme in fqmes:
+ brokername, *_ = unpack_fqme(fqme)
+ bs_fqme = fqme.replace('.' + brokername, '')
try:
mod = get_brokermod(brokername)
@@ -1521,13 +827,13 @@ async def open_feed(
mod = get_ingestormod(brokername)
# built a per-provider map to instrument names
- providers.setdefault(mod, []).append(bfqsn)
+ providers.setdefault(mod, []).append(bs_fqme)
feed.mods[mod.name] = mod
# one actor per brokerd for now
brokerd_ctxs = []
- for brokermod, bfqsns in providers.items():
+ for brokermod, bfqmes in providers.items():
# if no `brokerd` for this backend exists yet we spawn
# a daemon actor for it.
@@ -1546,7 +852,7 @@ async def open_feed(
bus_ctxs: list[AsyncContextManager] = []
for (
portal,
- (brokermod, bfqsns),
+ (brokermod, bfqmes),
) in zip(portals, providers.items()):
feed.portals[brokermod] = portal
@@ -1571,10 +877,20 @@ async def open_feed(
portal.open_context(
open_feed_bus,
brokername=brokermod.name,
- symbols=bfqsns,
+ symbols=bfqmes,
loglevel=loglevel,
start_stream=start_stream,
tick_throttle=tick_throttle,
+
+ # XXX: super important to avoid
+ # the brokerd from some other
+ # backend overrunning the task here
+ # bc some other brokerd took longer
+ # to startup before we hit the `.open_stream()`
+ # loop below XD .. really we should try to do each
+ # of these stream open sequences sequentially per
+ # backend? .. needs some thought!
+ allow_overruns=True,
)
)
@@ -1583,19 +899,16 @@ async def open_feed(
async with (
gather_contexts(bus_ctxs) as ctxs,
):
- stream_ctxs = []
+ stream_ctxs: list[tractor.MsgStream] = []
for (
(ctx, flumes_msg_dict),
- (brokermod, bfqsns),
+ (brokermod, bfqmes),
) in zip(ctxs, providers.items()):
- # NOTE: do it asap to avoid overruns during multi-feed setup?
- ctx._backpressure = backpressure
-
- for fqsn, flume_msg in flumes_msg_dict.items():
+ for fqme, flume_msg in flumes_msg_dict.items():
flume = Flume.from_msg(flume_msg)
- assert flume.symbol.fqsn == fqsn
- feed.flumes[fqsn] = flume
+ # assert flume.mkt.fqme == fqme
+ feed.flumes[fqme] = flume
# TODO: do we need this?
flume.feed = feed
@@ -1613,29 +926,32 @@ async def open_feed(
stream_ctxs.append(
ctx.open_stream(
- # XXX: be explicit about stream backpressure
+ # XXX: be explicit about stream overruns
# since we should **never** overrun on feeds
# being too fast, which will pretty much
# always happen with HFT XD
- backpressure=backpressure,
+ allow_overruns=allow_overruns,
)
)
+ stream: tractor.MsgStream
+ brokermod: ModuleType
+ fqmes: list[str]
async with (
gather_contexts(stream_ctxs) as streams,
):
for (
stream,
- (brokermod, bfqsns),
+ (brokermod, bfqmes),
) in zip(streams, providers.items()):
assert stream
feed.streams[brokermod.name] = stream
- # apply `brokerd`-common steam to each flume
- # tracking a symbol from that provider.
- for fqsn, flume in feed.flumes.items():
- if brokermod.name in flume.symbol.brokers:
+ # apply `brokerd`-common stream to each flume
+ # tracking a live market feed from that provider.
+ for fqme, flume in feed.flumes.items():
+ if brokermod.name == flume.mkt.broker:
flume.stream = stream
assert len(feed.mods) == len(feed.portals) == len(feed.streams)
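For orientation on the renamed consumer API, here is a minimal sketch (not part of this patch) of how a client is expected to use the fqme-keyed feed after this change; the market string and the assumption of an already-running `pikerd`/`brokerd` service tree are illustrative only:

```python
# hypothetical consumer sketch, NOT part of this diff; must be run from
# within piker's actor/service runtime and assumes 'btcusdt.binance'
# resolves for the backend in question.
from piker.data.feed import open_feed

async def consume(fqme: str = 'btcusdt.binance') -> None:
    async with open_feed(
        [fqme],
        allow_overruns=True,  # replaces the old `backpressure` flag
    ) as feed:
        flume = feed.flumes[fqme]  # flumes are now keyed by fqme
        mkt = flume.mkt            # `MktPair`, replacing legacy `Symbol`
        print(mkt.fqme, 'has_vlm:', flume.has_vlm())

        # quote packets are dicts keyed by fqme (see `open_feed_bus()`)
        async for quotes in flume.stream:
            for key, quote in quotes.items():
                print(key, quote)
            break
```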
diff --git a/piker/data/flows.py b/piker/data/flows.py
index 9d8b3103..7776a602 100644
--- a/piker/data/flows.py
+++ b/piker/data/flows.py
@@ -15,7 +15,7 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
-abstractions for organizing, managing and generally operating-on
+Public abstractions for organizing, managing and generally operating-on
real-time data processing data-structures.
"Streams, flumes, cascades and flows.."
@@ -30,10 +30,9 @@ import tractor
import pendulum
import numpy as np
+from ..accounting import MktPair
+from ._util import log
from .types import Struct
-from ._source import (
- Symbol,
-)
from ._sharedmem import (
attach_shm_array,
ShmArray,
@@ -89,7 +88,7 @@ class Flume(Struct):
queuing properties.
'''
- symbol: Symbol
+ mkt: MktPair
first_quote: dict
_rt_shm_token: _Token
@@ -172,8 +171,9 @@ class Flume(Struct):
# TODO: get native msgspec decoding for these workinn
def to_msg(self) -> dict:
+
msg = self.to_dict()
- msg['symbol'] = msg['symbol'].to_dict()
+ msg['mkt'] = self.mkt.to_dict()
# can't serialize the stream or feed objects, it's expected
# you'll have a ref to it since this msg should be rxed on
@@ -183,12 +183,19 @@ class Flume(Struct):
return msg
@classmethod
- def from_msg(cls, msg: dict) -> dict:
- symbol = Symbol(**msg.pop('symbol'))
- return cls(
- symbol=symbol,
- **msg,
- )
+ def from_msg(
+ cls,
+ msg: dict,
+
+ ) -> dict:
+ '''
+ Load from an IPC msg presumably in either `dict` or
+ `msgspec.Struct` form.
+
+ '''
+ mkt_msg = msg.pop('mkt')
+ mkt = MktPair.from_msg(mkt_msg)
+ return cls(mkt=mkt, **msg)
def get_index(
self,
@@ -208,3 +215,23 @@ class Flume(Struct):
)
imx = times.shape[0] - 1
return min(first, imx)
+
+ # only set by external msg or creator, never
+ # manually!
+ _has_vlm: bool = True
+
+ def has_vlm(self) -> bool:
+
+ if not self._has_vlm:
+ return False
+
+ # make sure that the instrument supports volume history
+ # (sometimes this is not the case for some commodities and
+ # derivatives)
+ vlm: np.ndarray = self.rt_shm.array['volume']
+ return not bool(
+ np.all(np.isin(vlm, -1))
+ or np.all(np.isnan(vlm))
+ )
+
+
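The `_has_vlm` flag plus the array scan above decide whether a flume can report any usable volume history; a standalone sketch of that heuristic on synthetic arrays (illustrative only, not piker code):

```python
# volume-detection heuristic as used by `Flume.has_vlm()` above:
# an instrument is treated as volume-less when every sample is either
# the -1 sentinel or NaN (eg. forex/commodities from ib).
import numpy as np

def has_vlm(vlm: np.ndarray) -> bool:
    return not bool(
        np.all(np.isin(vlm, -1))
        or np.all(np.isnan(vlm))
    )

assert has_vlm(np.array([10., 0., 3.]))         # real volume samples
assert not has_vlm(np.array([-1., -1., -1.]))   # sentinel-filled
assert not has_vlm(np.array([np.nan, np.nan]))  # never written
```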
diff --git a/piker/data/history.py b/piker/data/history.py
new file mode 100644
index 00000000..ebfe8c65
--- /dev/null
+++ b/piker/data/history.py
@@ -0,0 +1,810 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+'''
+Historical data business logic for load, backfill and tsdb storage.
+
+'''
+from __future__ import annotations
+from collections import (
+ Counter,
+)
+from datetime import datetime
+from functools import partial
+import time
+from types import ModuleType
+from typing import (
+ Callable,
+ Optional,
+ TYPE_CHECKING,
+)
+
+import trio
+from trio_typing import TaskStatus
+import tractor
+import pendulum
+import numpy as np
+
+from .. import config
+from ..accounting import (
+ MktPair,
+)
+from ._util import (
+ log,
+)
+from ..service import (
+ check_for_service,
+)
+from ._sharedmem import (
+ maybe_open_shm_array,
+ ShmArray,
+ _secs_in_day,
+)
+from ._source import base_iohlc_dtype
+from ._sampling import (
+ open_sample_stream,
+)
+from ..brokers._util import (
+ DataUnavailable,
+)
+
+if TYPE_CHECKING:
+ from ..service.marketstore import Storage
+ from .feed import _FeedsBus
+
+
+def diff_history(
+ array: np.ndarray,
+ timeframe: int,
+ start_dt: datetime,
+ end_dt: datetime,
+ last_tsdb_dt: datetime | None = None
+
+) -> np.ndarray:
+
+ # no diffing with tsdb dt index possible..
+ if last_tsdb_dt is None:
+ return array
+
+ time = array['time']
+ return array[time > last_tsdb_dt.timestamp()]
+
+
+async def start_backfill(
+ mod: ModuleType,
+ mkt: MktPair,
+ shm: ShmArray,
+ timeframe: float,
+ sampler_stream: tractor.MsgStream,
+ feed_is_live: trio.Event,
+
+ last_tsdb_dt: Optional[datetime] = None,
+ storage: Optional[Storage] = None,
+ write_tsdb: bool = True,
+ tsdb_is_up: bool = False,
+
+ task_status: TaskStatus[tuple] = trio.TASK_STATUS_IGNORED,
+
+) -> int:
+
+ hist: Callable[
+ [int, datetime, datetime],
+ tuple[np.ndarray, str]
+ ]
+ config: dict[str, int]
+
+ async with mod.open_history_client(
+ mkt,
+ ) as (hist, config):
+ log.info(f'{mod} history client returned backfill config: {config}')
+
+ # get latest query's worth of history all the way
+ # back to what is recorded in the tsdb
+ array, start_dt, end_dt = await hist(
+ timeframe,
+ end_dt=None,
+ )
+ times = array['time']
+
+ # sample period step size in seconds
+ step_size_s = (
+ pendulum.from_timestamp(times[-1])
+ - pendulum.from_timestamp(times[-2])
+ ).seconds
+
+ # if the market is open (aka we have a live feed) but the
+ # history sample step index seems off, we report the surrounding
+ # data and (optionally) drop into a breakpoint. this case shouldn't
+ # really ever happen if we're doing history retrieval correctly.
+ if (
+ step_size_s == 60
+ and feed_is_live.is_set()
+ ):
+ inow = round(time.time())
+ diff = inow - times[-1]
+ if abs(diff) > 60:
+ surr = array[-6:]
+ diff_in_mins = round(diff/60., ndigits=2)
+ log.warning(
+ f'STEP ERROR `{mkt.fqme}` for period {step_size_s}s:\n'
+ f'Off by `{diff}` seconds (or `{diff_in_mins}` mins)\n'
+ 'Surrounding 6 time stamps:\n'
+ f'{list(surr["time"])}\n'
+ 'Here are the surrounding 6 samples:\n'
+ f'{surr}\n'
+ )
+
+ # uncomment this for a hacker who wants to investigate
+ # this case manually..
+ # await tractor.breakpoint()
+
+ # frame's worth of sample-period-steps, in seconds
+ frame_size_s = len(array) * step_size_s
+
+ to_push = diff_history(
+ array,
+ timeframe,
+ start_dt,
+ end_dt,
+ last_tsdb_dt=last_tsdb_dt,
+ )
+
+ log.info(f'Pushing {to_push.size} to shm!')
+ shm.push(to_push, prepend=True)
+
+ # TODO: *** THIS IS A BUG ***
+ # we need to only broadcast to subscribers for this fqme..
+ # otherwise all fsps get reset on every chart..
+ await sampler_stream.send('broadcast_all')
+
+ # signal that backfilling to tsdb's end datum is complete
+ bf_done = trio.Event()
+
+ # let caller unblock and deliver latest history frame
+ task_status.started((
+ start_dt,
+ end_dt,
+ bf_done,
+ ))
+
+ # based on the sample step size, maybe load a certain amount history
+ if last_tsdb_dt is None:
+ if step_size_s not in (1, 60):
+ raise ValueError(
+ '`piker` only needs to support 1m and 1s sampling '
+ 'but ur api is trying to deliver a longer '
+ f'timeframe of {step_size_s} seconds..\n'
+ 'So yuh.. dun do dat brudder.'
+ )
+
+ # when no tsdb "last datum" is provided, we just load
+ # some near-term history.
+ periods = {
+ 1: {'days': 1},
+ 60: {'days': 14},
+ }
+
+ if tsdb_is_up:
+ # do a decently sized backfill and load it into storage.
+ periods = {
+ 1: {'days': 6},
+ 60: {'years': 6},
+ }
+
+ period_duration = periods[step_size_s]
+
+ # NOTE: manually set the "latest" datetime which we intend to
+ # backfill history "until" so as to adhere to the history
+ # settings above when the tsdb is detected as being empty.
+ last_tsdb_dt = start_dt.subtract(**period_duration)
+
+ # configure async query throttling
+ # rate = config.get('rate', 1)
+ # XXX: legacy from ``trimeter`` code but unsupported now.
+ # erlangs = config.get('erlangs', 1)
+
+ # avoid duplicate history frames with a set of datetime frame
+ # starts and associated counts of how many duplicates we see
+ # per time stamp.
+ starts: Counter[datetime] = Counter()
+
+ # inline sequential loop where we simply pass the
+ # last retrieved start dt to the next request as
+ # its end dt.
+ while end_dt > last_tsdb_dt:
+ log.debug(
+ f'Requesting {step_size_s}s frame ending in {start_dt}'
+ )
+
+ try:
+ array, next_start_dt, end_dt = await hist(
+ timeframe,
+ end_dt=start_dt,
+ )
+
+ # broker says there never was or is no more history to pull
+ except DataUnavailable:
+ log.warning(
+ f'NO-MORE-DATA: backend {mod.name} halted history!?'
+ )
+
+ # ugh, what's a better way?
+ # TODO: fwiw, we probably want a way to signal a throttle
+ # condition (eg. with ib) so that we can halt the
+ # request loop until the condition is resolved?
+ return
+
+ if (
+ next_start_dt in starts
+ and starts[next_start_dt] <= 6
+ ):
+ start_dt = min(starts)
+ log.warning(
+ f"{mkt.fqme}: skipping duplicate frame @ {next_start_dt}"
+ )
+ starts[start_dt] += 1
+ continue
+
+ elif starts[next_start_dt] > 6:
+ log.warning(
+ f'NO-MORE-DATA: backend {mod.name} before {next_start_dt}?'
+ )
+ return
+
+ # only update new start point if not-yet-seen
+ start_dt = next_start_dt
+ starts[start_dt] += 1
+
+ assert array['time'][0] == start_dt.timestamp()
+
+ diff = end_dt - start_dt
+ frame_time_diff_s = diff.seconds
+ expected_frame_size_s = frame_size_s + step_size_s
+
+ if frame_time_diff_s > expected_frame_size_s:
+
+ # XXX: query result includes a start point prior to our
+ # expected "frame size" and thus is likely some kind of
+ # history gap (eg. market closed period, outage, etc.)
+ # so just report it to console for now.
+ log.warning(
+ f'History frame ending @ {end_dt} appears to have a gap:\n'
+ f'{diff} ~= {frame_time_diff_s} seconds'
+ )
+
+ to_push = diff_history(
+ array,
+ timeframe,
+ start_dt,
+ end_dt,
+ last_tsdb_dt=last_tsdb_dt,
+ )
+ ln = len(to_push)
+ if ln:
+ log.info(f'{ln} bars for {start_dt} -> {end_dt}')
+
+ else:
+ log.warning(
+ f'{ln} BARS TO PUSH after diff?!: {start_dt} -> {end_dt}'
+ )
+
+ # bail gracefully on shm allocation overrun/full condition
+ try:
+ shm.push(to_push, prepend=True)
+ except ValueError:
+ log.info(
+ f'Shm buffer overrun on: {start_dt} -> {end_dt}?'
+ )
+ # can't push the entire frame? so
+ # push only the amount that can fit..
+ break
+
+ log.info(
+ f'Shm pushed {ln} frame:\n'
+ f'{start_dt} -> {end_dt}'
+ )
+
+ if (
+ storage is not None
+ and write_tsdb
+ ):
+ log.info(
+ f'Writing {ln} frame to storage:\n'
+ f'{start_dt} -> {end_dt}'
+ )
+
+ if mkt.dst.atype not in {'crypto', 'crypto_currency'}:
+ # for now, our table key schema is not including
+ # the dst[/src] source asset token.
+ col_sym_key: str = mkt.get_fqme(
+ delim_char='',
+ without_src=True,
+ )
+ else:
+ col_sym_key: str = mkt.get_fqme(delim_char='')
+
+ await storage.write_ohlcv(
+ col_sym_key,
+ to_push,
+ timeframe,
+ )
+
+ # TODO: can we only trigger this if the respective
+ # history is "in view"?!?
+
+ # XXX: extremely important, there can be no checkpoints
+ # in the block above to avoid entering new ``frames``
+ # values while we're pipelining the current ones to
+ # memory...
+ await sampler_stream.send('broadcast_all')
+
+ # short-circuit (for now)
+ bf_done.set()
+
+
+async def basic_backfill(
+ bus: _FeedsBus,
+ mod: ModuleType,
+ mkt: MktPair,
+ shms: dict[int, ShmArray],
+ sampler_stream: tractor.MsgStream,
+ feed_is_live: trio.Event,
+
+) -> None:
+
+ # do a legacy incremental backfill from the provider.
+ log.info('No TSDB (marketstored) found, doing basic backfill..')
+
+ # start the history backfill task; ``backfill_bars()`` is
+ # a required backend func and this must block until shm is
+ # filled with the first set of ohlc bars
+ for timeframe, shm in shms.items():
+ try:
+ await bus.nursery.start(
+ partial(
+ start_backfill,
+ mod,
+ mkt,
+ shm,
+ timeframe,
+ sampler_stream,
+ feed_is_live,
+ )
+ )
+ except DataUnavailable:
+ # XXX: timeframe not supported for backend
+ continue
+
+
+async def tsdb_backfill(
+ mod: ModuleType,
+ marketstore: ModuleType,
+ bus: _FeedsBus,
+ storage: Storage,
+ mkt: MktPair,
+ shms: dict[int, ShmArray],
+ sampler_stream: tractor.MsgStream,
+ feed_is_live: trio.Event,
+
+ task_status: TaskStatus[
+ tuple[ShmArray, ShmArray]
+ ] = trio.TASK_STATUS_IGNORED,
+
+) -> None:
+
+ # TODO: this should be used verbatim for the pure
+ # shm backfiller approach below.
+ dts_per_tf: dict[int, datetime] = {}
+ fqme: str = mkt.fqme
+
+ # start history analysis and load missing new data via the backend.
+ for timeframe, shm in shms.items():
+ # loads a (large) frame of data from the tsdb depending
+ # on the db's query size limit.
+ tsdb_history, first_tsdb_dt, last_tsdb_dt = await storage.load(
+ fqme,
+ timeframe=timeframe,
+ )
+
+ try:
+ (
+ latest_start_dt,
+ latest_end_dt,
+ bf_done,
+ ) = await bus.nursery.start(
+ partial(
+ start_backfill,
+ mod,
+ mkt,
+ shm,
+ timeframe,
+ sampler_stream,
+ feed_is_live,
+
+ last_tsdb_dt=last_tsdb_dt,
+ tsdb_is_up=True,
+ storage=storage,
+ )
+ )
+ except DataUnavailable:
+ # XXX: timeframe not supported for backend
+ dts_per_tf[timeframe] = (
+ tsdb_history,
+ last_tsdb_dt,
+ None,
+ None,
+ None,
+ )
+ continue
+
+ # tsdb_history = series.get(timeframe)
+ dts_per_tf[timeframe] = (
+ tsdb_history,
+ last_tsdb_dt,
+ latest_start_dt,
+ latest_end_dt,
+ bf_done,
+ )
+
+ # if len(hist_shm.array) < 2:
+ # TODO: there's an edge case here to solve where if the last
+ # frame before market close (at least on ib) was pushed and
+ # there was only "1 new" row pushed from the first backfill
+ # query-iteration, then the sample step sizing calcs will
+ # break upstream from here since you can't diff without at least
+ # 2 steps... probably should also add logic to compute from
+ # the tsdb series and stash that somewhere as meta data on
+ # the shm buffer?.. not sure.
+
+ # unblock the feed bus management task
+ # assert len(shms[1].array)
+ task_status.started()
+
+ async def back_load_from_tsdb(
+ timeframe: int,
+ shm: ShmArray,
+ ):
+ (
+ tsdb_history,
+ last_tsdb_dt,
+ latest_start_dt,
+ latest_end_dt,
+ bf_done,
+ ) = dts_per_tf[timeframe]
+
+ # sync to backend history task's query/load completion
+ if bf_done:
+ await bf_done.wait()
+
+ # TODO: eventually it'd be nice to not require a shm array/buffer
+ # to accomplish this.. maybe we can do some kind of tsdb direct to
+ # graphics format eventually in a child-actor?
+
+ # TODO: see if there's faster multi-field reads:
+ # https://numpy.org/doc/stable/user/basics.rec.html#accessing-multiple-fields
+ # re-index with a `time` and index field
+ prepend_start = shm._first.value
+ array = shm.array
+ if len(array):
+ shm_last_dt = pendulum.from_timestamp(shm.array[0]['time'])
+ else:
+ shm_last_dt = None
+
+ if last_tsdb_dt:
+ assert shm_last_dt >= last_tsdb_dt
+
+ # do a diff against the start index of the last frame of history
+ # and only fill in an amount of datums from the tsdb which allows
+ # the most recent data to be loaded into mem *before* the tsdb data.
+ if (
+ last_tsdb_dt
+ and latest_start_dt
+ ):
+ backfilled_size_s = (
+ latest_start_dt - last_tsdb_dt
+ ).seconds
+ # if the shm buffer len is not large enough to contain
+ # all missing data between the most recent backend-queried frame
+ # and the most recent dt-index in the db, we warn that we only
+ # want to load a portion of the next tsdb query to fill that
+ # space.
+ log.info(
+ f'{backfilled_size_s} seconds worth of {timeframe}s loaded'
+ )
+
+ # Load TSDB history into shm buffer (for display) if there is
+ # remaining buffer space.
+
+ if (
+ len(tsdb_history)
+ ):
+ # load the first (smaller) bit of history originally loaded
+ # above from ``Storage.load()``.
+ to_push = tsdb_history[-prepend_start:]
+ shm.push(
+ to_push,
+
+ # insert the history prior to a "day's worth" of samples
+ # to leave some real-time buffer space at the end.
+ prepend=True,
+ # update_first=False,
+ # start=prepend_start,
+ field_map=marketstore.ohlc_key_map,
+ )
+
+ tsdb_last_frame_start = tsdb_history['Epoch'][0]
+
+ if timeframe == 1:
+ times = shm.array['time']
+ assert (times[1] - times[0]) == 1
+
+ # load as much from storage into shm as possible (depends on
+ # user's shm size settings).
+ while shm._first.value > 0:
+
+ tsdb_history = await storage.read_ohlcv(
+ fqme,
+ timeframe=timeframe,
+ end=tsdb_last_frame_start,
+ )
+
+ # empty query
+ if not len(tsdb_history):
+ break
+
+ next_start = tsdb_history['Epoch'][0]
+ if next_start >= tsdb_last_frame_start:
+ # no earlier data detected
+ break
+ else:
+ tsdb_last_frame_start = next_start
+
+ prepend_start = shm._first.value
+ to_push = tsdb_history[-prepend_start:]
+
+ # insert the history prior to a "day's worth" of samples
+ # to leave some real-time buffer space at the end.
+ shm.push(
+ to_push,
+ prepend=True,
+ field_map=marketstore.ohlc_key_map,
+ )
+ log.info(f'Loaded {to_push.shape} datums from storage')
+
+ # manually trigger step update to update charts/fsps
+ # which need an incremental update.
+ # NOTE: the way this works is super duper
+ # un-intuitive right now:
+ # - the broadcaster fires a msg to the fsp subsystem.
+ # - fsp subsys then checks for a sample step diff and
+ # possibly recomputes prepended history.
+ # - the fsp then sends back to the parent actor
+ # (usually a chart showing graphics for said fsp)
+ # which tells the chart to conduct a manual full
+ # graphics loop cycle.
+ await sampler_stream.send('broadcast_all')
+
+ # TODO: write new data to the tsdb to be ready for the next read.
+
+ # backload from the db (concurrently per timeframe) once backfilling
+ # of recent data from the backend provider completes (see the
+ # ``bf_done.wait()`` call).
+ async with trio.open_nursery() as nurse:
+ for timeframe, shm in shms.items():
+ nurse.start_soon(
+ back_load_from_tsdb,
+ timeframe,
+ shm,
+ )
+
+
+async def manage_history(
+ mod: ModuleType,
+ bus: _FeedsBus,
+ mkt: MktPair,
+ some_data_ready: trio.Event,
+ feed_is_live: trio.Event,
+ timeframe: float = 60, # in seconds
+
+ task_status: TaskStatus[
+ tuple[ShmArray, ShmArray]
+ ] = trio.TASK_STATUS_IGNORED,
+
+) -> None:
+ '''
+ Load and manage historical data including the loading of any
+ available series from `marketstore` as well as conducting real-time
+ update of both that existing db and the allocated shared memory
+ buffer.
+
+ '''
+
+ # TODO: is there a way to make each shm file key
+ # actor-tree-discovery-addr unique so we avoid collisions
+ # when doing tests which also allocate shms for certain instruments
+ # that may be in use on the system by some other running daemons?
+ # from tractor._state import _runtime_vars
+ # port = _runtime_vars['_root_mailbox'][1]
+
+ uid = tractor.current_actor().uid
+ name, uuid = uid
+ # NOTE: `.removesuffix()` (not `.rstrip()`) since we want to drop
+ # the `.{brokername}` suffix, not strip a trailing char set.
+ service = name.removesuffix(f'.{mod.name}')
+
+ fqme: str = mkt.get_fqme(delim_char='')
+
+ # (maybe) allocate shm array for this broker/symbol which will
+ # be used for fast near-term history capture and processing.
+ hist_shm, opened = maybe_open_shm_array(
+ key=f'piker.{service}[{uuid[:16]}].{fqme}.hist',
+
+ # use any broker defined ohlc dtype:
+ dtype=getattr(mod, '_ohlc_dtype', base_iohlc_dtype),
+
+ # we expect the sub-actor to write
+ readonly=False,
+ )
+ hist_zero_index = hist_shm.index - 1
+
+ # TODO: history validation
+ if not opened:
+ raise RuntimeError(
+ "Persistent shm for sym was already open?!"
+ )
+
+ rt_shm, opened = maybe_open_shm_array(
+ key=f'piker.{service}[{uuid[:16]}].{fqme}.rt',
+
+ # use any broker defined ohlc dtype:
+ dtype=getattr(mod, '_ohlc_dtype', base_iohlc_dtype),
+
+ # we expect the sub-actor to write
+ readonly=False,
+ size=3*_secs_in_day,
+ )
+
+ # (for now) set the rt (hft) shm array with space to prepend
+ # only a few days worth of 1s history.
+ days = 2
+ start_index = days*_secs_in_day
+ rt_shm._first.value = start_index
+ rt_shm._last.value = start_index
+ rt_zero_index = rt_shm.index - 1
+
+ if not opened:
+ raise RuntimeError(
+ "Persistent shm for sym was already open?!"
+ )
+
+ # register 1s and 1m buffers with the global incrementer task
+ async with open_sample_stream(
+ period_s=1.,
+ shms_by_period={
+ 1.: rt_shm.token,
+ 60.: hist_shm.token,
+ },
+
+ # NOTE: we want to only open a stream for doing broadcasts on
+ # backfill operations, not receive the sample index-stream
+ # (since there's no code in this data feed layer that needs to
+ # consume it).
+ open_index_stream=True,
+ sub_for_broadcasts=False,
+
+ ) as sample_stream:
+
+ open_history_client = getattr(
+ mod,
+ 'open_history_client',
+ None,
+ )
+ assert open_history_client
+
+ tsdb_is_up: bool = False
+ try_remote_tsdb: bool = False
+
+ conf, path = config.load('conf', touch_if_dne=True)
+ net = conf.get('network')
+ if net:
+ tsdbconf = net.get('tsdb')
+
+ # lookup backend tsdb module by name and load any user service
+ # settings for connecting to the tsdb service.
+ tsdb_backend: str = tsdbconf.pop('backend')
+ tsdb_host: str = tsdbconf['host']
+
+ # TODO: import and load storagemod by name
+ # mod = get_storagemod(tsdb_backend)
+ from ..service import marketstore
+ if tsdb_host == 'localhost':
+ log.info(f'Scanning for existing `{tsdb_backend}`')
+ tsdb_is_up: bool = await check_for_service(f'{tsdb_backend}d')
+
+ else:
+ try_remote_tsdb: bool = True
+
+ if (
+ (
+ tsdb_is_up
+ or try_remote_tsdb
+ )
+ and (
+ opened
+ and open_history_client
+ )
+ ):
+ log.info('Found existing `marketstored`')
+
+ async with (
+ marketstore.open_storage_client(
+ **tsdbconf
+ ) as storage,
+ ):
+ # TODO: drop returning the output that we pass in?
+ await bus.nursery.start(
+ tsdb_backfill,
+ mod,
+ marketstore,
+ bus,
+ storage,
+ mkt,
+ {
+ 1: rt_shm,
+ 60: hist_shm,
+ },
+ sample_stream,
+ feed_is_live,
+ )
+
+ # yield back after client connect with filled shm
+ task_status.started((
+ hist_zero_index,
+ hist_shm,
+ rt_zero_index,
+ rt_shm,
+ ))
+
+ # indicate to caller that feed can be delivered to
+ # remote requesting client since we've loaded history
+ # data that can be used.
+ some_data_ready.set()
+
+ # history retrieval loop depending on user interaction
+ # and thus a small RPC-proto for remotely controlling
+ # what data is loaded for viewing.
+ await trio.sleep_forever()
+
+ # load less history if no tsdb can be found
+ elif (
+ not tsdb_is_up
+ and opened
+ ):
+ await basic_backfill(
+ bus,
+ mod,
+ mkt,
+ {
+ 1: rt_shm,
+ 60: hist_shm,
+ },
+ sample_stream,
+ feed_is_live,
+ )
+ task_status.started((
+ hist_zero_index,
+ hist_shm,
+ rt_zero_index,
+ rt_shm,
+ ))
+ some_data_ready.set()
+ await trio.sleep_forever()
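To make the dedup step in the new `history.py` concrete, here is a tiny self-contained illustration (synthetic records, not part of the patch) of what `diff_history()` does before each `shm.push()`/tsdb write: rows at or before the last tsdb datum are dropped so only genuinely new samples get appended.

```python
# synthetic example of the `diff_history()` filter defined above
import numpy as np
import pendulum

frame = np.array(
    [(t, 1.0) for t in (100, 160, 220, 280)],
    dtype=[('time', 'f8'), ('close', 'f8')],
)
last_tsdb_dt = pendulum.from_timestamp(160)

# keep only rows strictly newer than the last datum already in the tsdb
new_rows = frame[frame['time'] > last_tsdb_dt.timestamp()]
assert list(new_rows['time']) == [220.0, 280.0]
```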
diff --git a/piker/data/ingest.py b/piker/data/ingest.py
index afb5fc4a..c6f50135 100644
--- a/piker/data/ingest.py
+++ b/piker/data/ingest.py
@@ -23,7 +23,7 @@ Api layer likely in here...
from types import ModuleType
from importlib import import_module
-from ..log import get_logger
+from ._util import get_logger
log = get_logger(__name__)
diff --git a/piker/data/types.py b/piker/data/types.py
index 1359526c..7a3bc6bb 100644
--- a/piker/data/types.py
+++ b/piker/data/types.py
@@ -19,7 +19,6 @@ Built-in (extension) types.
"""
import sys
-from typing import Optional
from pprint import pformat
import msgspec
@@ -59,7 +58,7 @@ class Struct(
def copy(
self,
- update: Optional[dict] = None,
+ update: dict | None = None,
) -> msgspec.Struct:
'''
@@ -80,9 +79,11 @@ class Struct(
msgspec.msgpack.Encoder().encode(self)
)
+ # NOTE XXX: this won't work on frozen types!
+ # use ``.copy()`` above in such cases.
def typecast(
self,
- # fields: Optional[list[str]] = None,
+ # fields: list[str] | None = None,
) -> None:
for fname, ftype in self.__annotations__.items():
setattr(self, fname, ftype(getattr(self, fname)))
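The `typecast()` caveat added above is easy to trip over; a msgspec-only sketch (independent of piker's `Struct` subclass) shows why frozen structs reject the per-field assignment that `typecast()` performs:

```python
# minimal demo: frozen msgspec structs raise on attribute assignment,
# so a field-coercing copy must be used instead of `.typecast()`.
import msgspec

class Frozen(msgspec.Struct, frozen=True):
    size: float

f = Frozen(size=1)           # no coercion happens at construction
try:
    f.size = float(f.size)   # the per-field setattr `.typecast()` does
except AttributeError as err:
    print('frozen struct rejects setattr:', err)
```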
diff --git a/piker/data/validate.py b/piker/data/validate.py
new file mode 100644
index 00000000..321b4296
--- /dev/null
+++ b/piker/data/validate.py
@@ -0,0 +1,264 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+'''
+Data feed synchronization protocols, init msgs, and general
+data-provider-backend-agnostic schema definitions.
+
+'''
+from decimal import Decimal
+from pprint import pformat
+from types import ModuleType
+from typing import (
+ Any,
+ Callable,
+)
+
+from msgspec import field
+
+from .types import Struct
+from ..accounting import (
+ Asset,
+ MktPair,
+)
+from ._util import log
+
+
+class FeedInitializationError(ValueError):
+ '''
+ Live data feed setup failed due to API / msg incompatibility!
+
+ '''
+
+
+class FeedInit(Struct, frozen=True):
+ '''
+ A stringent data provider startup msg schema validator.
+
+ The fields defined here are matched with those absolutely required
+ from each backend broker/data provider.
+
+ '''
+ mkt_info: MktPair
+
+ # NOTE: only field we use rn in ``.data.feed``
+ # TODO: maybe make a SamplerConfig(Struct)?
+ shm_write_opts: dict[str, Any] = field(
+ default_factory=lambda: {
+ 'has_vlm': True,
+ 'sum_tick_vlm': True,
+ })
+
+# XXX: we group backend endpoints into 3
+# groups to determine "degrees" of functionality.
+_eps: dict[str, list[str]] = {
+
+ # basic API `Client` layer
+ 'middleware': [
+ 'get_client',
+ ],
+
+ # (live) data streaming / loading / search
+ 'datad': [
+ 'get_mkt_info',
+ 'open_history_client',
+ 'open_symbol_search',
+ 'stream_quotes',
+ ],
+
+ # live order control and trading
+ 'brokerd': [
+ 'trades_dialogue',
+ # TODO: ledger normalizer helper?
+ # norm_trades(records: dict[str, Any]) -> TransactionLedger)
+ ],
+}
+
+
+def validate_backend(
+ mod: ModuleType,
+ syms: list[str],
+ init_msgs: list[FeedInit] | dict[str, dict[str, Any]],
+
+ # TODO: do a module method scan and report mismatches.
+ check_eps: bool = False,
+
+ api_log_msg_level: str = 'critical'
+
+) -> FeedInit:
+ '''
+ Fail on malformed live quotes feed config/init or warn on changes
+ that haven't been implemented by this backend yet.
+
+ '''
+ for daemon_name, eps in _eps.items():
+ for name in eps:
+ ep: Callable = getattr(
+ mod,
+ name,
+ None,
+ )
+ if ep is None:
+ log.warning(
+ f'Provider backend {mod.name} is missing '
+ f'{daemon_name} support :(\n'
+ f'The following endpoint is missing: {name}'
+ )
+
+ inits: list[
+ FeedInit | dict[str, Any]
+ ] = init_msgs
+
+ # convert to list if from old dict-style
+ if isinstance(init_msgs, dict):
+ inits = list(init_msgs.values())
+
+ init: FeedInit | dict[str, Any]
+ for i, init in enumerate(inits):
+
+ # XXX: eventually this WILL NOT necessarily be true.
+ if i > 0:
+ assert not len(init_msgs) == 1
+ if isinstance(init_msgs, dict):
+ keys: set = set(init_msgs.keys()) - set(syms)
+ raise FeedInitializationError(
+ 'TOO MANY INIT MSGS!\n'
+ f'Unexpected keys: {keys}\n'
+ 'ALL MSGS:\n'
+ f'{pformat(init_msgs)}\n'
+ )
+ else:
+ raise FeedInitializationError(
+ 'TOO MANY INIT MSGS!\n'
+ f'{pformat(init_msgs)}\n'
+ )
+
+ # TODO: once all backends are updated we can remove this branching.
+ rx_msg: bool = False
+ warn_msg: str = ''
+ if not isinstance(init, FeedInit):
+ warn_msg += (
+ '\n'
+ '--------------------------\n'
+ ':::DEPRECATED API STYLE:::\n'
+ '--------------------------\n'
+ f'`{mod.name}.stream_quotes()` should deliver '
+ '`.started(FeedInit)`\n'
+ f'|-> CURRENTLY it is using DEPRECATED `.started(dict)` style!\n'
+ f'|-> SEE `FeedInit` in `piker.data.validate`\n'
+ '--------------------------------------------\n'
+ )
+ else:
+ rx_msg = True
+
+ # verify feed init state / schema
+ bs_fqme: str # backend specific fqme
+ mkt: MktPair
+
+ match init:
+
+ # backend is using old dict msg delivery
+ case {
+ 'symbol_info': dict(symbol_info),
+ 'fqsn': bs_fqme,
+ } | {
+ 'mkt_info': dict(symbol_info),
+ 'fqsn': bs_fqme,
+ }:
+ symbol_info: dict
+ warn_msg += (
+ 'It may also be still using the legacy `Symbol` style API\n'
+ 'IT SHOULD BE PORTED TO THE NEW '
+ '`.accounting._mktinfo.MktPair`\n'
+ 'STATTTTT!!!\n'
+ )
+
+ # XXX use default legacy (aka discrete precision) mkt
+ # price/size_ticks if none delivered.
+ price_tick = symbol_info.get(
+ 'price_tick_size',
+ Decimal('0.01'),
+ )
+ size_tick = symbol_info.get(
+ 'lot_tick_size',
+ Decimal('1'),
+ )
+ bs_mktid = init.get('bs_mktid') or bs_fqme
+ mkt = MktPair.from_fqme(
+ fqme=f'{bs_fqme}.{mod.name}',
+
+ price_tick=price_tick,
+ size_tick=size_tick,
+
+ bs_mktid=str(bs_mktid),
+ _atype=symbol_info['asset_type']
+ )
+
+ # backend is using new `MktPair` but not entirely
+ case {
+ 'mkt_info': MktPair(
+ dst=Asset(),
+ ) as mkt,
+ 'fqsn': bs_fqme,
+ }:
+ warn_msg += (
+ f'{mod.name} in API compat transition?\n'
+ "It's half dict, half man..\n"
+ '-------------------------------------\n'
+ )
+
+ case FeedInit(
+ mkt_info=MktPair(dst=Asset()) as mkt,
+ shm_write_opts=dict(shm_opts),
+ ) as init:
+ name: str = mod.name
+ log.info(
+ f"{name}'s `MktPair` info:\n"
+ f'{pformat(mkt.to_dict())}\n'
+ f'shm conf: {pformat(shm_opts)}\n'
+ )
+
+ case _:
+ raise FeedInitializationError(init)
+
+ # build a msg if we received a dict for input.
+ if not rx_msg:
+ assert bs_fqme in mkt.fqme
+ init = FeedInit(
+ mkt_info=mkt,
+ shm_write_opts=init.get('shm_write_opts'),
+ )
+
+ # `MktPair` value audits
+ mkt = init.mkt_info
+ assert mkt.type_key
+
+ # backend is using new `MktPair` but not embedded `Asset` types
+ # for the .src/.dst..
+ if not isinstance(mkt.src, Asset):
+ warn_msg += (
+ f'ALSO, {mod.name.upper()} should try to deliver\n'
+ 'the new `MktPair.src: Asset` field!\n'
+ '-----------------------------------------------\n'
+ )
+
+ # complain about any non-idealities
+ if warn_msg:
+ # TODO: would be nice to register an API_COMPAT level or something,
+ # maybe in cyan, for this in general throughout piker no?
+ logmeth = getattr(log, api_log_msg_level)
+ logmeth(warn_msg)
+
+ return init.copy()
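For backend authors, here is a hedged sketch of what `stream_quotes()` is now expected to deliver via `.started()` under the new schema: a single-element list of `FeedInit` instead of the legacy `{sym: {'symbol_info': ...}}` dict. The `kraken`-style fqme and tick sizes below are purely illustrative; the `MktPair.from_fqme()` kwargs mirror the legacy-conversion branch in `validate_backend()` above.

```python
# illustrative only: values and backend name are assumptions, the shape
# of the msg is what `validate_backend()` expects going forward.
from decimal import Decimal

from piker.accounting import MktPair
from piker.data.validate import FeedInit

def mk_init_msgs(bs_fqme: str = 'xbtusd') -> list[FeedInit]:
    mkt = MktPair.from_fqme(
        fqme=f'{bs_fqme}.kraken',
        price_tick=Decimal('0.1'),
        size_tick=Decimal('0.00000001'),
        bs_mktid=bs_fqme,
        _atype='crypto',
    )
    return [
        FeedInit(
            mkt_info=mkt,
            shm_write_opts={'sum_tick_vlm': False, 'has_vlm': True},
        ),
    ]
```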
diff --git a/piker/fsp/_api.py b/piker/fsp/_api.py
index 9654a2a1..11d1e7dc 100644
--- a/piker/fsp/_api.py
+++ b/piker/fsp/_api.py
@@ -174,15 +174,6 @@ def fsp(
return Fsp(wrapped, outputs=(wrapped.__name__,))
-def mk_fsp_shm_key(
- sym: str,
- target: Fsp
-
-) -> str:
- uid = tractor.current_actor().uid
- return f'{sym}.fsp.{target.name}.{".".join(uid)}'
-
-
def maybe_mk_fsp_shm(
sym: str,
target: Fsp,
@@ -206,7 +197,10 @@ def maybe_mk_fsp_shm(
[(field_name, float) for field_name in target.outputs]
)
- key = mk_fsp_shm_key(sym, target)
+ # (attempt to) uniquely key the fsp shm buffers
+ actor_name, uuid = tractor.current_actor().uid
+ uuid_snip: str = uuid[:16]
+ key: str = f'piker.{actor_name}[{uuid_snip}].{sym}.{target.name}'
shm, opened = maybe_open_shm_array(
key,
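A standalone sketch of the new fsp shm key scheme introduced above (the helper name and example values are assumptions): buffers are namespaced by actor name plus a uuid snippet so concurrent piker instances (eg. a test run beside a live daemon) can't collide.

```python
# outside an actor we fake the uid; inside one it comes from
# `tractor.current_actor().uid` as in the patched code above.
from uuid import uuid4

def mk_fsp_shm_key(
    actor_name: str,
    uuid: str,
    sym: str,
    fsp_name: str,
) -> str:
    return f'piker.{actor_name}[{uuid[:16]}].{sym}.{fsp_name}'

key = mk_fsp_shm_key('fsp_engine', str(uuid4()), 'btcusdt.binance', 'rsi')
print(key)  # eg. piker.fsp_engine[1a2b3c4d5e6f7a8b].btcusdt.binance.rsi
```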
diff --git a/piker/fsp/_engine.py b/piker/fsp/_engine.py
index 37852cfc..9a6ebddb 100644
--- a/piker/fsp/_engine.py
+++ b/piker/fsp/_engine.py
@@ -45,7 +45,7 @@ from ..data._sampling import (
_default_delay_s,
open_sample_stream,
)
-from ..data._source import Symbol
+from ..accounting import MktPair
from ._api import (
Fsp,
_load_builtins,
@@ -85,7 +85,7 @@ async def filter_quotes_by_sym(
async def fsp_compute(
- symbol: Symbol,
+ mkt: MktPair,
flume: Flume,
quote_stream: trio.abc.ReceiveChannel,
@@ -104,14 +104,14 @@ async def fsp_compute(
disabled=True
)
- fqsn = symbol.front_fqsn()
+ fqme = mkt.fqme
out_stream = func(
# TODO: do we even need this if we do the feed api right?
# shouldn't a local stream do this before we get a handle
# to the async iterable? it's that or we do some kinda
# async itertools style?
- filter_quotes_by_sym(fqsn, quote_stream),
+ filter_quotes_by_sym(fqme, quote_stream),
# XXX: currently the ``ohlcv`` arg
flume.rt_shm,
@@ -271,7 +271,7 @@ async def cascade(
ctx: tractor.Context,
# data feed key
- fqsn: str,
+ fqme: str,
src_shm_token: dict,
dst_shm_token: tuple[str, np.dtype],
@@ -329,7 +329,7 @@ async def cascade(
# open a data feed stream with requested broker
feed: Feed
async with data.feed.maybe_open_feed(
- [fqsn],
+ [fqme],
# TODO throttle tick outputs from *this* daemon since
# it'll emit tons of ticks due to the throttle only
@@ -339,8 +339,8 @@ async def cascade(
) as feed:
- flume = feed.flumes[fqsn]
- symbol = flume.symbol
+ flume = feed.flumes[fqme]
+ mkt = flume.mkt
assert src.token == flume.rt_shm.token
profiler(f'{func}: feed up')
@@ -352,7 +352,7 @@ async def cascade(
fsp_target = partial(
fsp_compute,
- symbol=symbol,
+ mkt=mkt,
flume=flume,
quote_stream=flume.stream,
diff --git a/piker/service/__init__.py b/piker/service/__init__.py
index 3b9767cd..e6a17da0 100644
--- a/piker/service/__init__.py
+++ b/piker/service/__init__.py
@@ -32,8 +32,6 @@ from ._registry import ( # noqa
)
from ._daemon import ( # noqa
maybe_spawn_daemon,
- spawn_brokerd,
- maybe_spawn_brokerd,
spawn_emsd,
maybe_open_emsd,
)
@@ -43,6 +41,10 @@ from ._actor_runtime import (
open_pikerd,
get_tractor_runtime_kwargs,
)
+from ..brokers._daemon import (
+ spawn_brokerd,
+ maybe_spawn_brokerd,
+)
__all__ = [
diff --git a/piker/service/_actor_runtime.py b/piker/service/_actor_runtime.py
index b92ad221..78938a5f 100644
--- a/piker/service/_actor_runtime.py
+++ b/piker/service/_actor_runtime.py
@@ -19,8 +19,6 @@
"""
from __future__ import annotations
-from pprint import pformat
-from functools import partial
import os
from typing import (
Optional,
@@ -34,8 +32,7 @@ from contextlib import (
import tractor
import trio
-from ..log import (
- get_logger,
+from ._util import (
get_console_log,
)
from ._mngr import (
@@ -47,8 +44,6 @@ from ._registry import ( # noqa
open_registry,
)
-log = get_logger(__name__)
-
def get_tractor_runtime_kwargs() -> dict[str, Any]:
'''
@@ -135,8 +130,11 @@ _root_dname = 'pikerd'
_root_modules = [
__name__,
'piker.service._daemon',
+ 'piker.brokers._daemon',
+
'piker.clearing._ems',
'piker.clearing._client',
+
'piker.data._sampling',
]
@@ -151,11 +149,6 @@ async def open_pikerd(
debug_mode: bool = False,
registry_addr: None | tuple[str, int] = None,
- # db init flags
- tsdb: bool = False,
- es: bool = False,
- drop_root_perms_for_ahab: bool = True,
-
**kwargs,
) -> Services:
@@ -185,57 +178,16 @@ async def open_pikerd(
trio.open_nursery() as service_nursery,
):
if root_actor.accept_addr != reg_addr:
- raise RuntimeError(f'Daemon failed to bind on {reg_addr}!?')
+ raise RuntimeError(
+ f'`pikerd` failed to bind on {reg_addr}!\n'
+ 'Maybe you have another daemon already running?'
+ )
# assign globally for future daemon/task creation
Services.actor_n = actor_nursery
Services.service_n = service_nursery
Services.debug_mode = debug_mode
- if tsdb:
- from ._ahab import start_ahab
- from .marketstore import start_marketstore
-
- log.info('Spawning `marketstore` supervisor')
- ctn_ready, config, (cid, pid) = await service_nursery.start(
- partial(
- start_ahab,
- 'marketstored',
- start_marketstore,
- loglevel=loglevel,
- drop_root_perms=drop_root_perms_for_ahab,
- )
-
- )
- log.info(
- f'`marketstored` up!\n'
- f'pid: {pid}\n'
- f'container id: {cid[:12]}\n'
- f'config: {pformat(config)}'
- )
-
- if es:
- from ._ahab import start_ahab
- from .elastic import start_elasticsearch
-
- log.info('Spawning `elasticsearch` supervisor')
- ctn_ready, config, (cid, pid) = await service_nursery.start(
- partial(
- start_ahab,
- 'elasticsearch',
- start_elasticsearch,
- loglevel=loglevel,
- drop_root_perms=drop_root_perms_for_ahab,
- )
- )
-
- log.info(
- f'`elasticsearch` up!\n'
- f'pid: {pid}\n'
- f'container id: {cid[:12]}\n'
- f'config: {pformat(config)}'
- )
-
try:
yield Services
@@ -275,9 +227,6 @@ async def open_pikerd(
async def maybe_open_pikerd(
loglevel: Optional[str] = None,
registry_addr: None | tuple = None,
- tsdb: bool = False,
- es: bool = False,
- drop_root_perms_for_ahab: bool = True,
**kwargs,
@@ -331,11 +280,6 @@ async def maybe_open_pikerd(
loglevel=loglevel,
registry_addr=registry_addr,
- # ahabd (docker super) specific controls
- tsdb=tsdb,
- es=es,
- drop_root_perms_for_ahab=drop_root_perms_for_ahab,
-
# passthrough to ``tractor`` init
**kwargs,
diff --git a/piker/service/_ahab.py b/piker/service/_ahab.py
index 7c3133e1..49d72de6 100644
--- a/piker/service/_ahab.py
+++ b/piker/service/_ahab.py
@@ -19,6 +19,7 @@ Supervisor for ``docker`` with included async and SC wrapping
to ensure a cancellable container lifetime system.
'''
+from __future__ import annotations
from collections import ChainMap
from functools import partial
import os
@@ -48,14 +49,13 @@ from requests.exceptions import (
ReadTimeout,
)
-from ..log import (
- get_logger,
+from ._mngr import Services
+from ._util import (
+ log, # sub-sys logger
get_console_log,
)
from .. import config
-log = get_logger(__name__)
-
class DockerNotStarted(Exception):
'Prolly you dint start da daemon bruh'
@@ -189,7 +189,11 @@ class Container:
and entry not in seen_so_far
):
seen_so_far.add(entry)
- getattr(log, level.lower(), log.error)(f'{msg}')
+ getattr(
+ log,
+ level.lower(),
+ log.error
+ )(f'{msg}')
if level == 'fatal':
raise ApplicationLogError(msg)
@@ -265,8 +269,10 @@ class Container:
start = time.time()
for _ in range(6):
- with trio.move_on_after(0.5) as cs:
- log.cancel('polling for CNTR logs...')
+ with trio.move_on_after(1) as cs:
+ log.cancel(
+ f'polling for CNTR logs for {stop_predicate}..'
+ )
try:
await self.process_logs_until(
@@ -330,16 +336,13 @@ class Container:
async def open_ahabd(
ctx: tractor.Context,
endpoint: str, # ns-pointer str-msg-type
- loglevel: str | None = 'cancel',
+ loglevel: str | None = None,
- **kwargs,
+ **ep_kwargs,
) -> None:
- log = get_console_log(
- loglevel,
- name=__name__,
- )
+ log = get_console_log(loglevel or 'cancel')
async with open_docker() as client:
@@ -352,7 +355,7 @@ async def open_ahabd(
cntr_config,
start_pred,
stop_pred,
- ) = ep_func(client)
+ ) = ep_func(client, **ep_kwargs)
cntr = Container(dcntr)
conf: ChainMap[str, Any] = ChainMap(
@@ -448,10 +451,17 @@ async def open_ahabd(
)
-async def start_ahab(
+@acm
+async def start_ahab_service(
+ services: Services,
service_name: str,
+
+ # endpoint config passed as **kwargs
endpoint: Callable[docker.DockerClient, DockerContainer],
+ ep_kwargs: dict,
loglevel: str | None = 'cancel',
+
+ # supervisor config
drop_root_perms: bool = True,
task_status: TaskStatus[
@@ -472,6 +482,9 @@ async def start_ahab(
is started.
'''
+ # global log
+ log = get_console_log(loglevel or 'cancel')
+
cn_ready = trio.Event()
try:
async with tractor.open_nursery() as an:
@@ -500,21 +513,28 @@ async def start_ahab(
)[2] # named user's uid
)
- async with portal.open_context(
- open_ahabd,
+ cs, first = await services.start_service_task(
+ name=service_name,
+ portal=portal,
+
+ # rest: endpoint inputs
+ target=open_ahabd,
endpoint=str(NamespacePath.from_ref(endpoint)),
loglevel='cancel',
- ) as (ctx, first):
+ **ep_kwargs,
+ )
- cid, pid, cntr_config = first
+ cid, pid, cntr_config = first
- task_status.started((
+ try:
+ yield (
cn_ready,
cntr_config,
(cid, pid),
- ))
-
- await trio.sleep_forever()
+ )
+ finally:
+ log.info(f'Cancelling ahab service `{service_name}`')
+ await services.cancel_service(service_name)
# since we demoted root perms in this parent
# we'll get a perms error on proc cleanup in
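
The `_ahab.py` refactor above turns the old `start_ahab()` task into a `start_ahab_service()` async context manager that registers the container task with the `Services` manager and cancels it on exit. A rough usage sketch, assuming the calling task runs where the `Services` nurseries are already assigned (i.e. inside `open_pikerd()`) and that the class-level `Services` singleton is what gets passed, mirroring the `start_ahab_daemon()` wrappers added further below:

# illustrative sketch only; the calling context and the passing of the
# `Services` class itself are assumptions based on the wrappers below.
from piker.service._ahab import start_ahab_service
from piker.service._mngr import Services
from piker.service.marketstore import start_marketstore, _config


async def maybe_start_tsdb(loglevel: str = 'info') -> None:
    async with start_ahab_service(
        Services,                  # service manager (class-level singleton)
        'marketstored',            # service / daemon name
        start_marketstore,         # docker endpoint; client passed at runtime
        ep_kwargs={'user_config': _config.copy()},
        loglevel=loglevel,
    ) as (ctn_ready, config, (cid, pid)):
        # the supervisor reports the container id/pid once the remote
        # `open_ahabd()` ctx has started.
        print(f'`marketstored` up: pid {pid}, container {cid[:12]}')

    # leaving the block cancels the task via `Services.cancel_service()`.
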
diff --git a/piker/service/_daemon.py b/piker/service/_daemon.py
index 45d6cb81..df94a992 100644
--- a/piker/service/_daemon.py
+++ b/piker/service/_daemon.py
@@ -20,7 +20,6 @@ Daemon-actor spawning "endpoint-hooks".
"""
from __future__ import annotations
from typing import (
- Optional,
Callable,
Any,
)
@@ -30,41 +29,28 @@ from contextlib import (
import tractor
-from ..log import (
- get_logger,
- get_console_log,
+from ._util import (
+ log, # sub-sys logger
)
-from ..brokers import get_brokermod
from ._mngr import (
Services,
)
from ._actor_runtime import maybe_open_pikerd
from ._registry import find_service
-log = get_logger(__name__)
-
-# `brokerd` enabled modules
-# NOTE: keeping this list as small as possible is part of our caps-sec
-# model and should be treated with utmost care!
-_data_mods = [
- 'piker.brokers.core',
- 'piker.brokers.data',
- 'piker.data',
- 'piker.data.feed',
- 'piker.data._sampling'
-]
-
@acm
async def maybe_spawn_daemon(
service_name: str,
service_task_target: Callable,
- spawn_args: dict[str, Any],
- loglevel: Optional[str] = None,
+ spawn_args: dict[str, Any],
+
+ loglevel: str | None = None,
singleton: bool = False,
- **kwargs,
+
+ **pikerd_kwargs,
) -> tractor.Portal:
'''
@@ -79,9 +65,6 @@ async def maybe_spawn_daemon(
clients.
'''
- if loglevel:
- get_console_log(loglevel)
-
# serialize access to this section to avoid
# 2 or more tasks racing to create a daemon
lock = Services.locks[service_name]
@@ -93,18 +76,17 @@ async def maybe_spawn_daemon(
yield portal
return
- log.warning(f"Couldn't find any existing {service_name}")
-
- # TODO: really shouldn't the actor spawning be part of the service
- # starting method `Services.start_service()` ?
+ log.warning(
+ f"Couldn't find any existing {service_name}\n"
+ 'Attempting to spawn new daemon-service..'
+ )
# ask root ``pikerd`` daemon to spawn the daemon we need if
# pikerd is not live we now become the root of the
# process tree
async with maybe_open_pikerd(
-
loglevel=loglevel,
- **kwargs,
+ **pikerd_kwargs,
) as pikerd_portal:
@@ -117,108 +99,42 @@ async def maybe_spawn_daemon(
# service task for that actor.
started: bool
if pikerd_portal is None:
- started = await service_task_target(**spawn_args)
+ started = await service_task_target(
+ loglevel=loglevel,
+ **spawn_args,
+ )
else:
- # tell the remote `pikerd` to start the target,
- # the target can't return a non-serializable value
- # since it is expected that service startingn is
- # non-blocking and the target task will persist running
- # on `pikerd` after the client requesting it's start
- # disconnects.
+ # request a remote `pikerd` (service manager) to start the
+ # target daemon-task; the target can't return
+ # a non-serializable value since it is expected that service
+ # starting is non-blocking and the target task will persist
+ # running "under" or "within" the `pikerd` actor tree after
+ # the requesting client disconnects. in other words this
+ # spawns a persistent daemon actor that continues to live
+ # for the lifespan of whatever the service manager inside
+ # `pikerd` says it should.
started = await pikerd_portal.run(
service_task_target,
+ loglevel=loglevel,
**spawn_args,
)
if started:
log.info(f'Service {service_name} started!')
+ # block until we can connect (by IPC) to the newly spawned
+ # daemon-actor and then deliver the portal to the
+ # caller.
async with tractor.wait_for_actor(service_name) as portal:
lock.release()
yield portal
await portal.cancel_actor()
-async def spawn_brokerd(
-
- brokername: str,
- loglevel: Optional[str] = None,
- **tractor_kwargs,
-
-) -> bool:
-
- log.info(f'Spawning {brokername} broker daemon')
-
- brokermod = get_brokermod(brokername)
- dname = f'brokerd.{brokername}'
-
- extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {})
- tractor_kwargs.update(extra_tractor_kwargs)
-
- # ask `pikerd` to spawn a new sub-actor and manage it under its
- # actor nursery
- modpath = brokermod.__name__
- broker_enable = [modpath]
- for submodname in getattr(
- brokermod,
- '__enable_modules__',
- [],
- ):
- subpath = f'{modpath}.{submodname}'
- broker_enable.append(subpath)
-
- portal = await Services.actor_n.start_actor(
- dname,
- enable_modules=_data_mods + broker_enable,
- loglevel=loglevel,
- debug_mode=Services.debug_mode,
- **tractor_kwargs
- )
-
- # non-blocking setup of brokerd service nursery
- from ..data import _setup_persistent_brokerd
-
- await Services.start_service_task(
- dname,
- portal,
- _setup_persistent_brokerd,
- brokername=brokername,
- )
- return True
-
-
-@acm
-async def maybe_spawn_brokerd(
-
- brokername: str,
- loglevel: Optional[str] = None,
- **kwargs,
-
-) -> tractor.Portal:
- '''
- Helper to spawn a brokerd service *from* a client
- who wishes to use the sub-actor-daemon.
-
- '''
- async with maybe_spawn_daemon(
-
- f'brokerd.{brokername}',
- service_task_target=spawn_brokerd,
- spawn_args={
- 'brokername': brokername,
- 'loglevel': loglevel,
- },
- loglevel=loglevel,
- **kwargs,
-
- ) as portal:
- yield portal
-
-
async def spawn_emsd(
- loglevel: Optional[str] = None,
+ loglevel: str | None = None,
**extra_tractor_kwargs
) -> bool:
@@ -245,7 +161,10 @@ async def spawn_emsd(
await Services.start_service_task(
'emsd',
portal,
+
+ # signature of target root-task endpoint
_setup_persistent_emsd,
+ loglevel=loglevel,
)
return True
@@ -254,18 +173,18 @@ async def spawn_emsd(
async def maybe_open_emsd(
brokername: str,
- loglevel: Optional[str] = None,
- **kwargs,
+ loglevel: str | None = None,
-) -> tractor._portal.Portal: # noqa
+ **pikerd_kwargs,
+
+) -> tractor.Portal: # noqa
async with maybe_spawn_daemon(
-
'emsd',
service_task_target=spawn_emsd,
- spawn_args={'loglevel': loglevel},
+ spawn_args={},
loglevel=loglevel,
- **kwargs,
+ **pikerd_kwargs,
) as portal:
yield portal
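
The `maybe_spawn_daemon()` flow above generalizes to any sub-daemon: the service target must be an async func importable by `pikerd` which spawns the actor and registers its root service task, after which the caller is handed a portal to the (possibly pre-existing) daemon. A hypothetical wrapper sketch, where `spawn_annotatord` and the `'annotatord'` name are made up and only the call shape mirrors `spawn_emsd()`/`maybe_open_emsd()` above:

# hypothetical sketch; only the wiring is taken from this diff.
from contextlib import asynccontextmanager as acm

import tractor

from piker.service._daemon import maybe_spawn_daemon


async def spawn_annotatord(
    loglevel: str | None = None,
    **extra_tractor_kwargs,
) -> bool:
    # a real target would start the sub-actor and register its root
    # service task, eg. via `Services.start_service_task()` as
    # `spawn_emsd()` does above.
    return True


@acm
async def maybe_open_annotatord(
    loglevel: str | None = None,
    **pikerd_kwargs,

) -> tractor.Portal:
    async with maybe_spawn_daemon(
        'annotatord',
        service_task_target=spawn_annotatord,
        spawn_args={},           # extra kwargs relayed to the target
        loglevel=loglevel,
        **pikerd_kwargs,         # passed through to `maybe_open_pikerd()`
    ) as portal:
        yield portal
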
diff --git a/piker/service/_mngr.py b/piker/service/_mngr.py
index 04f396af..70771593 100644
--- a/piker/service/_mngr.py
+++ b/piker/service/_mngr.py
@@ -28,15 +28,16 @@ import trio
from trio_typing import TaskStatus
import tractor
-from ..log import (
- get_logger,
+from ._util import (
+ log, # sub-sys logger
)
-log = get_logger(__name__)
-
-# TODO: factor this into a ``tractor.highlevel`` extension
-# pack for the library.
+# TODO: we need remote wrapping and a general soln:
+# - factor this into a ``tractor.highlevel`` extension pack for the
+# library.
+# - wrap a "remote api" wherein you can get a method proxy
+# to the pikerd actor for starting services remotely!
class Services:
actor_n: tractor._supervise.ActorNursery
@@ -58,7 +59,8 @@ class Services:
name: str,
portal: tractor.Portal,
target: Callable,
- **kwargs,
+ allow_overruns: bool = False,
+ **ctx_kwargs,
) -> (trio.CancelScope, tractor.Context):
'''
@@ -81,9 +83,11 @@ class Services:
) -> Any:
with trio.CancelScope() as cs:
+
async with portal.open_context(
target,
- **kwargs,
+ allow_overruns=allow_overruns,
+ **ctx_kwargs,
) as (ctx, first):
diff --git a/piker/service/_registry.py b/piker/service/_registry.py
index f487e2a4..7ae11937 100644
--- a/piker/service/_registry.py
+++ b/piker/service/_registry.py
@@ -28,13 +28,10 @@ from typing import (
import tractor
-
-from ..log import (
- get_logger,
+from ._util import (
+ log, # sub-sys logger
)
-log = get_logger(__name__)
-
_default_registry_host: str = '127.0.0.1'
_default_registry_port: int = 6116
_default_reg_addr: tuple[str, int] = (
diff --git a/piker/service/_util.py b/piker/service/_util.py
new file mode 100644
index 00000000..bdf23dab
--- /dev/null
+++ b/piker/service/_util.py
@@ -0,0 +1,33 @@
+# piker: trading gear for hackers
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+Sub-sys module commons.
+
+"""
+from functools import partial
+
+from ..log import (
+ get_logger,
+ get_console_log,
+)
+subsys: str = 'piker.service'
+
+log = get_logger(subsys)
+
+get_console_log = partial(
+ get_console_log,
+ name=subsys,
+)
diff --git a/piker/service/elastic.py b/piker/service/elastic.py
index 31221d57..902f4fde 100644
--- a/piker/service/elastic.py
+++ b/piker/service/elastic.py
@@ -15,26 +15,23 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
+from contextlib import asynccontextmanager as acm
from typing import (
Any,
TYPE_CHECKING,
)
+import asks
if TYPE_CHECKING:
import docker
from ._ahab import DockerContainer
-from piker.log import (
- get_logger,
- get_console_log
+from ._util import log # sub-sys logger
+from ._util import (
+ get_console_log,
)
-import asks
-
-
-log = get_logger(__name__)
-
# container level config
_config = {
@@ -92,7 +89,7 @@ def start_elasticsearch(
'http://localhost:19200/_cat/health',
params={'format': 'json'}
)).json()
- kog.info(
+ log.info(
'ElasticSearch cntr health:\n'
f'{health}'
)
@@ -126,3 +123,47 @@ def start_elasticsearch(
health_query,
chk_for_closed_msg,
)
+
+
+@acm
+async def start_ahab_daemon(
+ service_mngr: Services,
+ user_config: dict | None = None,
+ loglevel: str | None = None,
+
+) -> tuple[str, dict]:
+ '''
+ Task entrypoint to start the elasticsearch docker container using
+ the service manager.
+
+ '''
+ from pprint import pformat
+ from ._ahab import start_ahab_service
+
+ # dict-merge any user settings
+ conf: dict = _config.copy()
+ if user_config:
+ conf = conf | user_config
+
+ dname: str = 'esd'
+ log.info(f'Spawning `{dname}` supervisor')
+ async with start_ahab_service(
+ service_mngr,
+ dname,
+
+ # NOTE: docker-py client is passed at runtime
+ start_elasticsearch,
+ ep_kwargs={'user_config': conf},
+ loglevel=loglevel,
+
+ ) as (
+ ctn_ready,
+ config,
+ (cid, pid),
+ ):
+ log.info(
+ f'`{dname}` up!\n'
+ f'pid: {pid}\n'
+ f'container id: {cid[:12]}\n'
+ f'config: {pformat(config)}'
+ )
+ yield dname, conf
diff --git a/piker/service/marketstore.py b/piker/service/marketstore.py
index 5c4f90db..ac0ad0a4 100644
--- a/piker/service/marketstore.py
+++ b/piker/service/marketstore.py
@@ -26,9 +26,9 @@
from __future__ import annotations
from contextlib import asynccontextmanager as acm
from datetime import datetime
+from pprint import pformat
from typing import (
Any,
- Optional,
Union,
TYPE_CHECKING,
)
@@ -54,12 +54,14 @@ if TYPE_CHECKING:
import docker
from ._ahab import DockerContainer
+from ._util import (
+ log, # sub-sys logger
+ get_console_log,
+)
+from . import Services
from ..data.feed import maybe_open_feed
-from ..log import get_logger, get_console_log
from .._profile import Profiler
-
-
-log = get_logger(__name__)
+from .. import config
# ahabd-supervisor and container level config
@@ -70,7 +72,7 @@ _config = {
'startup_timeout': 2,
}
-_yaml_config = '''
+_yaml_config_str: str = '''
# piker's ``marketstore`` config.
# mount this config using:
@@ -89,6 +91,12 @@ stale_threshold: 5
enable_add: true
enable_remove: false
+# SUPER DUPER CRITICAL to address a super weird issue:
+# https://github.com/pikers/piker/issues/443
+# seems like "variable compression" is possibly borked
+# or snappy compression somehow breaks easily?
+disable_variable_compression: true
+
triggers:
- module: ondiskagg.so
on: "*/1Sec/OHLCV"
@@ -106,18 +114,18 @@ triggers:
# config:
# filter: "nasdaq"
-'''.format(**_config)
+'''
def start_marketstore(
client: docker.DockerClient,
-
+ user_config: dict,
**kwargs,
) -> tuple[DockerContainer, dict[str, Any]]:
'''
- Start and supervise a marketstore instance with its config bind-mounted
- in from the piker config directory on the system.
+ Start and supervise a marketstore instance with its config
+ bind-mounted in from the piker config directory on the system.
The equivalent cli cmd to this code is:
@@ -141,14 +149,16 @@ def start_marketstore(
os.mkdir(mktsdir)
yml_file = os.path.join(mktsdir, 'mkts.yml')
+ yaml_config = _yaml_config_str.format(**user_config)
+
if not os.path.isfile(yml_file):
log.warning(
f'No `marketstore` config exists?: {yml_file}\n'
'Generating new file from template:\n'
- f'{_yaml_config}\n'
+ f'{yaml_config}\n'
)
with open(yml_file, 'w') as yf:
- yf.write(_yaml_config)
+ yf.write(yaml_config)
# create a mount from user's local piker config dir into container
config_dir_mnt = docker.types.Mount(
@@ -171,6 +181,9 @@ def start_marketstore(
type='bind',
)
+ grpc_listen_port = int(user_config['grpc_listen_port'])
+ ws_listen_port = int(user_config['ws_listen_port'])
+
dcntr: DockerContainer = client.containers.run(
'alpacamarkets/marketstore:latest',
# do we need this for cmds?
@@ -178,8 +191,8 @@ def start_marketstore(
# '-p 5993:5993',
ports={
- '5993/tcp': 5993, # jsonrpc / ws?
- '5995/tcp': 5995, # grpc
+ f'{ws_listen_port}/tcp': ws_listen_port,
+ f'{grpc_listen_port}/tcp': grpc_listen_port,
},
mounts=[
config_dir_mnt,
@@ -199,7 +212,13 @@ def start_marketstore(
return "launching tcp listener for all services..." in msg
async def stop_matcher(msg: str):
- return "exiting..." in msg
+ return (
+ # not sure when this happens, some kinda stop condition
+ "exiting..." in msg
+
+ # after we send SIGINT..
+ or "initiating graceful shutdown due to 'interrupt' request" in msg
+ )
return (
dcntr,
@@ -211,6 +230,49 @@ def start_marketstore(
)
+@acm
+async def start_ahab_daemon(
+ service_mngr: Services,
+ user_config: dict | None = None,
+ loglevel: str | None = None,
+
+) -> tuple[str, dict]:
+ '''
+ Task entrypoint to start the marketstore docker container using the
+ service manager.
+
+ '''
+ from ._ahab import start_ahab_service
+
+ # dict-merge any user settings
+ conf: dict = _config.copy()
+ if user_config:
+ conf: dict = conf | user_config
+
+ dname: str = 'marketstored'
+ log.info(f'Spawning `{dname}` supervisor')
+ async with start_ahab_service(
+ service_mngr,
+ dname,
+
+ # NOTE: docker-py client is passed at runtime
+ start_marketstore,
+ ep_kwargs={'user_config': conf},
+ loglevel=loglevel,
+ ) as (
+ _,
+ config,
+ (cid, pid),
+ ):
+ log.info(
+ f'`{dname}` up!\n'
+ f'pid: {pid}\n'
+ f'container id: {cid[:12]}\n'
+ f'config: {pformat(config)}'
+ )
+ yield dname, conf
+
+
_tick_tbk_ids: tuple[str, str] = ('1Sec', 'TICK')
_tick_tbk: str = '{}/' + '/'.join(_tick_tbk_ids)
@@ -286,7 +348,7 @@ def mk_tbk(keys: tuple[str, str, str]) -> str:
def quote_to_marketstore_structarray(
quote: dict[str, Any],
- last_fill: Optional[float]
+ last_fill: float | None,
) -> np.array:
'''
@@ -327,8 +389,8 @@ def quote_to_marketstore_structarray(
@acm
async def get_client(
- host: str = 'localhost',
- port: int = _config['grpc_listen_port'],
+ host: str | None,
+ port: int | None,
) -> MarketstoreClient:
'''
@@ -337,8 +399,8 @@ async def get_client(
'''
async with open_marketstore_client(
- host,
- port
+ host or 'localhost',
+ port or _config['grpc_listen_port'],
) as client:
yield client
@@ -402,18 +464,18 @@ class Storage:
async def load(
self,
- fqsn: str,
+ fqme: str,
timeframe: int,
) -> tuple[
np.ndarray, # timeframe sampled array-series
- Optional[datetime], # first dt
- Optional[datetime], # last dt
+ datetime | None, # first dt
+ datetime | None, # last dt
]:
first_tsdb_dt, last_tsdb_dt = None, None
hist = await self.read_ohlcv(
- fqsn,
+ fqme,
# on first load we don't need to pull the max
# history per request size worth.
limit=3000,
@@ -436,9 +498,9 @@ class Storage:
async def read_ohlcv(
self,
- fqsn: str,
+ fqme: str,
timeframe: int | str,
- end: Optional[int] = None,
+ end: int | None = None,
limit: int = int(800e3),
) -> np.ndarray:
@@ -446,14 +508,14 @@ class Storage:
client = self.client
syms = await client.list_symbols()
- if fqsn not in syms:
+ if fqme not in syms:
return {}
# use the provided timeframe or 1s by default
tfstr = tf_in_1s.get(timeframe, tf_in_1s[1])
params = Params(
- symbols=fqsn,
+ symbols=fqme,
timeframe=tfstr,
attrgroup='OHLCV',
end=end,
@@ -464,20 +526,26 @@ class Storage:
limit=limit,
)
- try:
- result = await client.query(params)
- except purerpc.grpclib.exceptions.UnknownError as err:
- # indicate there is no history for this timeframe
- log.exception(
- f'Unknown mkts QUERY error: {params}\n'
- f'{err.args}'
- )
+ for i in range(3):
+ try:
+ result = await client.query(params)
+ break
+ except purerpc.grpclib.exceptions.UnknownError as err:
+ if 'snappy' in err.args:
+ await tractor.breakpoint()
+
+ # indicate there is no history for this timeframe
+ log.exception(
+ f'Unknown mkts QUERY error: {params}\n'
+ f'{err.args}'
+ )
+ else:
return {}
# TODO: it turns out column access on recarrays is actually slower:
# https://jakevdp.github.io/PythonDataScienceHandbook/02.09-structured-data-numpy.html#RecordArrays:-Structured-Arrays-with-a-Twist
# it might make sense to make these structured arrays?
- data_set = result.by_symbols()[fqsn]
+ data_set = result.by_symbols()[fqme]
array = data_set.array
# XXX: ensure sample rate is as expected
@@ -492,11 +560,11 @@ class Storage:
'YOUR DATABASE LIKELY CONTAINS BAD DATA FROM AN OLD BUG'
f'WIPING HISTORY FOR {ts}s'
)
- await self.delete_ts(fqsn, timeframe)
+ await self.delete_ts(fqme, timeframe)
# try reading again..
return await self.read_ohlcv(
- fqsn,
+ fqme,
timeframe,
end,
limit,
@@ -507,7 +575,7 @@ class Storage:
async def delete_ts(
self,
key: str,
- timeframe: Optional[Union[int, str]] = None,
+ timeframe: int | str | None = None,
fmt: str = 'OHLCV',
) -> bool:
@@ -515,6 +583,7 @@ class Storage:
client = self.client
syms = await client.list_symbols()
if key not in syms:
+ await tractor.breakpoint()
raise KeyError(f'`{key}` table key not found in\n{syms}?')
tbk = mk_tbk((
@@ -526,7 +595,7 @@ class Storage:
async def write_ohlcv(
self,
- fqsn: str,
+ fqme: str,
ohlcv: np.ndarray,
timeframe: int,
append_and_duplicate: bool = True,
@@ -559,7 +628,7 @@ class Storage:
# write to db
resp = await self.client.write(
to_push,
- tbk=f'{fqsn}/{tfkey}/OHLCV',
+ tbk=f'{fqme}/{tfkey}/OHLCV',
# NOTE: this will append duplicates
# for the same timestamp-index.
@@ -582,7 +651,7 @@ class Storage:
# write to db
resp = await self.client.write(
to_push,
- tbk=f'{fqsn}/{tfkey}/OHLCV',
+ tbk=f'{fqme}/{tfkey}/OHLCV',
# NOTE: this will append duplicates
# for the same timestamp-index.
@@ -614,8 +683,8 @@ class Storage:
@acm
async def open_storage_client(
- fqsn: str,
- period: Optional[Union[int, str]] = None, # in seconds
+ host: str,
+ grpc_port: int,
) -> tuple[Storage, dict[str, np.ndarray]]:
'''
@@ -624,7 +693,10 @@ async def open_storage_client(
'''
async with (
# eventually a storage backend endpoint
- get_client() as client,
+ get_client(
+ host=host,
+ port=grpc_port,
+ ) as client,
):
# slap on our wrapper api
yield Storage(client)
@@ -632,7 +704,7 @@ async def open_storage_client(
@acm
async def open_tsdb_client(
- fqsn: str,
+ fqme: str,
) -> Storage:
# TODO: real-time dedicated task for ensuring
@@ -666,25 +738,34 @@ async def open_tsdb_client(
delayed=False,
)
+ # load any user service settings for connecting to the tsdb
+ rootconf, path = config.load(
+ 'conf',
+ touch_if_dne=True,
+ )
+ tsdbconf = rootconf['network'].get('tsdb')
+ # backend = tsdbconf.pop('backend')
async with (
- open_storage_client(fqsn) as storage,
+ open_storage_client(
+ **tsdbconf,
+ ) as storage,
maybe_open_feed(
- [fqsn],
+ [fqme],
start_stream=False,
) as feed,
):
- profiler(f'opened feed for {fqsn}')
+ profiler(f'opened feed for {fqme}')
# to_append = feed.hist_shm.array
# to_prepend = None
- if fqsn:
- flume = feed.flumes[fqsn]
- symbol = flume.symbol
+ if fqme:
+ flume = feed.flumes[fqme]
+ symbol = flume.mkt
if symbol:
- fqsn = symbol.fqsn
+ fqme = symbol.fqme
# diff db history with shm and only write the missing portions
# ohlcv = flume.hist_shm.array
@@ -692,7 +773,7 @@ async def open_tsdb_client(
# TODO: use pg profiler
# for secs in (1, 60):
# tsdb_array = await storage.read_ohlcv(
- # fqsn,
+ # fqme,
# timeframe=timeframe,
# )
# # hist diffing:
@@ -703,7 +784,7 @@ async def open_tsdb_client(
# profiler('Finished db arrays diffs')
- syms = await storage.client.list_symbols()
+ _ = await storage.client.list_symbols()
# log.info(f'Existing tsdb symbol set:\n{pformat(syms)}')
# profiler(f'listed symbols {syms}')
yield storage
@@ -715,7 +796,7 @@ async def open_tsdb_client(
# log.info(
# f'Writing datums {array.size} -> to tsdb from shm\n'
# )
- # await storage.write_ohlcv(fqsn, array)
+ # await storage.write_ohlcv(fqme, array)
# profiler('Finished db writes')
@@ -882,3 +963,5 @@ async def stream_quotes(
if quotes:
yield quotes
+
+
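
Connection settings for the tsdb now come from the user's `conf.toml` (a `[network]` table carrying a `tsdb` sub-table) via `config.load()`, as shown in `open_tsdb_client()` above. A small sketch of that flow in isolation; the defensive `backend` pop is an assumption mirroring the commented-out line above, since `open_storage_client()` only accepts connection params:

# sketch of the new tsdb-settings flow; key names are taken from the code
# above, the `.pop('backend', ...)` guard is an assumption.
from contextlib import asynccontextmanager as acm

from piker import config
from piker.service.marketstore import open_storage_client


@acm
async def open_tsdb_from_conf():
    rootconf, path = config.load(
        'conf',
        touch_if_dne=True,
    )
    tsdbconf: dict = dict(rootconf['network'].get('tsdb') or {})

    # drop any non-connection keys before unpacking into the client.
    tsdbconf.pop('backend', None)

    async with open_storage_client(
        host=tsdbconf.get('host'),
        grpc_port=tsdbconf.get('grpc_port'),
    ) as storage:
        yield storage
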
diff --git a/piker/ui/_app.py b/piker/ui/_app.py
index 9978dbe3..ee4faf57 100644
--- a/piker/ui/_app.py
+++ b/piker/ui/_app.py
@@ -28,7 +28,7 @@ from ..service import maybe_spawn_brokerd
from . import _event
from ._exec import run_qtractor
from ..data.feed import install_brokerd_search
-from ..data._source import unpack_fqsn
+from ..accounting import unpack_fqme
from . import _search
from ._chart import GodWidget
from ..log import get_logger
@@ -100,8 +100,8 @@ async def _async_main(
starting_done = sbar.open_status('starting ze sexy chartz')
needed_brokermods: dict[str, ModuleType] = {}
- for fqsn in syms:
- brokername, *_ = unpack_fqsn(fqsn)
+ for fqme in syms:
+ brokername, *_ = unpack_fqme(fqme)
needed_brokermods[brokername] = brokers[brokername]
async with (
@@ -120,7 +120,7 @@ async def _async_main(
# this internally starts a ``display_symbol_data()`` task above
order_mode_ready = await godwidget.load_symbols(
- fqsns=syms,
+ fqmes=syms,
loglevel=loglevel,
)
diff --git a/piker/ui/_axes.py b/piker/ui/_axes.py
index 62214f60..040d0552 100644
--- a/piker/ui/_axes.py
+++ b/piker/ui/_axes.py
@@ -29,7 +29,7 @@ from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QPointF
from . import _pg_overrides as pgo
-from ..data._source import float_digits
+from ..accounting._mktinfo import float_digits
from ._label import Label
from ._style import DpiAwareFont, hcolor, _font
from ._interaction import ChartView
diff --git a/piker/ui/_chart.py b/piker/ui/_chart.py
index 7811278b..a1935641 100644
--- a/piker/ui/_chart.py
+++ b/piker/ui/_chart.py
@@ -68,7 +68,9 @@ from ..data.feed import (
Feed,
Flume,
)
-from ..data._source import Symbol
+from ..accounting import (
+ MktPair,
+)
from ..log import get_logger
from ._interaction import ChartView
from ._forms import FieldsForm
@@ -152,7 +154,7 @@ class GodWidget(QWidget):
def set_chart_symbols(
self,
- group_key: tuple[str], # of form .
+ group_key: tuple[str], # of form .
all_linked: tuple[LinkedSplits, LinkedSplits], # type: ignore
) -> None:
@@ -170,7 +172,7 @@ class GodWidget(QWidget):
async def load_symbols(
self,
- fqsns: list[str],
+ fqmes: list[str],
loglevel: str,
reset: bool = False,
@@ -183,7 +185,7 @@ class GodWidget(QWidget):
'''
# NOTE: for now we use the first symbol in the set as the "key"
# for the overlay of feeds on the chart.
- group_key: tuple[str] = tuple(fqsns)
+ group_key: tuple[str] = tuple(fqmes)
all_linked = self.get_chart_symbols(group_key)
order_mode_started = trio.Event()
@@ -217,7 +219,7 @@ class GodWidget(QWidget):
self._root_n.start_soon(
display_symbol_data,
self,
- fqsns,
+ fqmes,
loglevel,
order_mode_started,
)
@@ -287,11 +289,11 @@ class GodWidget(QWidget):
pp_nav.hide()
# set window titlebar info
- symbol = self.rt_linked.symbol
+ symbol = self.rt_linked.mkt
if symbol is not None:
self.window.setWindowTitle(
- f'{symbol.front_fqsn()} '
- f'tick:{symbol.tick_size}'
+ f'{symbol.fqme} '
+ f'tick:{symbol.size_tick}'
)
return order_mode_started
@@ -452,7 +454,7 @@ class LinkedSplits(QWidget):
# update the UI for a given "chart instance".
self.display_state: DisplayState | None = None
- self._symbol: Symbol = None
+ self._mkt: MktPair = None
def on_splitter_adjust(
self,
@@ -474,9 +476,15 @@ class LinkedSplits(QWidget):
**kwargs,
)
+ def set_mkt_info(
+ self,
+ mkt: MktPair,
+ ) -> None:
+ self._mkt = mkt
+
@property
- def symbol(self) -> Symbol:
- return self._symbol
+ def mkt(self) -> MktPair:
+ return self._mkt
def set_split_sizes(
self,
@@ -521,7 +529,7 @@ class LinkedSplits(QWidget):
def plot_ohlc_main(
self,
- symbol: Symbol,
+ mkt: MktPair,
shm: ShmArray,
flume: Flume,
sidepane: FieldsForm,
@@ -540,7 +548,7 @@ class LinkedSplits(QWidget):
# add crosshairs
self.cursor = Cursor(
linkedsplits=self,
- digits=symbol.tick_size_digits,
+ digits=mkt.price_tick_digits,
)
# NOTE: atm the first (and only) OHLC price chart for the symbol
@@ -548,7 +556,7 @@ class LinkedSplits(QWidget):
# be no distinction since we will have multiple symbols per
# view as part of "aggregate feeds".
self.chart = self.add_plot(
- name=symbol.fqsn,
+ name=mkt.fqme,
shm=shm,
flume=flume,
style=style,
@@ -1030,7 +1038,7 @@ class ChartPlotWidget(pg.PlotWidget):
'''
view = vb or self.view
viz = self.main_viz
- l, r = viz.view_range()
+ left, right = viz.view_range()
x_shift = viz.index_step() * datums
if datums >= 300:
@@ -1040,8 +1048,8 @@ class ChartPlotWidget(pg.PlotWidget):
# should trigger broadcast on all overlays right?
view.setXRange(
- min=l + x_shift,
- max=r + x_shift,
+ min=left + x_shift,
+ max=right + x_shift,
# TODO: holy shit, wtf dude... why tf would this not be 0 by
# default... speechless.
@@ -1222,12 +1230,12 @@ class ChartPlotWidget(pg.PlotWidget):
# TODO: UGH! just make this not here! we should
# be making the sticky from code which has access
- # to the ``Symbol`` instance..
+ # to the ``MktPair`` instance..
# if the sticky is for our symbol
# use the tick size precision for display
name = name or pi.name
- sym = self.linked.symbol
+ sym = self.linked.mkt
digits = None
if name == sym.key:
digits = sym.tick_size_digits
diff --git a/piker/ui/_cursor.py b/piker/ui/_cursor.py
index 79df305b..0a2c82b1 100644
--- a/piker/ui/_cursor.py
+++ b/piker/ui/_cursor.py
@@ -228,7 +228,7 @@ class ContentsLabel(pg.LabelItem):
'bar_wap',
]
],
- name=name,
+ # name=name,
index=ix,
)
)
@@ -363,7 +363,8 @@ class Cursor(pg.GraphicsObject):
# value used for rounding y-axis discreet tick steps
# computing once, up front, here cuz why not
- self._y_incr_mult = 1 / self.linked._symbol.tick_size
+ mkt = self.linked.mkt
+ self._y_tick_mult = 1/float(mkt.price_tick)
# line width in view coordinates
self._lw = self.pixelWidth() * self.lines_pen.width()
@@ -571,9 +572,15 @@ class Cursor(pg.GraphicsObject):
line_offset = self._lw / 2
# round y value to nearest tick step
- m = self._y_incr_mult
+ m = self._y_tick_mult
iy = round(y * m) / m
vl_y = iy - line_offset
+ # print(
+ # f'tick: {self._y_tick}\n'
+ # f'y: {y}\n'
+ # f'iy: {iy}\n'
+ # f'vl_y: {vl_y}\n'
+ # )
# update y-range items
if iy != last_iy:
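
The cursor change above snaps the crosshair's y-value to the market's price tick by multiplying, rounding, then dividing back out. A tiny worked example; the 0.25 tick and the `Decimal` type for `mkt.price_tick` are assumptions:

# worked example of the y-value tick-snapping used by `Cursor` above.
from decimal import Decimal

price_tick = Decimal('0.25')     # assumed `mkt.price_tick`
m = 1 / float(price_tick)        # -> 4.0, ie. `self._y_tick_mult`

y = 101.13                       # raw cursor y-coordinate
iy = round(y * m) / m            # round(404.52) / 4 -> 101.25
assert iy == 101.25
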
diff --git a/piker/ui/_dataviz.py b/piker/ui/_dataviz.py
index 3c686619..a24c7d5c 100644
--- a/piker/ui/_dataviz.py
+++ b/piker/ui/_dataviz.py
@@ -23,6 +23,8 @@ from functools import lru_cache
from math import (
ceil,
floor,
+ isnan,
+ log as logf,
)
from typing import (
Literal,
@@ -332,6 +334,8 @@ class Viz(Struct):
float,
] = {}
+ _mxmn_cache_enabled: bool = True
+
# to make lru_cache-ing work, see
# https://docs.python.org/3/faq/programming.html#how-do-i-cache-method-calls
def __eq__(self, other):
@@ -432,12 +436,12 @@ class Viz(Struct):
else:
if x_range is None:
(
- l,
+ xl,
_,
lbar,
rbar,
_,
- r,
+ xr,
) = self.datums_range()
profiler(f'{self.name} got bars range')
@@ -447,7 +451,10 @@ class Viz(Struct):
# https://stackoverflow.com/a/29980872
ixrng = lbar, rbar = round(x_range[0]), round(x_range[1])
- if use_caching:
+ if (
+ use_caching
+ and self._mxmn_cache_enabled
+ ):
cached_result = self._mxmns.get(ixrng)
if cached_result:
if do_print:
@@ -521,8 +528,31 @@ class Viz(Struct):
)
# cache result for input range
- assert mxmn
- self._mxmns[ixrng] = (read_slc, mxmn)
+ ylow, yhi = mxmn
+
+ try:
+ prolly_anomaly: bool = (
+ (
+ abs(logf(ylow, 10)) > 16
+ if ylow
+ else False
+ )
+ or (
+ isnan(ylow) or isnan(yhi)
+ )
+ )
+ except ValueError:
+ prolly_anomaly = True
+
+ if prolly_anomaly:
+ return None
+
+ if (
+ not isnan(ylow)
+ and not prolly_anomaly
+ ):
+ self._mxmns[ixrng] = (read_slc, mxmn)
+
self.vs.yrange = mxmn
profiler(f'yrange mxmn cacheing: {x_range} -> {mxmn}')
return (
@@ -555,12 +585,12 @@ class Viz(Struct):
Return a range tuple for the datums present in view.
'''
- l, r = view_range or self.view_range()
+ xl, xr = view_range or self.view_range()
index_field: str = index_field or self.index_field
if index_field == 'index':
- l: int = round(l)
- r: int = round(r)
+ xl: int = round(xl)
+ xr: int = round(xr)
if array is None:
array = self.shm.array
@@ -571,12 +601,12 @@ class Viz(Struct):
# invalid view state
if (
- r < l
- or l < 0
- or r < 0
+ xr < xl
+ or xl < 0
+ or xr < 0
or (
- l > last
- and r > last
+ xl > last
+ and xr > last
)
):
leftmost: int = first
@@ -586,12 +616,12 @@ class Viz(Struct):
# determine first and last datums in view determined by
# l -> r view range.
rightmost = max(
- min(last, ceil(r)),
+ min(last, ceil(xr)),
first,
)
leftmost = min(
- max(first, floor(l)),
+ max(first, floor(xl)),
last,
rightmost - 1,
)
@@ -602,12 +632,12 @@ class Viz(Struct):
self.vs.xrange = leftmost, rightmost
return (
- l, # left x-in-view
+ xl, # left x-in-view
first, # first datum
leftmost,
rightmost,
last, # last_datum
- r, # right-x-in-view
+ xr, # right-x-in-view
)
def read(
@@ -635,12 +665,12 @@ class Viz(Struct):
profiler('self.shm.array READ')
(
- l,
+ xl,
ifirst,
lbar,
rbar,
ilast,
- r,
+ xr,
) = self.datums_range(
index_field=index_field,
array=array,
@@ -685,8 +715,8 @@ class Viz(Struct):
# a uniform time stamp step size?
else:
# get read-relative indices adjusting for master shm index.
- lbar_i = max(l, ifirst) - ifirst
- rbar_i = min(r, ilast) - ifirst
+ lbar_i = max(xl, ifirst) - ifirst
+ rbar_i = min(xr, ilast) - ifirst
# NOTE: the slice here does NOT include the extra ``+ 1``
# BUT the ``in_view`` slice DOES..
@@ -1214,18 +1244,25 @@ class Viz(Struct):
'''
# get most recent right datum index in-view
- l, start, datum_start, datum_stop, stop, r = self.datums_range()
+ (
+ xl,
+ start,
+ datum_start,
+ datum_stop,
+ stop,
+ xr,
+ ) = self.datums_range()
lasts = self.shm.array[-1]
i_step = lasts['index'] # last index-specific step.
i_step_t = lasts['time'] # last time step.
- # fqsn = self.flume.symbol.fqsn
+ # fqme = self.flume.mkt.fqme
# check if "last (is) in view" -> is a real-time update necessary?
if self.index_field == 'index':
- liv = (r >= i_step)
+ liv = (xr >= i_step)
else:
- liv = (r >= i_step_t)
+ liv = (xr >= i_step_t)
# compute the first available graphic obj's x-units-per-pixel
# TODO: make this not loop through all vizs each time!
@@ -1274,7 +1311,7 @@ class Viz(Struct):
varz['i_last_append'] = i_step
# print(
- # f'DOING APPEND => {fqsn}\n'
+ # f'DOING APPEND => {fqme}\n'
# f'i_step: {i_step}\n'
# f'i_step_t: {i_step_t}\n'
# f'glast: {glast}\n'
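
The new guard in `Viz.maxmin()` above refuses to cache (and returns `None` for) y-ranges whose low looks bogus: absurd orders of magnitude, `NaN`s, or values that make `log()` raise. A standalone rendering of that predicate with a few assumed sample values:

# mirrors the `prolly_anomaly` logic added above; sample values are made up.
from math import isnan, log as logf


def prolly_anomaly(ylow: float, yhi: float) -> bool:
    try:
        return (
            (abs(logf(ylow, 10)) > 16 if ylow else False)
            or isnan(ylow)
            or isnan(yhi)
        )
    except ValueError:  # eg. log of a negative number
        return True


assert not prolly_anomaly(101.25, 102.0)    # normal range -> cached
assert prolly_anomaly(1e-20, 1.0)           # absurdly tiny low -> skipped
assert prolly_anomaly(float('nan'), 1.0)    # NaN -> skipped
assert prolly_anomaly(-5.0, 1.0)            # negative -> ValueError -> skipped
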
diff --git a/piker/ui/_display.py b/piker/ui/_display.py
index 3da33809..c747eb31 100644
--- a/piker/ui/_display.py
+++ b/piker/ui/_display.py
@@ -37,6 +37,9 @@ import pyqtgraph as pg
from msgspec import field
# from .. import brokers
+from ..accounting import (
+ MktPair,
+)
from ..data.feed import (
open_feed,
Feed,
@@ -62,7 +65,6 @@ from ._style import hcolor
from ._fsp import (
update_fsp_chart,
start_fsp_displays,
- has_vlm,
open_vlm_displays,
)
from ._forms import (
@@ -163,7 +165,7 @@ class DisplayState(Struct):
Chart-local real-time graphics state container.
'''
- fqsn: str
+ fqme: str
godwidget: GodWidget
quotes: dict[str, Any]
@@ -223,7 +225,7 @@ async def increment_history_view(
async for msg in istream:
profiler = Profiler(
- msg=f'History chart cycle for: `{ds.fqsn}`',
+ msg=f'History chart cycle for: `{ds.fqme}`',
delayed=True,
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
@@ -232,7 +234,7 @@ async def increment_history_view(
# l3 = ds.viz.shm.array[-3:]
# print(
- # f'fast step for {ds.flume.symbol.fqsn}:\n'
+ # f'fast step for {ds.flume.mkt.fqme}:\n'
# f'{list(l3["time"])}\n'
# f'{l3}\n'
# )
@@ -317,17 +319,17 @@ async def graphics_update_loop(
dss: dict[str, DisplayState] = {}
- for fqsn, flume in feed.flumes.items():
+ for fqme, flume in feed.flumes.items():
ohlcv = flume.rt_shm
hist_ohlcv = flume.hist_shm
- symbol = flume.symbol
- fqsn = symbol.fqsn
+ mkt = flume.mkt
+ fqme = mkt.fqme
# update last price sticky
- fast_viz = fast_chart._vizs[fqsn]
+ fast_viz = fast_chart._vizs[fqme]
index_field = fast_viz.index_field
fast_pi = fast_viz.plot
- last_price_sticky = fast_pi.getAxis('right')._stickies[fqsn]
+ last_price_sticky = fast_pi.getAxis('right')._stickies[fqme]
last_price_sticky.update_from_data(
*ohlcv.array[-1][[
index_field,
@@ -336,9 +338,9 @@ async def graphics_update_loop(
)
last_price_sticky.show()
- hist_viz = hist_chart._vizs[fqsn]
+ hist_viz = hist_chart._vizs[fqme]
slow_pi = hist_viz.plot
- hist_last_price_sticky = slow_pi.getAxis('right')._stickies[fqsn]
+ hist_last_price_sticky = slow_pi.getAxis('right')._stickies[fqme]
hist_last_price_sticky.update_from_data(
*hist_ohlcv.array[-1][[
index_field,
@@ -346,7 +348,7 @@ async def graphics_update_loop(
]]
)
- vlm_chart = vlm_charts[fqsn]
+ vlm_chart = vlm_charts[fqme]
vlm_viz = vlm_chart._vizs.get('volume') if vlm_chart else None
(
@@ -361,13 +363,13 @@ async def graphics_update_loop(
last, volume = ohlcv.array[-1][['close', 'volume']]
- symbol = flume.symbol
+ mkt = flume.mkt
l1 = L1Labels(
fast_pi,
# determine precision/decimal lengths
- digits=symbol.tick_size_digits,
- size_digits=symbol.lot_size_digits,
+ digits=mkt.price_tick_digits,
+ size_digits=mkt.size_tick_digits,
)
# TODO:
@@ -381,8 +383,8 @@ async def graphics_update_loop(
fast_chart.show()
last_quote_s = time.time()
- dss[fqsn] = ds = linked.display_state = DisplayState(**{
- 'fqsn': fqsn,
+ dss[fqme] = ds = linked.display_state = DisplayState(**{
+ 'fqme': fqme,
'godwidget': godwidget,
'quotes': {},
@@ -450,15 +452,15 @@ async def graphics_update_loop(
and quote_rate >= display_rate
):
pass
- # log.warning(f'High quote rate {symbol.key}: {quote_rate}')
+ # log.warning(f'High quote rate {mkt.fqme}: {quote_rate}')
last_quote_s = time.time()
- for fqsn, quote in quotes.items():
- ds = dss[fqsn]
+ for fqme, quote in quotes.items():
+ ds = dss[fqme]
ds.quotes = quote
- rt_pi, hist_pi = pis[fqsn]
+ rt_pi, hist_pi = pis[fqme]
# chart isn't active/shown so skip render cycle and
# pause feed(s)
@@ -466,14 +468,14 @@ async def graphics_update_loop(
fast_chart.linked.isHidden()
or not rt_pi.isVisible()
):
- print(f'{fqsn} skipping update for HIDDEN CHART')
+ print(f'{fqme} skipping update for HIDDEN CHART')
fast_chart.pause_all_feeds()
continue
ic = fast_chart.view._in_interact
if ic:
fast_chart.pause_all_feeds()
- print(f'{fqsn} PAUSING DURING INTERACTION')
+ print(f'{fqme} PAUSING DURING INTERACTION')
await ic.wait()
fast_chart.resume_all_feeds()
@@ -495,7 +497,7 @@ def graphics_update_cycle(
) -> None:
profiler = Profiler(
- msg=f'Graphics loop cycle for: `{ds.fqsn}`',
+ msg=f'Graphics loop cycle for: `{ds.fqme}`',
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
delayed=True,
@@ -509,7 +511,7 @@ def graphics_update_cycle(
# - use a streaming minmax algo and drop the use of the
# state-tracking ``multi_maxmin()`` routine from above?
- fqsn = ds.fqsn
+ fqme = ds.fqme
chart = ds.chart
vlm_chart = ds.vlm_chart
@@ -548,7 +550,7 @@ def graphics_update_cycle(
# the true range? This way you can slap in orders outside the
# current L1 (only) book range.
main_vb: ChartView = main_viz.plot.vb
- this_viz: Viz = chart._vizs[fqsn]
+ this_viz: Viz = chart._vizs[fqme]
this_vb: ChartView = this_viz.plot.vb
this_yr = this_vb._yrange
if this_yr:
@@ -600,7 +602,7 @@ def graphics_update_cycle(
profiler,
)
- profiler(f'{fqsn} `multi_maxmin()` call')
+ profiler(f'{fqme} `multi_maxmin()` call')
# iterate frames of ticks-by-type such that we only update graphics
# using the last update per type where possible.
@@ -828,7 +830,7 @@ def graphics_update_cycle(
# update any overlayed fsp flows
if (
- curve_name != fqsn
+ curve_name != fqme
):
update_fsp_chart(
viz,
@@ -939,7 +941,7 @@ def graphics_update_cycle(
liv and do_rt_update
or do_px_step
)
- and curve_name not in {fqsn}
+ and curve_name not in {fqme}
):
update_fsp_chart(
viz,
@@ -1008,7 +1010,7 @@ async def link_views_with_region(
hist_pi.addItem(region, ignoreBounds=True)
region.setOpacity(6/16)
- viz = rt_chart.get_viz(flume.symbol.fqsn)
+ viz = rt_chart.get_viz(flume.mkt.fqme)
assert viz
index_field = viz.index_field
@@ -1035,7 +1037,7 @@ async def link_views_with_region(
# HFT/real-time chart.
rng = mn, mx = viewRange[0]
- # hist_viz = hist_chart.get_viz(flume.symbol.fqsn)
+ # hist_viz = hist_chart.get_viz(flume.mkt.fqme)
# hist = hist_viz.shm.array[-3:]
# print(
# f'mn: {mn}\n'
@@ -1153,7 +1155,7 @@ _quote_throttle_rate: int = 60 - 6
async def display_symbol_data(
godwidget: GodWidget,
- fqsns: list[str],
+ fqmes: list[str],
loglevel: str,
order_mode_started: trio.Event,
@@ -1176,9 +1178,9 @@ async def display_symbol_data(
# group_key=loading_sym_key,
# )
- for fqsn in fqsns:
+ for fqme in fqmes:
loading_sym_key = sbar.open_status(
- f'loading {fqsn} ->',
+ f'loading {fqme} ->',
group_key=True
)
@@ -1197,7 +1199,7 @@ async def display_symbol_data(
# TODO: we should be able to increase this if we use some
# `mypyc` speedups elsewhere? 22ish seems to be the sweet
# spot for single-feed chart.
- num_of_feeds = len(fqsns)
+ num_of_feeds = len(fqmes)
mx: int = 22
if num_of_feeds > 1:
# there will be more ctx switches with more than 1 feed so we
@@ -1213,20 +1215,19 @@ async def display_symbol_data(
feed: Feed
async with open_feed(
- fqsns,
+ fqmes,
loglevel=loglevel,
tick_throttle=cycles_per_feed,
) as feed:
# use expanded contract symbols passed back from feed layer.
- fqsns = list(feed.flumes.keys())
-
+ fqmes = list(feed.flumes.keys())
# step_size_s = 1
# tf_key = tf_in_1s[step_size_s]
godwidget.window.setWindowTitle(
- f'{fqsns} '
- # f'tick:{symbol.tick_size} '
+ f'{fqmes} '
+ # f'tick:{mkt.tick_size} '
# f'step:{tf_key} '
)
# generate order mode side-pane UI
@@ -1236,8 +1237,8 @@ async def display_symbol_data(
godwidget.pp_pane = pp_pane
# create top history view chart above the "main rt chart".
- rt_linked = godwidget.rt_linked
- hist_linked = godwidget.hist_linked
+ rt_linked: LinkedSplits = godwidget.rt_linked
+ hist_linked: LinkedSplits = godwidget.hist_linked
# NOTE: here we insert the slow-history chart set into
# the fast chart's splitter -> so it's a splitter of charts
@@ -1277,22 +1278,21 @@ async def display_symbol_data(
# for the "first"/selected symbol we create new chart widgets
# and sub-charts for FSPs
- fqsn, flume = fitems[0]
+ fqme, flume = fitems[0]
# TODO NOTE: THIS CONTROLS WHAT SYMBOL IS USED FOR ORDER MODE
# SUBMISSIONS, we need to make this switch based on selection.
- rt_linked._symbol = flume.symbol
- hist_linked._symbol = flume.symbol
+ rt_linked.set_mkt_info(flume.mkt)
+ hist_linked.set_mkt_info(flume.mkt)
ohlcv: ShmArray = flume.rt_shm
hist_ohlcv: ShmArray = flume.hist_shm
- symbol = flume.symbol
- brokername = symbol.brokers[0]
- fqsn = symbol.fqsn
+ mkt: MktPair = flume.mkt
+ fqme = mkt.fqme
hist_chart = hist_linked.plot_ohlc_main(
- symbol,
+ mkt,
hist_ohlcv,
flume,
# in the case of history chart we explicitly set `False`
@@ -1306,15 +1306,15 @@ async def display_symbol_data(
# ensure the last datum graphic is generated
# for zoom-interaction purposes.
- hist_viz = hist_chart.get_viz(fqsn)
- hist_viz.draw_last(array_key=fqsn)
- pis.setdefault(fqsn, [None, None])[1] = hist_chart.plotItem
+ hist_viz = hist_chart.get_viz(fqme)
+ hist_viz.draw_last(array_key=fqme)
+ pis.setdefault(fqme, [None, None])[1] = hist_chart.plotItem
# don't show when not focussed
hist_linked.cursor.always_show_xlabel = False
rt_chart = rt_linked.plot_ohlc_main(
- symbol,
+ mkt,
ohlcv,
flume,
# in the case of history chart we explicitly set `False`
@@ -1324,8 +1324,8 @@ async def display_symbol_data(
'last_step_color': 'original',
},
)
- rt_viz = rt_chart.get_viz(fqsn)
- pis.setdefault(fqsn, [None, None])[0] = rt_chart.plotItem
+ rt_viz = rt_chart.get_viz(fqme)
+ pis.setdefault(fqme, [None, None])[0] = rt_chart.plotItem
# for pause/resume on mouse interaction
rt_chart.feed = feed
@@ -1337,11 +1337,10 @@ async def display_symbol_data(
None | ChartPlotWidget
] = {}.fromkeys(feed.flumes)
if (
- not symbol.broker_info[brokername].get('no_vlm', False)
- and has_vlm(ohlcv)
+ flume.has_vlm()
and vlm_chart is None
):
- vlm_chart = vlm_charts[fqsn] = await ln.start(
+ vlm_chart = vlm_charts[fqme] = await ln.start(
open_vlm_displays,
rt_linked,
flume,
@@ -1375,26 +1374,26 @@ async def display_symbol_data(
godwidget.resize_all()
await trio.sleep(0)
- for fqsn, flume in fitems[1:]:
+ for fqme, flume in fitems[1:]:
# get a new color from the palette
bg_chart_color, bg_last_bar_color = next(palette)
ohlcv: ShmArray = flume.rt_shm
hist_ohlcv: ShmArray = flume.hist_shm
- symbol = flume.symbol
- fqsn = symbol.fqsn
+ mkt = flume.mkt
+ fqme = mkt.fqme
hist_pi = hist_chart.overlay_plotitem(
- name=fqsn,
- axis_title=fqsn,
+ name=fqme,
+ axis_title=fqme,
)
hist_viz = hist_chart.draw_curve(
- fqsn,
+ fqme,
hist_ohlcv,
flume,
- array_key=fqsn,
+ array_key=fqme,
overlay=hist_pi,
pi=hist_pi,
is_ohlc=True,
@@ -1405,26 +1404,26 @@ async def display_symbol_data(
# ensure the last datum graphic is generated
# for zoom-interaction purposes.
- hist_viz.draw_last(array_key=fqsn)
+ hist_viz.draw_last(array_key=fqme)
# TODO: we need a better API to do this..
# specially store ref to shm for lookup in display loop
# since only a placeholder of `None` is entered in
# ``.draw_curve()``.
- hist_viz = hist_chart._vizs[fqsn]
+ hist_viz = hist_chart._vizs[fqme]
assert hist_viz.plot is hist_pi
- pis.setdefault(fqsn, [None, None])[1] = hist_pi
+ pis.setdefault(fqme, [None, None])[1] = hist_pi
rt_pi = rt_chart.overlay_plotitem(
- name=fqsn,
- axis_title=fqsn,
+ name=fqme,
+ axis_title=fqme,
)
rt_viz = rt_chart.draw_curve(
- fqsn,
+ fqme,
ohlcv,
flume,
- array_key=fqsn,
+ array_key=fqme,
overlay=rt_pi,
pi=rt_pi,
is_ohlc=True,
@@ -1437,9 +1436,9 @@ async def display_symbol_data(
# specially store ref to shm for lookup in display loop
# since only a placeholder of `None` is entered in
# ``.draw_curve()``.
- rt_viz = rt_chart._vizs[fqsn]
+ rt_viz = rt_chart._vizs[fqme]
assert rt_viz.plot is rt_pi
- pis.setdefault(fqsn, [None, None])[0] = rt_pi
+ pis.setdefault(fqme, [None, None])[0] = rt_pi
rt_chart.setFocus()
@@ -1455,7 +1454,7 @@ async def display_symbol_data(
# greedily do a view range default and pane resizing
# on startup before loading the order-mode machinery.
- for fqsn, flume in feed.flumes.items():
+ for fqme, flume in feed.flumes.items():
# size view to data prior to order mode init
rt_chart.main_viz.default_view(
@@ -1473,6 +1472,7 @@ async def display_symbol_data(
hist_chart.main_viz.default_view(
do_min_bars=True,
+ do_ds=False,
)
hist_linked.graphics_cycle()
@@ -1497,13 +1497,13 @@ async def display_symbol_data(
)
# boot order-mode
- order_ctl_symbol: str = fqsns[0]
+ order_ctl_fqme: str = fqmes[0]
mode: OrderMode
async with (
open_order_mode(
feed,
godwidget,
- fqsns[0],
+ order_ctl_fqme,
order_mode_started,
loglevel=loglevel
) as mode
@@ -1511,7 +1511,7 @@ async def display_symbol_data(
rt_linked.mode = mode
- rt_viz = rt_chart.get_viz(order_ctl_symbol)
+ rt_viz = rt_chart.get_viz(order_ctl_fqme)
rt_viz.plot.setFocus()
# default view adjuments and sidepane alignment
@@ -1524,7 +1524,7 @@ async def display_symbol_data(
hist_chart.main_viz.default_view(
do_min_bars=True,
)
- hist_viz = hist_chart.get_viz(fqsn)
+ hist_viz = hist_chart.get_viz(fqme)
await trio.sleep(0)
godwidget.resize_all()
diff --git a/piker/ui/_fsp.py b/piker/ui/_fsp.py
index 6e600743..b4aa2b10 100644
--- a/piker/ui/_fsp.py
+++ b/piker/ui/_fsp.py
@@ -29,7 +29,6 @@ from typing import (
Any,
)
-import numpy as np
import msgspec
import tractor
import pyqtgraph as pg
@@ -46,7 +45,7 @@ from ..data._sharedmem import (
try_read,
)
from ..data.feed import Flume
-from ..data._source import Symbol
+from ..accounting import MktPair
from ._chart import (
ChartPlotWidget,
LinkedSplits,
@@ -72,14 +71,6 @@ from .._profile import Profiler
log = get_logger(__name__)
-def has_vlm(ohlcv: ShmArray) -> bool:
- # make sure that the instrument supports volume history
- # (sometimes this is not the case for some commodities and
- # derivatives)
- vlm = ohlcv.array['volume']
- return not bool(np.all(np.isin(vlm, -1)) or np.all(np.isnan(vlm)))
-
-
def update_fsp_chart(
viz,
graphics_name: str,
@@ -398,7 +389,7 @@ class FspAdmin:
portal: tractor.Portal,
complete: trio.Event,
started: trio.Event,
- fqsn: str,
+ fqme: str,
dst_fsp_flume: Flume,
conf: dict,
target: Fsp,
@@ -418,7 +409,7 @@ class FspAdmin:
cascade,
# data feed key
- fqsn=fqsn,
+ fqme=fqme,
# TODO: pass `Flume.to_msg()`s here?
# mems
@@ -436,7 +427,7 @@ class FspAdmin:
in self._flow_registry.items()
],
- ) as (ctx, last_index),
+ ) as (ctx, _),
ctx.open_stream() as stream,
):
@@ -444,7 +435,7 @@ class FspAdmin:
# register output data
self._registry[
- (fqsn, ns_path)
+ (fqme, ns_path)
] = (
stream,
dst_fsp_flume.rt_shm,
@@ -484,26 +475,42 @@ class FspAdmin:
) -> (Flume, trio.Event):
- fqsn = self.flume.symbol.fqsn
+ src_mkt: MktPair = self.flume.mkt
+ fqme: str = src_mkt.get_fqme(delim_char='')
# allocate an output shm array
key, dst_shm, opened = maybe_mk_fsp_shm(
- fqsn,
+ fqme,
target=target,
readonly=True,
)
- portal = self.cluster.get(worker_name) or self.rr_next_portal()
- provider_tag = portal.channel.uid
+ portal: tractor.Portal = (
+ self.cluster.get(worker_name)
+ or self.rr_next_portal()
+ )
- symbol = Symbol(
- key=key,
- broker_info={
- provider_tag: {'asset_type': 'fsp'},
- },
+ # TODO: this should probably be turned into a
+ # ``Cascade`` type which describes the routing
+ # of an fsp's IO in terms of sink -> source
+ # shm/IPC endpoints?
+ mkt = MktPair(
+
+ # make this a couple addrs encapsing
+ # the flume routing?
+ src=src_mkt.dst,
+ dst=target.name,
+
+ # make this a precision / rounding value?
+ price_tick=src_mkt.price_tick,
+ size_tick=src_mkt.size_tick,
+
+ bs_mktid=target.name,
+ broker='piker',
+ _atype='fsp',
)
dst_fsp_flume = Flume(
- symbol=symbol,
+ mkt=mkt,
_rt_shm_token=dst_shm.token,
first_quote={},
@@ -519,7 +526,7 @@ class FspAdmin:
# if not opened:
# raise RuntimeError(
- # f'Already started FSP `{fqsn}:{func_name}`'
+ # f'Already started FSP `{fqme}:{func_name}`'
# )
complete = trio.Event()
@@ -529,7 +536,7 @@ class FspAdmin:
portal,
complete,
started,
- fqsn,
+ fqme,
dst_fsp_flume,
conf,
target,
diff --git a/piker/ui/_lines.py b/piker/ui/_lines.py
index 4469a673..62ce9de1 100644
--- a/piker/ui/_lines.py
+++ b/piker/ui/_lines.py
@@ -123,10 +123,10 @@ class LevelLine(pg.InfiniteLine):
self._track_cursor: bool = False
self.always_show_labels = always_show_labels
- self._on_drag_start = lambda l: None
- self._on_drag_end = lambda l: None
+ self._on_drag_start = lambda lvln: None
+ self._on_drag_end = lambda lvln: None
- self._y_incr_mult = 1 / chart.linked.symbol.tick_size
+ self._y_incr_mult = float(1 / chart.linked.mkt.size_tick)
self._right_end_sc: float = 0
# use px caching
diff --git a/piker/ui/_notify.py b/piker/ui/_notify.py
index 4a33dabb..8cc45e89 100644
--- a/piker/ui/_notify.py
+++ b/piker/ui/_notify.py
@@ -93,7 +93,7 @@ async def notify_from_ems_status_msg(
# TODO: add in standard fill/exec info that maybe we
# pack in a broker independent way?
f"'{msg.pformat()}'",
- ],
+ ],
capture_stdout=True,
capture_stderr=True,
check=False,
@@ -104,4 +104,6 @@ async def notify_from_ems_status_msg(
log.runtime(result)
except FileNotFoundError:
- log.warn('Tried to send a notification but \'notify-send\' not present')
+ log.warn(
+ 'Tried to send a notification but \'notify-send\' not present'
+ )
diff --git a/piker/ui/_position.py b/piker/ui/_position.py
index 41421fb6..a2e6c19e 100644
--- a/piker/ui/_position.py
+++ b/piker/ui/_position.py
@@ -14,10 +14,10 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
-"""
-Position info and display
+'''
+Position (pos) info and display to track ur PnLz B)
-"""
+'''
from __future__ import annotations
from copy import copy
from dataclasses import dataclass
@@ -45,8 +45,17 @@ from ..calc import (
pnl,
puterize,
)
-from ..clearing._allocate import Allocator
-from ..pp import Position
+from ..accounting import (
+ Allocator,
+ MktPair,
+)
+from ..accounting import (
+ Position,
+)
+from ..accounting._mktinfo import (
+ _derivs,
+)
+
from ..data._normalize import iterticks
from ..data.feed import (
Feed,
@@ -85,7 +94,7 @@ async def update_pnl_from_feed(
pp: PositionTracker = order_mode.current_pp
live: Position = pp.live_pp
- key: str = live.symbol.front_fqsn()
+ key: str = live.mkt.fqme
log.info(f'Starting pnl display for {pp.alloc.account}')
@@ -119,7 +128,7 @@ async def update_pnl_from_feed(
# watch out for wrong quote msg-data if you muck
# with backend feed subs code..
- # assert sym == quote['fqsn']
+ # assert sym == quote['fqme']
for tick in iterticks(quote, types):
# print(f'{1/period} Hz')
@@ -238,7 +247,7 @@ class SettingsPane:
# a ``brokerd`) then error and switch back to the last
# selection.
if tracker is None:
- sym = old_tracker.charts[0].linked.symbol.key
+ sym: str = old_tracker.charts[0].linked.mkt.fqme
log.error(
f'Account `{account_name}` can not be set for {sym}'
)
@@ -409,9 +418,10 @@ class SettingsPane:
'''
mode = self.order_mode
- sym = mode.chart.linked.symbol
+ mkt: MktPair = mode.chart.linked.mkt
size = tracker.live_pp.size
- flume: Feed = mode.feed.flumes[sym.fqsn]
+ fqme: str = mkt.fqme
+ flume: Feed = mode.feed.flumes[fqme]
pnl_value = 0
if size:
@@ -424,9 +434,8 @@ class SettingsPane:
# maybe start update task
global _pnl_tasks
- fqsn = sym.front_fqsn()
- if fqsn not in _pnl_tasks:
- _pnl_tasks[fqsn] = True
+ if fqme not in _pnl_tasks:
+ _pnl_tasks[fqme] = True
self.order_mode.nursery.start_soon(
update_pnl_from_feed,
flume,
@@ -495,14 +504,6 @@ def pp_line(
return line
-_derivs = (
- 'future',
- 'continuous_future',
- 'option',
- 'futures_option',
-)
-
-
# TODO: move into annoate module?
def mk_level_marker(
chart: ChartPlotWidget,
@@ -557,7 +558,7 @@ class Nav(Struct):
'''
for key, chart in self.charts.items():
- size_digits = size_digits or chart.linked.symbol.lot_size_digits
+ size_digits = size_digits or chart.linked.mkt.size_tick_digits
line = self.lines.get(key)
level_marker = self.level_markers[key]
pp_label = self.pp_labels[key]
@@ -864,7 +865,7 @@ class PositionTracker:
alloc = self.alloc
# update allocator settings
- asset_type = pp.symbol.type_key
+ asset_type = pp.mkt.type_key
# specific configs by asset class / type
if asset_type in _derivs:
diff --git a/piker/ui/_search.py b/piker/ui/_search.py
index 9627e83d..216a94b6 100644
--- a/piker/ui/_search.py
+++ b/piker/ui/_search.py
@@ -639,10 +639,10 @@ class SearchWidget(QtWidgets.QWidget):
godw = self.godwidget
# first entry in the cache is the current symbol(s)
- fqsns = set()
- for multi_fqsns in list(godw._chart_cache):
- for fqsn in set(multi_fqsns):
- fqsns.add(fqsn)
+ fqmes = set()
+ for multi_fqmes in list(godw._chart_cache):
+ for fqme in set(multi_fqmes):
+ fqmes.add(fqme)
if keep_current_item_selected:
sel = self.view.selectionModel()
@@ -650,7 +650,7 @@ class SearchWidget(QtWidgets.QWidget):
self.view.set_section_entries(
'cache',
- list(fqsns),
+ list(fqmes),
# remove all other completion results except for cache
clear_all=only,
reverse=True,
@@ -722,18 +722,18 @@ class SearchWidget(QtWidgets.QWidget):
cidx, provider, symbol = value
godw = self.godwidget
- fqsn = f'{symbol}.{provider}'
- log.info(f'Requesting symbol: {fqsn}')
+ fqme = f'{symbol}.{provider}'
+ log.info(f'Requesting symbol: {fqme}')
# assert provider in symbol
await godw.load_symbols(
- fqsns=[fqsn],
+ fqmes=[fqme],
loglevel='info',
)
# fully qualified symbol name (SNS i guess is what we're
# making?)
- fqsn = '.'.join([symbol, provider]).lower()
+ fqme = '.'.join([symbol, provider]).lower()
if clear_to_cache:
@@ -743,7 +743,7 @@ class SearchWidget(QtWidgets.QWidget):
# LIFO order. this is normally only done internally by
# the chart on new symbols being loaded into memory
godw.set_chart_symbols(
- (fqsn,), (
+ (fqme,), (
godw.hist_linked,
godw.rt_linked,
)
@@ -753,7 +753,7 @@ class SearchWidget(QtWidgets.QWidget):
)
self.bar.focus()
- return fqsn
+ return fqme
def space_dims(self) -> tuple[float, float]:
'''
diff --git a/piker/ui/_signalling.py b/piker/ui/_signalling.py
index 13bc2fc8..c952b49d 100644
--- a/piker/ui/_signalling.py
+++ b/piker/ui/_signalling.py
@@ -23,7 +23,10 @@ WARNING: this code likely doesn't work at all (yet)
"""
import numpy as np
import pyqtgraph as pg
-from PyQt5 import QtCore, QtGui, QtWidgets
+from PyQt5 import (
+ QtCore,
+ QtWidgets,
+)
from .quantdom.charts import CenteredTextItem
from .quantdom.base import Quotes
diff --git a/piker/ui/order_mode.py b/piker/ui/order_mode.py
index cf5f53b1..2cd22610 100644
--- a/piker/ui/order_mode.py
+++ b/piker/ui/order_mode.py
@@ -1,5 +1,5 @@
# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
+# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@@ -36,13 +36,18 @@ import trio
from PyQt5.QtCore import Qt
from .. import config
-from ..pp import Position
-from ..clearing._client import open_ems, OrderBook
-from ..clearing._allocate import (
+from ..accounting import (
+ Allocator,
+ Position,
mk_allocator,
+ MktPair,
+ Symbol,
+)
+from ..clearing._client import (
+ open_ems,
+ OrderClient,
)
from ._style import _font
-from ..data._source import Symbol
from ..data.feed import (
Feed,
Flume,
@@ -89,7 +94,7 @@ class Dialog(Struct):
order: Order
symbol: str
lines: list[LevelLine]
- last_status_close: Callable = lambda: None
+ last_status_close: Callable | None = None
msgs: dict[str, dict] = {}
fills: dict[str, Any] = {}
@@ -120,7 +125,7 @@ class OrderMode:
chart: ChartPlotWidget # type: ignore # noqa
hist_chart: ChartPlotWidget # type: ignore # noqa
nursery: trio.Nursery # used by ``ui._position`` code?
- book: OrderBook
+ client: OrderClient
lines: LineEditor
arrows: ArrowEditor
multistatus: MultiStatus
@@ -284,15 +289,29 @@ class OrderMode:
# since that's illogical / a no-op.
return
- symbol = self.chart.linked.symbol
+ mkt: MktPair = self.chart.linked.mkt
+
+ # NOTE: we could instead use,
+ # mkt.quantize(price, quantity_type='price')
+ # but it returns a Decimal and it's probably gonna
+ # be slower?
+ # TODO: should we be enforcing this precision
+ # at a different layer in the stack? right now
+ # any precision error will literally be relayed
+ # all the way back from the backend.
+
+ price = round(
+ price,
+ ndigits=mkt.price_tick_digits,
+ )
order = self._staged_order = Order(
action=action,
price=price,
account=self.current_pp.alloc.account,
size=0,
- symbol=symbol,
- brokers=symbol.brokers,
+ symbol=mkt.fqme,
+ brokers=[mkt.broker],
oid='', # filled in on submit
exec_mode=trigger_type, # dark or live
)
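The staged-order hunk above rounds the submitted price to the market's tick precision via `mkt.price_tick_digits` instead of calling `mkt.quantize(...)` (which returns a `Decimal`). A minimal standalone sketch of the same idea, assuming a tick size given as a decimal string; the helper below is hypothetical and not a piker API:

    from decimal import Decimal

    def tick_digits(price_tick: str) -> int:
        # '0.01' -> 2, '5' -> 0: number of decimal places in the tick size
        return max(0, -Decimal(price_tick).as_tuple().exponent)

    price = 27123.456789
    print(round(price, ndigits=tick_digits('0.01')))  # -> 27123.46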
@@ -349,12 +368,17 @@ class OrderMode:
'''
if not order:
staged: Order = self._staged_order
+
# apply order fields for ems
oid = str(uuid.uuid4())
- order = staged.copy()
- order.oid = oid
- order.symbol = order.symbol.front_fqsn()
+ # NOTE: we have to str-ify `MktPair` first since we can't
+ # cast to it without being mega explicit with
+ # `msgspec.Struct`, which we're not yet..
+ order: Order = staged.copy({
+ 'symbol': str(staged.symbol),
+ 'oid': oid,
+ })
lines = self.lines_from_order(
order,
@@ -401,13 +425,13 @@ class OrderMode:
# send order cmd to ems
if send_msg:
- self.book.send(order)
+ self.client.send_nowait(order)
else:
# just register for control over this order
# TODO: some kind of mini-perms system here based on
# an out-of-band tagging/auth sub-sys for multiplayer
# order control?
- self.book._sent_orders[order.oid] = order
+ self.client._sent_orders[order.oid] = order
return dialog
@@ -428,14 +452,23 @@ class OrderMode:
line: LevelLine,
) -> None:
+ '''
+ Retrieve the level line's end state, compute the size
+ and price for the new price-level, send an update msg to
+ the EMS, and adjust the mirrored level line on the secondary chart.
- level = line.value()
+ '''
+ mktinfo: MktPair = self.chart.linked.mkt
+ level = round(
+ line.value(),
+ ndigits=mktinfo.price_tick_digits,
+ )
# updated by level change callback set in ``.new_line_from_order()``
dialog = line.dialog
size = dialog.order.size
# NOTE: sends modified order msg to EMS
- self.book.send_update(
+ self.client.update_nowait(
uuid=line.dialog.uuid,
price=level,
size=size,
@@ -465,7 +498,9 @@ class OrderMode:
# a submission is the start of a new order dialog
dialog = self.dialogs[uuid]
dialog.lines = lines
- dialog.last_status_close()
+ cls: Callable | None = dialog.last_status_close
+ if cls:
+ cls()
for line in lines:
@@ -517,7 +552,7 @@ class OrderMode:
# XXX: seems to fail on certain types of races?
# assert len(lines) == 2
if lines:
- flume: Flume = self.feed.flumes[chart.linked.symbol.fqsn]
+ flume: Flume = self.feed.flumes[chart.linked.mkt.fqme]
_, _, ratio = flume.get_ds_info()
for chart, shm in [
@@ -551,7 +586,7 @@ class OrderMode:
) -> None:
- msg = self.book._sent_orders.pop(uuid, None)
+ msg = self.client._sent_orders.pop(uuid, None)
if msg is not None:
self.lines.remove_line(uuid=uuid)
@@ -607,7 +642,7 @@ class OrderMode:
dialog.last_status_close = cancel_status_close
ids.append(oid)
- self.book.cancel(uuid=oid)
+ self.client.cancel_nowait(uuid=oid)
return ids
@@ -629,17 +664,21 @@ class OrderMode:
and src not in ('dark', 'paperboi')
and src not in symbol
):
- fqsn = symbol + '.' + src
+ fqme = symbol + '.' + src
brokername = src
else:
- fqsn = symbol
- *head, brokername = fqsn.rsplit('.')
+ fqme = symbol
+ *head, brokername = fqme.rsplit('.')
# fill out complex fields
order.oid = str(order.oid)
order.brokers = [brokername]
- order.symbol = Symbol.from_fqsn(
- fqsn=fqsn,
+
+ # TODO: change this over to `MktPair`, but it's
+ # gonna be tough since we don't have any such data
+ # really in our clearing msg schema..
+ order.symbol = Symbol.from_fqme(
+ fqsn=fqme,
info={},
)
dialog = self.submit_order(
@@ -655,7 +694,7 @@ async def open_order_mode(
feed: Feed,
godw: GodWidget,
- fqsn: str,
+ fqme: str,
started: trio.Event,
loglevel: str = 'info'
@@ -674,19 +713,22 @@ async def open_order_mode(
multistatus = chart.window().status_bar
done = multistatus.open_status('starting order mode..')
- book: OrderBook
+ client: OrderClient
trades_stream: tractor.MsgStream
# The keys in this dict **must** be in set our set of "normalized"
# symbol names (i.e. the same names you'd get back in search
# results) in order for position msgs to correctly trigger the
# display of a position indicator on screen.
- position_msgs: dict[str, list[BrokerdPosition]]
+ position_msgs: dict[str, dict[str, BrokerdPosition]]
# spawn EMS actor-service
async with (
- open_ems(fqsn, loglevel=loglevel) as (
- book,
+ open_ems(
+ fqme,
+ loglevel=loglevel,
+ ) as (
+ client,
trades_stream,
position_msgs,
brokerd_accounts,
@@ -695,21 +737,21 @@ async def open_order_mode(
trio.open_nursery() as tn,
):
- log.info(f'Opening order mode for {fqsn}')
+ log.info(f'Opening order mode for {fqme}')
# annotations editors
lines = LineEditor(godw=godw)
arrows = ArrowEditor(godw=godw)
- # symbol id
- symbol = chart.linked.symbol
+ # market endpoint info
+ mkt: MktPair = chart.linked.mkt
# map of per-provider account keys to position tracker instances
trackers: dict[str, PositionTracker] = {}
# load account names from ``brokers.toml``
accounts_def = config.load_accounts(
- providers=symbol.brokers
+ providers=[mkt.broker],
)
# XXX: ``brokerd`` delivers a set of account names that it
@@ -732,17 +774,17 @@ async def open_order_mode(
# net-zero pp
startup_pp = Position(
- symbol=symbol,
+ mkt=mkt,
size=0,
ppu=0,
# XXX: BLEH, do we care about this on the client side?
- bsuid=symbol,
+ bs_mktid=mkt.key,
)
# allocator config
- alloc = mk_allocator(
- symbol=symbol,
+ alloc: Allocator = mk_allocator(
+ mkt=mkt,
account=account_name,
# if this startup size is greater the allocator limit,
@@ -813,7 +855,7 @@ async def open_order_mode(
chart,
hist_chart,
tn,
- book,
+ client,
lines,
arrows,
multistatus,
@@ -861,12 +903,14 @@ async def open_order_mode(
# Pack position messages by account, should only be one-to-one.
# NOTE: requires the backend exactly specifies
# the expected symbol key in its positions msg.
- for (broker, acctid), msgs in position_msgs.items():
- for msg in msgs:
- log.info(f'Loading pp for {acctid}@{broker}:\n{pformat(msg)}')
+ for (
+ (broker, acctid),
+ pps_by_fqme
+ ) in position_msgs.items():
+ for msg in pps_by_fqme.values():
await process_trade_msg(
mode,
- book,
+ client,
msg,
)
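As the repacking loop above shows, startup position msgs are now keyed first by `(broker, acctid)` and then by fqme, rather than arriving as a flat list per account. A tiny sketch of iterating that shape, with placeholder dicts standing in for real `BrokerdPosition` msgs:

    position_msgs = {
        ('kraken', 'paper'): {
            'xbtusdt.kraken': {'symbol': 'xbtusdt.kraken', 'size': 0.01},
        },
    }

    for (broker, acctid), pps_by_fqme in position_msgs.items():
        for fqme, msg in pps_by_fqme.items():
            print(f'{acctid}@{broker} -> {fqme}: {msg}')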
@@ -900,7 +944,7 @@ async def open_order_mode(
await process_trade_msg(
mode,
- book,
+ client,
msg,
)
@@ -908,7 +952,7 @@ async def open_order_mode(
process_trades_and_update_ui,
trades_stream,
mode,
- book,
+ client,
)
yield mode
@@ -918,7 +962,7 @@ async def process_trades_and_update_ui(
trades_stream: tractor.MsgStream,
mode: OrderMode,
- book: OrderBook,
+ client: OrderClient,
) -> None:
@@ -927,16 +971,22 @@ async def process_trades_and_update_ui(
async for msg in trades_stream:
await process_trade_msg(
mode,
- book,
+ client,
msg,
)
async def process_trade_msg(
mode: OrderMode,
- book: OrderBook,
+ client: OrderClient,
msg: dict,
+ # emit linux DE notification?
+ # XXX: currently my experience with `dunst` is that this
+ # is horribly slow and clunky and invasive and noisy so i'm
+ # disabling it for now until we find a better UX solution..
+ do_notify: bool = False,
+
) -> tuple[Dialog, Status]:
fmsg = pformat(msg)
@@ -946,18 +996,24 @@ async def process_trade_msg(
if name in (
'position',
):
- sym = mode.chart.linked.symbol
+ sym: MktPair = mode.chart.linked.mkt
pp_msg_symbol = msg['symbol'].lower()
- fqsn = sym.front_fqsn()
- broker, key = sym.front_feed()
+ fqme = sym.fqme
+ broker = sym.broker
if (
- pp_msg_symbol == fqsn
- or pp_msg_symbol == fqsn.removesuffix(f'.{broker}')
+ pp_msg_symbol == fqme
+ or pp_msg_symbol == fqme.removesuffix(f'.{broker}')
):
- log.info(f'{fqsn} matched pp msg: {fmsg}')
+ log.info(
+ f'Loading position for `{fqme}`:\n'
+ f'{fmsg}'
+ )
tracker = mode.trackers[msg['account']]
tracker.live_pp.update_from_msg(msg)
- tracker.update_from_pp(set_as_startup=True) # status/pane UI
+ tracker.update_from_pp(
+ set_as_startup=True,
+ )
+ # status/pane UI
mode.pane.update_status_ui(tracker)
if tracker.live_pp.size:
@@ -974,7 +1030,7 @@ async def process_trade_msg(
dialog: Dialog = mode.dialogs.get(oid)
if dialog:
- fqsn = dialog.symbol
+ fqme = dialog.symbol
match msg:
case Status(
@@ -996,17 +1052,17 @@ async def process_trade_msg(
)
assert msg.resp in ('open', 'dark_open'), f'Unknown msg: {msg}'
- sym = mode.chart.linked.symbol
- fqsn = sym.front_fqsn()
+ sym: MktPair = mode.chart.linked.mkt
+ fqme = sym.fqme
if (
- ((order.symbol + f'.{msg.src}') == fqsn)
+ ((order.symbol + f'.{msg.src}') == fqme)
# a existing dark order for the same symbol
or (
- order.symbol == fqsn
+ order.symbol == fqme
and (
msg.src in ('dark', 'paperboi')
- or (msg.src in fqsn)
+ or (msg.src in fqme)
)
)
@@ -1053,7 +1109,8 @@ async def process_trade_msg(
)
mode.lines.remove_line(uuid=oid)
msg.req = req
- await notify_from_ems_status_msg(msg)
+ if do_notify:
+ await notify_from_ems_status_msg(msg)
# response to completed 'dialog' for order request
case Status(
@@ -1062,14 +1119,15 @@ async def process_trade_msg(
req=req,
):
msg.req = Order(**req)
- await notify_from_ems_status_msg(msg)
+ if do_notify:
+ await notify_from_ems_status_msg(msg)
mode.lines.remove_line(uuid=oid)
# each clearing tick is responded individually
case Status(resp='fill'):
# handle out-of-piker fills reporting?
- order: Order = book._sent_orders.get(oid)
+ order: Order = client._sent_orders.get(oid)
if not order:
log.warning(f'order {oid} is unknown')
order = msg.req
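Throughout this file the old fqsn naming gives way to fqme (presumably "fully qualified market endpoint"), composed as `<symbol>.<provider>` lowercased and split back apart from the right. A rough standalone sketch of that convention (the real parser is `piker.accounting.unpack_fqme`; the split here is only an approximation for illustration):

    symbol, provider = 'xbtusdt', 'kraken'
    fqme = '.'.join([symbol, provider]).lower()   # 'xbtusdt.kraken'

    *head, brokername = fqme.rsplit('.')
    assert brokername == 'kraken'
    assert head == ['xbtusdt']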
diff --git a/requirements-test.txt b/requirements-test.txt
index e079f8a6..ad27fc5d 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -1 +1,3 @@
pytest
+docker
+elasticsearch
diff --git a/requirements.txt b/requirements.txt
index 5e10a4ff..742c2efa 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -14,5 +14,7 @@
# ``asyncvnc`` for sending interactions to ib-gw inside docker
-e git+https://github.com/pikers/asyncvnc.git@main#egg=asyncvnc
-# ``cryptofeed`` for connecting to various crypto exchanges + custom fixes
--e git+https://github.com/pikers/cryptofeed.git@date_parsing#egg=cryptofeed
+
+# ``tomlkit`` for account files and configs; we've
+# added some new features that need to get upstreamed:
+-e git+https://github.com/pikers/tomlkit.git@piker_pin#egg=tomlkit
diff --git a/setup.py b/setup.py
index 0cd9d3fb..c63622b2 100755
--- a/setup.py
+++ b/setup.py
@@ -40,18 +40,21 @@ setup(
'console_scripts': [
'piker = piker.cli:cli',
'pikerd = piker.cli:pikerd',
+ 'ledger = piker.accounting.cli:ledger',
]
},
install_requires=[
- 'toml',
- 'tomli', # fastest pure py reader
- 'click',
+ # 'tomlkit', # fork & fix for now..
+ 'tomli', # for pre-3.11
+ 'tomli-w', # for fast ledger writing
'colorlog',
'attrs',
'pygments',
'colorama', # numba traceback coloring
'msgspec', # performant IPC messaging and structs
'protobuf',
+ 'typer',
+ 'rich',
# async
'trio',
@@ -63,8 +66,7 @@ setup(
# normally pinned to particular git hashes..
# 'tractor',
# 'asyncvnc',
- # 'pyqtgraph',
- # anyio-marketstore # mkts tsdb client
+ # 'anyio-marketstore', # mkts tsdb client
# brokers
'asks', # for non-ws rest apis
diff --git a/tests/conftest.py b/tests/conftest.py
index 3a0afba2..366d5d95 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -13,7 +13,6 @@ from piker.service import (
Services,
)
from piker.log import get_console_log
-from piker.clearing._client import open_ems
def pytest_addoption(parser):
@@ -87,8 +86,11 @@ def log(
@acm
async def _open_test_pikerd(
tmpconfdir: str,
+
reg_addr: tuple[str, int] | None = None,
loglevel: str = 'warning',
+ debug_mode: bool = False,
+
**kwargs,
) -> tuple[
@@ -101,6 +103,9 @@ async def _open_test_pikerd(
a different port then the default to allow testing alongside
a running stack.
+ Calls ``.service._actor_runtime.maybe_open_pikerd()``
+ to boot the root actor / tractor runtime.
+
'''
import random
from piker.service import maybe_open_pikerd
@@ -118,10 +123,7 @@ async def _open_test_pikerd(
'piker_test_dir': tmpconfdir,
},
- # tests may need to spawn containers dynamically
- # or just in sequence per test, so we keep root.
- drop_root_perms_for_ahab=False,
-
+ debug_mode=debug_mode,
**kwargs,
) as service_manager,
@@ -143,14 +145,60 @@ async def _open_test_pikerd(
)
+@pytest.fixture
+def tmpconfdir(
+ tmp_path: Path,
+) -> Path:
+ '''
+ Ensure the `brokers.toml` file for the test run exists
+ since we changed it to not touch files by default.
+
+ Here we override the default (in the user dir) by
+ pointing the `piker.config` module's global config-dir
+ var at this tmp path.
+
+ '''
+ tmpconfdir: Path = tmp_path / '_testing'
+ tmpconfdir.mkdir()
+
+ # touch the `brokers.toml` file since it won't
+ # exist in the tmp test dir by default!
+ # override config dir in the root actor (aka
+ # this top level testing process).
+ from piker import config
+ config._config_dir: Path = tmpconfdir
+
+ conf, path = config.load(
+ conf_name='brokers',
+ touch_if_dne=True,
+ )
+ assert path.is_file(), 'WTH.. `brokers.toml` not created!?'
+
+ return tmpconfdir
+
+ # NOTE: the `tmp_dir` fixture will wipe any files older then 3 test
+ # sessions by default:
+ # https://docs.pytest.org/en/6.2.x/tmpdir.html#the-default-base-temporary-directory
+ # BUT, if we wanted to always wipe conf dir and all contained files,
+ # rmtree(str(tmp_path))
+
+
+@pytest.fixture
+def root_conf(tmpconfdir) -> dict:
+ return config.load(
+ 'conf',
+ touch_if_dne=True,
+ )
+
+
@pytest.fixture
def open_test_pikerd(
request: pytest.FixtureRequest,
tmp_path: Path,
+ tmpconfdir: Path,
loglevel: str,
):
- tmpconfdir: Path = tmp_path / '_testing'
- tmpconfdir.mkdir()
+
tmpconfdir_str: str = str(tmpconfdir)
# NOTE: on linux the tmp config dir is generally located at:
@@ -160,6 +208,20 @@ def open_test_pikerd(
# https://docs.pytest.org/en/6.2.x/tmpdir.html#the-default-base-temporary-directory
print(f'CURRENT TEST CONF DIR: {tmpconfdir}')
+ conf = request.config
+ debug_mode: bool = conf.option.usepdb
+ if (
+ debug_mode
+ and conf.option.capture != 'no'
+ ):
+ # TODO: how to disable capture dynamically?
+ # conf._configured = False
+ # conf._do_configure()
+ pytest.fail(
+ 'To use `--pdb` (with `tractor` subactors) you must also '
+ 'pass `-s`!'
+ )
+
yield partial(
_open_test_pikerd,
@@ -171,49 +233,11 @@ def open_test_pikerd(
# bind in level from fixture, which is itself set by
# `--ll ` cli flag.
loglevel=loglevel,
- )
- # NOTE: the `tmp_dir` fixture will wipe any files older then 3 test
- # sessions by default:
- # https://docs.pytest.org/en/6.2.x/tmpdir.html#the-default-base-temporary-directory
- # BUT, if we wanted to always wipe conf dir and all contained files,
- # rmtree(str(tmp_path))
+ debug_mode=debug_mode,
+ )
# TODO: teardown checks such as,
# - no leaked subprocs or shm buffers
# - all requested container service are torn down
# - certain ``tractor`` runtime state?
-
-
-@acm
-async def _open_test_pikerd_and_ems(
- fqsn,
- mode,
- loglevel,
- open_test_pikerd
-):
- async with (
- open_test_pikerd() as (_, _, _, services),
- open_ems(
- fqsn,
- mode=mode,
- loglevel=loglevel,
- ) as ems_services,
- ):
- yield (services, ems_services)
-
-
-@pytest.fixture
-def open_test_pikerd_and_ems(
- open_test_pikerd,
- fqsn: str = 'xbtusdt.kraken',
- mode: str = 'paper',
- loglevel: str = 'info',
-):
- yield partial(
- _open_test_pikerd_and_ems,
- fqsn,
- mode,
- loglevel,
- open_test_pikerd
- )
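A usage note for the `--pdb` guard added to `open_test_pikerd` above: interactive debugging of subactor crashes needs output capture disabled, so the suite would be invoked roughly as

    pytest tests -s --pdb

passing `--pdb` without `-s` now fails fast via the `pytest.fail(...)` branch instead of silently misbehaving under captured output.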
diff --git a/tests/test_accounting.py b/tests/test_accounting.py
new file mode 100644
index 00000000..f5a3bd8d
--- /dev/null
+++ b/tests/test_accounting.py
@@ -0,0 +1,35 @@
+'''
+`piker.accounting` mgmt calculations for
+- positioning
+- ledger updates
+- config file IO
+
+'''
+from pathlib import Path
+
+from piker import config
+
+
+def test_root_conf_networking_section(
+ root_conf: dict,
+):
+ conf, path = config.load(
+ 'conf',
+ touch_if_dne=True,
+ )
+ assert conf['network']['tsdb']
+
+
+def test_account_file_default_empty(
+ tmpconfdir: Path,
+):
+ conf, path = config.load_account(
+ 'kraken',
+ 'paper',
+ )
+
+ # ensure the account file is empty but created
+ # and in the correct place in the filesystem!
+ assert not conf
+ assert path.parent.is_dir()
+ assert path.parent.name == 'accounting'
diff --git a/tests/test_databases.py b/tests/test_docker_services.py
similarity index 71%
rename from tests/test_databases.py
rename to tests/test_docker_services.py
index 554b0990..66438e33 100644
--- a/tests/test_databases.py
+++ b/tests/test_docker_services.py
@@ -1,11 +1,12 @@
from typing import AsyncContextManager
import logging
-import trio
+import pytest
from elasticsearch import (
Elasticsearch,
ConnectionError,
)
+import trio
from piker.service import marketstore
from piker.service import elastic
@@ -14,6 +15,7 @@ from piker.service import elastic
def test_marketstore_startup_and_version(
open_test_pikerd: AsyncContextManager,
loglevel: str,
+ root_conf: dict,
):
'''
Verify marketstore tsdb starts up and we can
@@ -21,18 +23,39 @@ def test_marketstore_startup_and_version(
'''
async def main():
+ user_conf: dict = {
+ 'grpc_listen_port': 5995 + 6,
+ 'ws_listen_port': 5993 + 6,
+ }
+
+ dname: str # service name
+ config: dict # service config
async with (
open_test_pikerd(
loglevel=loglevel,
- tsdb=True
+ # tsdb=True
) as (
_, # host
_, # port
pikerd_portal,
services,
),
+
+ marketstore.start_ahab_daemon(
+ services,
+ user_conf,
+ loglevel=loglevel,
+
+ ) as (dname, config)
):
+ # ensure user config was applied
+ for k, v in user_conf.items():
+ assert config[k] == v
+
+ # netconf: dict = root_conf['network']
+ # tsdbconf = netconf['tsdb']
+
# TODO: we should probably make this connection poll
# loop part of the `get_client()` implementation no?
@@ -45,7 +68,12 @@ def test_marketstore_startup_and_version(
for _ in range(3):
# NOTE: default sockaddr is embedded within
- async with marketstore.get_client() as client:
+ async with marketstore.get_client(
+ host='localhost',
+ port=user_conf['grpc_listen_port'],
+
+ ) as client:
+ print(f'Client is up @ {user_conf}!')
with trio.move_on_after(1) as cs:
syms = await client.list_symbols()
@@ -64,11 +92,18 @@ def test_marketstore_startup_and_version(
)
print('VERSION CHECKED')
+
break # get out of retry-connect loop
+ else:
+ raise RuntimeError(f'Failed to connect to {user_conf}!')
+
+ # gracefully teardown docker-daemon-service
+ print(f'Cancelling docker service {dname}')
trio.run(main)
+@pytest.mark.skip
def test_elasticsearch_startup_and_version(
open_test_pikerd: AsyncContextManager,
loglevel: str,
@@ -80,18 +115,29 @@ def test_elasticsearch_startup_and_version(
'''
async def main():
- port = 19200
+ port: int = 19200
+ user_conf: dict = {
+ 'port': port,
+ }
+
+ dname: str # service name
+ config: dict # service config
async with (
open_test_pikerd(
loglevel=loglevel,
- es=True
) as (
_, # host
_, # port
pikerd_portal,
services,
),
+ elastic.start_ahab_daemon(
+ services,
+ user_conf,
+ loglevel=loglevel,
+
+ ) as (dname, config)
):
# TODO: much like the above connect loop for mkts, we should
# probably make this sync start part of the
diff --git a/tests/test_ems.py b/tests/test_ems.py
new file mode 100644
index 00000000..f9c010f0
--- /dev/null
+++ b/tests/test_ems.py
@@ -0,0 +1,406 @@
+'''
+Execution mgmt system (EMS) e2e testing.
+
+Most tests leverage our paper clearing engine found (currently) in
+``piker.clearing._paper_engine``.
+
+Ideally, in the longer run, we will be able to support forms of
+(non-clearing) live order tests against certain backends that make
+it possible to do so.
+
+'''
+from contextlib import (
+ contextmanager as cm,
+)
+from typing import (
+ Awaitable,
+ Callable,
+ AsyncContextManager,
+ Literal,
+)
+
+import trio
+from exceptiongroup import BaseExceptionGroup
+
+import pytest
+import tractor
+from uuid import uuid4
+
+from piker.service import Services
+from piker.log import get_logger
+from piker.clearing._messages import (
+ Order,
+ Status,
+ # Cancel,
+ BrokerdPosition,
+)
+from piker.clearing import (
+ open_ems,
+ OrderClient,
+)
+from piker.accounting import (
+ unpack_fqme,
+)
+from piker.accounting import (
+ open_pps,
+ Position,
+)
+
+log = get_logger(__name__)
+
+
+
+async def order_and_and_wait_for_ppmsg(
+ client: OrderClient,
+ trades_stream: tractor.MsgStream,
+ fqme: str,
+
+ action: Literal['buy', 'sell'],
+ price: float = 100e3, # just a super high price.
+ size: float = 0.01,
+
+ exec_mode: str = 'live',
+ account: str = 'paper',
+
+) -> list[Status | BrokerdPosition]:
+ '''
+ Send an order through the EMS client and wait for the
+ resulting position msg, collecting status msgs along the way.
+
+ '''
+ sent: list[Order] = []
+ broker, mktep, venue, suffix = unpack_fqme(fqme)
+
+ order = Order(
+ exec_mode=exec_mode,
+ action=action, # TODO: remove this from our schema?
+ oid=str(uuid4()),
+ account=account,
+ size=size,
+ symbol=fqme,
+ price=price,
+ brokers=[broker],
+ )
+ sent.append(order)
+ await client.send(order)
+
+ # TODO: i guess we should still test the old sync-API?
+ # client.send_nowait(order)
+
+ # Wait for position message before moving on to verify flow(s)
+ # for the multi-order position entry/exit.
+ msgs: list[Status | BrokerdPosition] = []
+ async for msg in trades_stream:
+ match msg:
+ case {'name': 'position'}:
+ ppmsg = BrokerdPosition(**msg)
+ msgs.append(ppmsg)
+ break
+
+ case {'name': 'status'}:
+ msgs.append(Status(**msg))
+
+ return sent, msgs
+
+
+def run_and_tollerate_cancels(
+ fn: Callable[..., Awaitable],
+
+ expect_errs: tuple[Exception] | None = None,
+ tollerate_errs: tuple[Exception] = (tractor.ContextCancelled,),
+
+):
+ '''
+ Run ``trio``-``piker`` runtime with potential tolerance for
+ inter-actor cancellation during teardown (normally just
+ `tractor.ContextCancelled`s).
+
+ '''
+ if expect_errs:
+ with pytest.raises(BaseExceptionGroup) as exc_info:
+ trio.run(fn)
+
+ for err in exc_info.value.exceptions:
+ assert type(err) in expect_errs
+ else:
+ try:
+ trio.run(fn)
+ except tollerate_errs:
+ pass
+
+
+@cm
+def load_and_check_pos(
+ order: Order,
+ ppmsg: BrokerdPosition,
+
+) -> None:
+
+ with open_pps(ppmsg.broker, ppmsg.account) as table:
+
+ if ppmsg.size == 0:
+ assert ppmsg.symbol not in table.pps
+ yield None
+ return
+
+ else:
+ # NOTE: a special case is here since the `PpTable.pps` are
+ # normally indexed by the particular broker's
+ # `Position.bs_mktid: str` (a unique market / symbol id provided
+ # by their systems/design) but for the paper engine case, this
+ # is the same as the fqme.
+ pp: Position = table.pps[ppmsg.symbol]
+
+ assert ppmsg.size == pp.size
+ assert ppmsg.avg_price == pp.ppu
+
+ yield pp
+
+
+def test_ems_err_on_bad_broker(
+ open_test_pikerd: Services,
+ loglevel: str,
+):
+ async def load_bad_fqme():
+ try:
+ async with (
+ open_test_pikerd() as (_, _, _, _),
+
+ open_ems(
+ 'doggycoin.doggy',
+ mode='paper',
+ loglevel=loglevel,
+ ) as _
+ ):
+ pytest.fail('EMS is working on non-broker!?')
+ except ModuleNotFoundError:
+ pass
+
+ run_and_tollerate_cancels(load_bad_fqme)
+
+
+async def match_ppmsgs_on_ems_boot(
+ ppmsgs: list[BrokerdPosition],
+
+) -> None:
+ '''
+ Given a list of input position msgs, verify they match
+ what is loaded from the EMS on connect.
+
+ '''
+ by_acct: dict[tuple, list[BrokerdPosition]] = {}
+ for msg in ppmsgs:
+ by_acct.setdefault(
+ (msg.broker, msg.account),
+ [],
+ ).append(msg)
+
+ # TODO: actually support multi-mkts to `open_ems()`
+ # but for now just pass the first fqme.
+ fqme = msg.symbol
+
+ # disconnect from EMS, reconnect and ensure we get our same
+ # position relayed to us again in the startup msg.
+ async with (
+ open_ems(
+ fqme,
+ mode='paper',
+ loglevel='info',
+ ) as (
+ _, # OrderClient
+ _, # tractor.MsgStream
+ startup_pps,
+ accounts,
+ _, # dialogs,
+ )
+ ):
+ for (broker, account), ppmsgs in by_acct.items():
+ assert account in accounts
+
+ # lookup all msgs rx-ed for this account
+ rx_msgs = startup_pps[(broker, account)]
+
+ for expect_ppmsg in ppmsgs:
+ rx_msg = BrokerdPosition(**rx_msgs[expect_ppmsg.symbol])
+ assert rx_msg == expect_ppmsg
+
+
+async def submit_and_check(
+ fills: tuple[dict],
+ loglevel: str,
+
+) -> tuple[
+ BrokerdPosition,
+ Position,
+]:
+ '''
+ Enter a trade and assert entries are made in pps and ledger files.
+
+ Shutdown the ems-client and ensure on reconnect we get the expected
+ matching ``BrokerdPosition`` and pps.toml entries.
+
+ '''
+ broker: str = 'kraken'
+ mkt_key: str = 'xbtusdt'
+ fqme: str = f'{mkt_key}.{broker}'
+
+ startup_pps: dict[
+ tuple[str, str], # brokername, acctid
+ list[BrokerdPosition],
+ ]
+ async with (
+ open_ems(
+ fqme,
+ mode='paper',
+ loglevel=loglevel,
+ ) as (
+ client, # OrderClient
+ trades_stream, # tractor.MsgStream
+ startup_pps,
+ accounts,
+ _, # dialogs
+ )
+ ):
+ # no positions on startup
+ assert not startup_pps
+ assert 'paper' in accounts
+
+ od: dict
+ for od in fills:
+ print(f'Sending order {od} for fill')
+ size = od['size']
+ sent, msgs = await order_and_and_wait_for_ppmsg(
+ client,
+ trades_stream,
+ fqme,
+ action='buy' if size > 0 else 'sell',
+ price=100e3 if size > 0 else 0,
+ size=size,
+ )
+
+ last_order: Order = sent[-1]
+ last_resp = msgs[-1]
+ assert isinstance(last_resp, BrokerdPosition)
+ ppmsg = last_resp
+
+ # check that pps.toml for account has been updated
+ # and all ems position msgs match that state.
+ with load_and_check_pos(
+ last_order,
+ ppmsg,
+ ) as pos:
+ pass
+
+ return ppmsg, pos
+
+
+@pytest.mark.parametrize(
+ 'fills',
+ [
+ # buy and leave
+ ({'size': 0.001},),
+
+ # sell short, then buy back to net-zero in dst
+ (
+ {'size': -0.001},
+ {'size': 0.001},
+ ),
+
+ # multi-partial entry and exits from net-zero, to short and back
+ # to net-zero.
+ (
+ # enters
+ {'size': 0.001},
+ {'size': 0.002},
+
+ # partial exit
+ {'size': -0.001},
+
+ # partial enter
+ {'size': 0.0015},
+ {'size': 0.001},
+ {'size': 0.002},
+
+ # nearly back to zero.
+ {'size': -0.001},
+
+ # switch to net-short
+ {'size': -0.025},
+ {'size': -0.0195},
+
+ # another entry
+ {'size': 0.001},
+
+ # final cover to net-zero again.
+ {'size': 0.038},
+ ),
+ ],
+ ids='fills={}'.format,
+)
+def test_multi_fill_positions(
+ open_test_pikerd: AsyncContextManager,
+ loglevel: str,
+
+ fills: tuple[dict],
+
+ check_cross_session: bool = False,
+
+) -> None:
+
+ ppmsg: BrokerdPosition
+ pos: Position
+
+ accum_size: float = 0
+ for fill in fills:
+ accum_size += fill['size']
+
+ async def atest():
+
+ # export to outer scope for audit on second runtime-boot.
+ nonlocal ppmsg, pos
+
+ async with (
+ open_test_pikerd() as (_, _, _, _),
+ ):
+ ppmsg, pos = await submit_and_check(
+ fills=fills,
+ loglevel=loglevel,
+ )
+ assert ppmsg.size == accum_size
+
+ run_and_tollerate_cancels(atest)
+
+ if (
+ check_cross_session
+ or accum_size != 0
+ ):
+ # rerun just to check that position info is persistent for the paper
+ # account (i.e. a user can expect to see paper pps persist across
+ # runtime sessions).
+ async def just_check_pp():
+ nonlocal ppmsg
+
+ async with (
+ open_test_pikerd() as (_, _, _, _),
+ ):
+ await match_ppmsgs_on_ems_boot([ppmsg])
+
+ run_and_tollerate_cancels(just_check_pp)
+
+
+# TODO: still need to implement offline storage of darks/alerts/paper
+# lives probably all the same way.. see
+# https://github.com/pikers/piker/issues/463
+def test_open_orders_reloaded(
+ open_test_pikerd: AsyncContextManager,
+ loglevel: str,
+
+ # fills: tuple[dict],
+
+ check_cross_session: bool = False,
+):
+ ...
+
+
+def test_dark_order_clearing():
+ ...
diff --git a/tests/test_feeds.py b/tests/test_feeds.py
index a79ca861..07d368fa 100644
--- a/tests/test_feeds.py
+++ b/tests/test_feeds.py
@@ -7,19 +7,20 @@ from pprint import pprint
from typing import AsyncContextManager
import pytest
-# import tractor
import trio
+
from piker.data import (
ShmArray,
open_feed,
)
-from piker.data._source import (
- unpack_fqsn,
+from piker.data.flows import Flume
+from piker.accounting import (
+ unpack_fqme,
)
@pytest.mark.parametrize(
- 'fqsns',
+ 'fqmes',
[
# binance
(100, {'btcusdt.binance', 'ethusdt.binance'}, False),
@@ -30,20 +31,20 @@ from piker.data._source import (
# binance + kraken
(100, {'btcusdt.binance', 'xbtusd.kraken'}, False),
],
- ids=lambda param: f'quotes={param[0]}@fqsns={param[1]}',
+ ids=lambda param: f'quotes={param[0]}@fqmes={param[1]}',
)
def test_multi_fqsn_feed(
open_test_pikerd: AsyncContextManager,
- fqsns: set[str],
+ fqmes: set[str],
loglevel: str,
- ci_env: bool
+ ci_env: bool,
):
'''
- Start a real-time data feed for provided fqsn and pull
+ Start a real-time data feed for provided fqme and pull
a few quotes then simply shut down.
'''
- max_quotes, fqsns, run_in_ci = fqsns
+ max_quotes, fqmes, run_in_ci = fqmes
if (
ci_env
@@ -52,15 +53,15 @@ def test_multi_fqsn_feed(
pytest.skip('Skipping CI disabled test due to feed restrictions')
brokers = set()
- for fqsn in fqsns:
- brokername, key, suffix = unpack_fqsn(fqsn)
+ for fqme in fqmes:
+ brokername, *_ = unpack_fqme(fqme)
brokers.add(brokername)
async def main():
async with (
open_test_pikerd(),
open_feed(
- fqsns,
+ fqmes,
loglevel=loglevel,
# TODO: ensure throttle rate is applied
@@ -71,20 +72,20 @@ def test_multi_fqsn_feed(
) as feed
):
# verify shm buffers exist
- for fqin in fqsns:
+ for fqin in fqmes:
flume = feed.flumes[fqin]
ohlcv: ShmArray = flume.rt_shm
hist_ohlcv: ShmArray = flume.hist_shm
async with feed.open_multi_stream(brokers) as stream:
- # pull the first startup quotes, one for each fqsn, and
+ # pull the first startup quotes, one for each fqme, and
# ensure they match each flume's startup quote value.
- fqsns_copy = fqsns.copy()
+ fqsns_copy = fqmes.copy()
with trio.fail_after(0.5):
for _ in range(1):
first_quotes = await stream.receive()
- for fqsn, quote in first_quotes.items():
+ for fqme, quote in first_quotes.items():
# XXX: TODO: WTF apparently this error will get
# supressed and only show up in the teardown
@@ -92,18 +93,17 @@ def test_multi_fqsn_feed(
#
# assert 0
- fqsns_copy.remove(fqsn)
- flume = feed.flumes[fqsn]
+ fqsns_copy.remove(fqme)
+ flume: Flume = feed.flumes[fqme]
assert quote['last'] == flume.first_quote['last']
cntr = Counter()
with trio.fail_after(6):
async for quotes in stream:
- for fqsn, quote in quotes.items():
- cntr[fqsn] += 1
+ for fqme, quote in quotes.items():
+ cntr[fqme] += 1
- # await tractor.breakpoint()
- flume = feed.flumes[fqsn]
+ flume = feed.flumes[fqme]
ohlcv: ShmArray = flume.rt_shm
hist_ohlcv: ShmArray = flume.hist_shm
@@ -116,7 +116,7 @@ def test_multi_fqsn_feed(
# assert last == rt_row['close']
# assert last == hist_row['close']
pprint(
- f'{fqsn}: {quote}\n'
+ f'{fqme}: {quote}\n'
f'rt_ohlc: {rt_row}\n'
f'hist_ohlc: {hist_row}\n'
)
@@ -124,6 +124,6 @@ def test_multi_fqsn_feed(
if cntr.total() >= max_quotes:
break
- assert set(cntr.keys()) == fqsns
+ assert set(cntr.keys()) == fqmes
trio.run(main)
diff --git a/tests/test_paper.py b/tests/test_paper.py
deleted file mode 100644
index 53e03f47..00000000
--- a/tests/test_paper.py
+++ /dev/null
@@ -1,230 +0,0 @@
-'''
-Paper-mode testing
-'''
-
-import trio
-from exceptiongroup import BaseExceptionGroup
-from typing import (
- AsyncContextManager,
- Literal,
-)
-
-import pytest
-from tractor._exceptions import ContextCancelled
-from uuid import uuid4
-from functools import partial
-
-from piker.log import get_logger
-from piker.clearing._messages import Order
-from piker.pp import (
- open_pps,
-)
-
-log = get_logger(__name__)
-
-
-def get_fqsn(broker, symbol):
- fqsn = f'{symbol}.{broker}'
- return (fqsn, symbol, broker)
-
-
-oid = ''
-test_exec_mode = 'live'
-(fqsn, symbol, broker) = get_fqsn('kraken', 'xbtusdt')
-brokers = [broker]
-account = 'paper'
-
-
-async def _async_main(
- open_test_pikerd_and_ems: AsyncContextManager,
- action: Literal['buy', 'sell'] | None = None,
- price: int = 30000,
- executions: int = 1,
- size: float = 0.01,
-
- # Assert options
- assert_entries: bool = False,
- assert_pps: bool = False,
- assert_zeroed_pps: bool = False,
- assert_msg: bool = False,
-
-) -> None:
- '''
- Start piker, place a trade and assert data in
- pps stream, ledger and position table.
-
- '''
- oid: str = ''
- last_msg = {}
-
- async with open_test_pikerd_and_ems() as (
- services,
- (book, trades_stream, pps, accounts, dialogs),
- ):
- if action:
- for x in range(executions):
- oid = str(uuid4())
- order = Order(
- exec_mode=test_exec_mode,
- action=action,
- oid=oid,
- account=account,
- size=size,
- symbol=fqsn,
- price=price,
- brokers=brokers,
- )
- # This is actually a syncronous call to push a message
- book.send(order)
-
- async for msg in trades_stream:
- last_msg = msg
- match msg:
- # Wait for position message before moving on
- case {'name': 'position'}:
- break
-
- # Teardown piker like a user would
- raise KeyboardInterrupt
-
- if assert_entries or assert_pps or assert_zeroed_pps or assert_msg:
- _assert(
- assert_entries,
- assert_pps,
- assert_zeroed_pps,
- pps,
- last_msg,
- size,
- executions,
- )
-
-
-def _assert(
- assert_entries,
- assert_pps,
- assert_zerod_pps,
- pps,
- last_msg,
- size,
- executions,
-):
- with (
- open_pps(broker, account, write_on_exit=False) as table,
- ):
- '''
- Assert multiple cases including pps,
- ledger and final position message state
-
- '''
- if assert_entries:
- for key, val in [
- ('broker', broker),
- ('account', account),
- ('symbol', fqsn),
- ('size', size * executions),
- ('currency', symbol),
- ('avg_price', table.pps[symbol].ppu)
- ]:
- assert last_msg[key] == val
-
- if assert_pps:
- last_ppu = pps[(broker, account)][-1]
- assert last_ppu['avg_price'] == table.pps[symbol].ppu
-
- if assert_zerod_pps:
- assert not bool(table.pps)
-
-
-def _run_test_and_check(fn):
- '''
- Close position and assert empty position in pps
-
- '''
- with pytest.raises(BaseExceptionGroup) as exc_info:
- trio.run(fn)
-
- for exception in exc_info.value.exceptions:
- assert isinstance(exception, KeyboardInterrupt) or isinstance(
- exception, ContextCancelled
- )
-
-
-def test_buy(
- open_test_pikerd_and_ems: AsyncContextManager,
-):
- '''
- Enter a trade and assert entries are made in pps and ledger files.
-
- '''
- _run_test_and_check(
- partial(
- _async_main,
- open_test_pikerd_and_ems=open_test_pikerd_and_ems,
- action='buy',
- assert_entries=True,
- ),
- )
-
- # Open ems and assert existence of pps entries
- _run_test_and_check(
- partial(
- _async_main,
- open_test_pikerd_and_ems=open_test_pikerd_and_ems,
- assert_pps=True,
- ),
- )
-
-
-def test_sell(
- open_test_pikerd_and_ems: AsyncContextManager,
-):
- '''
- Sell position and ensure pps are zeroed.
-
- '''
- _run_test_and_check(
- partial(
- _async_main,
- open_test_pikerd_and_ems=open_test_pikerd_and_ems,
- action='sell',
- price=1,
- ),
- )
-
- _run_test_and_check(
- partial(
- _async_main,
- open_test_pikerd_and_ems=open_test_pikerd_and_ems,
- assert_zeroed_pps=True,
- ),
- )
-
-
-def test_multi_sell(
- open_test_pikerd_and_ems: AsyncContextManager,
-):
- '''
- Make 5 market limit buy orders and
- then sell 5 slots at the same price.
- Finally, assert cleared positions.
-
- '''
- _run_test_and_check(
- partial(
- _async_main,
- open_test_pikerd_and_ems=open_test_pikerd_and_ems,
- action='buy',
- executions=5,
- ),
- )
-
- _run_test_and_check(
- partial(
- _async_main,
- open_test_pikerd_and_ems=open_test_pikerd_and_ems,
- action='sell',
- executions=5,
- price=1,
- assert_zeroed_pps=True,
- ),
- )
diff --git a/tests/test_services.py b/tests/test_services.py
index 29e613e3..433e97f3 100644
--- a/tests/test_services.py
+++ b/tests/test_services.py
@@ -2,9 +2,13 @@
Actor tree daemon sub-service verifications
'''
-from typing import AsyncContextManager
+from typing import (
+ AsyncContextManager,
+ Callable,
+)
from contextlib import asynccontextmanager as acm
+from exceptiongroup import BaseExceptionGroup
import pytest
import trio
import tractor
@@ -24,7 +28,7 @@ from piker.clearing._messages import (
Status,
)
from piker.clearing._client import (
- OrderBook,
+ OrderClient,
)
@@ -33,8 +37,8 @@ def test_runtime_boot(
):
'''
Verify we can boot the `pikerd` service stack using the
- `open_test_pikerd` fixture helper and that registry address details
- match up.
+ `open_test_pikerd()` fixture helper and that contact-registry
+ address details match up.
'''
async def main():
@@ -55,6 +59,46 @@ def test_runtime_boot(
assert pikerd_portal.channel.raddr == daemon_addr
assert pikerd_portal.channel.raddr == portal.channel.raddr
+ # no service tasks should be started
+ assert not services.service_tasks
+
+ trio.run(main)
+
+
+def test_ensure_datafeed_actors(
+ open_test_pikerd: AsyncContextManager,
+ loglevel: str,
+ # cancel_method: str,
+
+) -> None:
+ '''
+ Verify that booting a data feed starts a `brokerd`
+ actor and a singleton global `samplerd` and opening
+ an order mode in paper opens the `paperboi` service.
+
+ '''
+ actor_name: str = 'brokerd'
+ backend: str = 'kraken'
+ brokerd_name: str = f'{actor_name}.{backend}'
+
+ async def main():
+ async with (
+ open_test_pikerd(),
+
+ open_feed(
+ ['xbtusdt.kraken'],
+ loglevel=loglevel,
+ ) as feed
+ ):
+ # halt rt quote streams since we aren't testing them
+ await feed.pause()
+
+ async with (
+ ensure_service(brokerd_name),
+ ensure_service('samplerd'),
+ ):
+ await trio.sleep(0.1)
+
trio.run(main)
@@ -73,45 +117,69 @@ async def ensure_service(
yield portal
-def test_ensure_datafeed_actors(
- open_test_pikerd: AsyncContextManager,
- loglevel: str,
+def run_test_w_cancel_method(
+ cancel_method: str,
+ main: Callable,
) -> None:
'''
- Verify that booting a data feed starts a `brokerd`
- actor and a singleton global `samplerd` and opening
- an order mode in paper opens the `paperboi` service.
+ Run our runtime under trio and expect a certain type of cancel condition
+ depending on input.
'''
- actor_name: str = 'brokerd'
- backend: str = 'kraken'
- brokerd_name: str = f'{actor_name}.{backend}'
+ cancelled_msg: str = (
+ "was remotely cancelled by remote actor (\'pikerd\'")
- async def main():
- async with (
- open_test_pikerd(),
- open_feed(
- ['xbtusdt.kraken'],
- loglevel=loglevel,
- ) as feed
- ):
- # halt rt quote streams since we aren't testing them
- await feed.pause()
+ if cancel_method == 'sigint':
+ with pytest.raises(
+ BaseExceptionGroup,
+ ) as exc_info:
+ trio.run(main)
- async with (
- ensure_service(brokerd_name),
- ensure_service('samplerd'),
- ):
- pass
+ multi = exc_info.value
- trio.run(main)
+ for suberr in multi.exceptions:
+ match suberr:
+ # ensure we receive a remote cancellation error caused
+ # by the pikerd root actor since we used the
+ # `.cancel_service()` API above B)
+ case tractor.ContextCancelled():
+ assert cancelled_msg in suberr.args[0]
+
+ case KeyboardInterrupt():
+ pass
+
+ case _:
+ pytest.fail(f'Unexpected error {suberr}')
+
+ elif cancel_method == 'services':
+
+ # XXX NOTE: oddly, when you pass --pdb to pytest, i think since
+ # we also use that to enable the underlying tractor debug mode,
+ # it causes this to not raise for some reason? So if you see
+ # that while changing this test.. it's prolly that.
+
+ with pytest.raises(
+ tractor.ContextCancelled
+ ) as exc_info:
+ trio.run(main)
+
+ assert cancelled_msg in exc_info.value.args[0]
+
+ else:
+ pytest.fail(f'Test is broken due to {cancel_method}')
+@pytest.mark.parametrize(
+ 'cancel_method',
+ ['services', 'sigint'],
+)
def test_ensure_ems_in_paper_actors(
open_test_pikerd: AsyncContextManager,
loglevel: str,
+ cancel_method: str,
+
) -> None:
actor_name: str = 'brokerd'
@@ -121,8 +189,7 @@ def test_ensure_ems_in_paper_actors(
async def main():
# type declares
- book: OrderBook
- trades_stream: tractor.MsgStream
+ client: OrderClient
pps: dict[str, list[BrokerdPosition]]
accounts: list[str]
dialogs: dict[str, Status]
@@ -139,8 +206,8 @@ def test_ensure_ems_in_paper_actors(
mode='paper',
loglevel=loglevel,
) as (
- book,
- trades_stream,
+ client,
+ _, # trades_stream: tractor.MsgStream
pps,
accounts,
dialogs,
@@ -151,6 +218,9 @@ def test_ensure_ems_in_paper_actors(
# local ledger and `pps.toml` state ;)
assert not pps
assert not dialogs
+ # XXX: should be new client with no state from other tests
+ assert not client._sent_orders
+ assert accounts
pikerd_subservices = ['emsd', 'samplerd']
@@ -166,13 +236,13 @@ def test_ensure_ems_in_paper_actors(
# implicitly by the ems.
assert brokerd_name in services.service_tasks
- print('ALL SERVICES STARTED, terminating..')
- await services.cancel_service('emsd')
+ print('ALL SERVICES STARTED, cancelling runtime with:\n'
+ f'-> {cancel_method}')
- with pytest.raises(
- tractor._exceptions.ContextCancelled,
- ) as exc_info:
- trio.run(main)
+ if cancel_method == 'services':
+ await services.cancel_service('emsd')
- cancel_msg: str = '_emsd_main()` was remotely cancelled by its caller'
- assert cancel_msg in exc_info.value.args[0]
+ elif cancel_method == 'sigint':
+ raise KeyboardInterrupt
+
+ run_test_w_cancel_method(cancel_method, main)