Compare commits

gitea_feat...fix_deribi (77 commits)

	| Author | SHA1 | Date | 
|---|---|---|
|  | 3e5e704c2f | |
|  | 767d25a9b9 | |
|  | b4d3bcf240 | |
|  | 5cefe8bcdb | |
|  | d96e9d4f11 | |
|  | a0dcf14aba | |
|  | 1705afb607 | |
|  | dafd5a3ca5 | |
|  | b9dde98d1e | |
|  | 1616cc0e82 | |
|  | 0a2ed195a7 | |
|  | 28e8628c61 | |
|  | b734245183 | |
|  | dc2c379d86 | |
|  | be84d0dae1 | |
|  | bdc3bc9219 | |
|  | 9232d09440 | |
|  | f96bd51442 | |
|  | 6555ccfbba | |
|  | 75d1d007fb | |
|  | 2bdbe0f20e | |
|  | a117177759 | |
|  | 30060a83c9 | |
|  | 156a35b606 | |
|  | 89e241c132 | |
|  | df8d1274ae | |
|  | 0916b707e2 | |
|  | 45788b0b53 | |
|  | 38a1f0b9ee | |
|  | f291654dbe | |
|  | e9fa422916 | |
|  | 5304a36b87 | |
|  | 089c79e905 | |
|  | d848050b52 | |
|  | ddffe2bec6 | |
|  | 19b4ca9d85 | |
|  | f037f851d8 | |
|  | a3ab8dd8fe | |
|  | 6fa0d4bcf3 | |
|  | a4f7fa9c1a | |
|  | 266ecf6206 | |
|  | ea6126d310 | |
|  | 1f4a5b80c4 | |
|  | ac6f52088a | |
|  | 960298514c | |
|  | 71f3a0a4cd | |
|  | b25a7699ab | |
|  | b39affc96e | |
|  | be8629929b | |
|  | 4776be6736 | |
|  | 008e68174b | |
|  | b4a9b86783 | |
|  | d3ca571c0e | |
|  | b3bbef30c0 | |
|  | 499b2d0090 | |
|  | 8b0f1e7045 | |
|  | b2cfa3444f | |
|  | 0be454c3d6 | |
|  | de6189da4d | |
|  | cc5b21a7e6 | |
|  | 35a9d8ec9d | |
|  | a831212c86 | |
|  | e987d7d7c4 | |
|  | 5ec756234a | |
|  | b577180773 | |
|  | f12c452d96 | |
|  | 3531c2edc1 | |
|  | 97dd7e766a | |
|  | ab1463d942 | |
|  | 5314cb79d4 | |
|  | 0c0b7116e3 | |
|  | 19c343e8b2 | |
|  | b7883325a9 | |
|  | 37ca081555 | |
|  | 44b8c70521 | |
|  | e6af97c596 | |
|  | 95ace5acb8 | |
|  | @ -0,0 +1,82 @@ | |||
| with (import <nixpkgs> {}); | ||||
| with python312Packages; | ||||
| let | ||||
|   glibStorePath = lib.getLib glib; | ||||
|   qtpyStorePath = lib.getLib qtpy; | ||||
|   pyqt6StorePath = lib.getLib pyqt6; | ||||
|   pyqt6SipStorePath = lib.getLib pyqt6-sip; | ||||
|   qt6baseStorePath = lib.getLib qt6.qtbase; | ||||
|   rapidfuzzStorePath = lib.getLib rapidfuzz; | ||||
|   qdarkstyleStorePath = lib.getLib qdarkstyle; | ||||
| in | ||||
| stdenv.mkDerivation { | ||||
|   name = "piker-qt6-poetry-shell"; | ||||
|   buildInputs = [ | ||||
|     # System requirements. | ||||
|     glib | ||||
|     qt6.qtbase | ||||
|     libgcc.lib | ||||
| 
 | ||||
|     # Python requirements. | ||||
|     python312Full | ||||
|     poetry-core | ||||
|     qdarkstyle | ||||
|     rapidfuzz | ||||
|     pyqt6 | ||||
|     qtpy | ||||
|   ]; | ||||
|   src = null; | ||||
|   shellHook = '' | ||||
|     set -e | ||||
| 
 | ||||
|     export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${libgcc.lib}/lib:${glibStorePath}/lib | ||||
| 
 | ||||
|     # Set the Qt plugin path | ||||
|     # export QT_DEBUG_PLUGINS=1 | ||||
| 
 | ||||
|     QTBASE_PATH="${qt6baseStorePath}" | ||||
|     echo "qtbase path:    $QTBASE_PATH" | ||||
|     echo "" | ||||
|     export QT_PLUGIN_PATH="$QTBASE_PATH/lib/qt-6/plugins" | ||||
|     export QT_QPA_PLATFORM_PLUGIN_PATH="$QT_PLUGIN_PATH/platforms" | ||||
|     echo "qt plugin path: $QT_PLUGIN_PATH" | ||||
|     echo "" | ||||
| 
 | ||||
|     # Maybe create venv & install deps | ||||
|     poetry install --with uis | ||||
| 
 | ||||
|     # Use pyqt6 from System, patch activate script | ||||
|     ACTIVATE_SCRIPT_PATH="$(poetry env info --path)/bin/activate" | ||||
| 
 | ||||
|     export RPDFUZZ_PATH="${rapidfuzzStorePath}/lib/python3.12/site-packages" | ||||
|     export QDRKSTYLE_PATH="${qdarkstyleStorePath}/lib/python3.12/site-packages" | ||||
|     export QTPY_PATH="${qtpyStorePath}/lib/python3.12/site-packages" | ||||
|     export PYQT6_PATH="${pyqt6StorePath}/lib/python3.12/site-packages" | ||||
|     export PYQT6_SIP_PATH="${pyqt6SipStorePath}/lib/python3.12/site-packages" | ||||
|     echo "rapidfuzz at:   $RPDFUZZ_PATH" | ||||
|     echo "qdarkstyle at:  $QDRKSTYLE_PATH" | ||||
|     echo "qtpy at:        $QTPY_PATH"  | ||||
|     echo "pyqt6 at:       $PYQT6_PATH" | ||||
|     echo "pyqt6-sip at:   $PYQT6_SIP_PATH" | ||||
|     echo "" | ||||
| 
 | ||||
|     PATCH="export PYTHONPATH=\"" | ||||
| 
 | ||||
|     PATCH="$PATCH\$RPDFUZZ_PATH" | ||||
|     PATCH="$PATCH:\$QDRKSTYLE_PATH" | ||||
|     PATCH="$PATCH:\$QTPY_PATH" | ||||
|     PATCH="$PATCH:\$PYQT6_PATH" | ||||
|     PATCH="$PATCH:\$PYQT6_SIP_PATH" | ||||
| 
 | ||||
|     PATCH="$PATCH\"" | ||||
| 
 | ||||
|     if grep -q "$PATCH" "$ACTIVATE_SCRIPT_PATH"; then | ||||
|         echo "venv is already patched." | ||||
|     else | ||||
|         echo "patching $ACTIVATE_SCRIPT_PATH to use pyqt6 from nixos..." | ||||
|         sed -i "\$i$PATCH" $ACTIVATE_SCRIPT_PATH | ||||
|     fi | ||||
| 
 | ||||
|     poetry shell | ||||
|   ''; | ||||
| } | ||||
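Reviewer note: assuming the new Nix derivation above lands as something like `default.nix`/`shell.nix` at the repo root (the filename isn't shown in this view), entering the dev shell should just be running `nix-shell` from that directory; the `shellHook` then runs `poetry install --with uis`, patches the venv's `activate` script so `PYTHONPATH` picks up the Nix-store `pyqt6`/`qdarkstyle`/`rapidfuzz` site-packages, and finally drops into `poetry shell`.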
|  | @ -50,7 +50,8 @@ __brokers__: list[str] = [ | |||
|     'binance', | ||||
|     'ib', | ||||
|     'kraken', | ||||
|     'kucoin' | ||||
|     'kucoin', | ||||
|     'deribit', | ||||
| 
 | ||||
|     # broken but used to work | ||||
|     # 'questrade', | ||||
|  | @ -61,7 +62,6 @@ __brokers__: list[str] = [ | |||
|     # wstrade | ||||
|     # iex | ||||
| 
 | ||||
|     # deribit | ||||
|     # bitso | ||||
| ] | ||||
| 
 | ||||
|  | @ -71,7 +71,7 @@ def get_brokermod(brokername: str) -> ModuleType: | |||
|     Return the imported broker module by name. | ||||
| 
 | ||||
|     ''' | ||||
|     module = import_module('.' + brokername, 'piker.brokers') | ||||
|     module: ModuleType = import_module('.' + brokername, 'piker.brokers') | ||||
|     # we only allow monkeying because it's for internal keying | ||||
|     module.name = module.__name__.split('.')[-1] | ||||
|     return module | ||||
|  |  | |||
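Reviewer note: a minimal sketch of how the newly listed `deribit` entry resolves through the loader above (assuming this hunk is `piker/brokers/__init__.py`, which the relative imports elsewhere in the diff suggest):

```python
# minimal sketch: resolve the new backend module by name
from piker.brokers import get_brokermod

brokermod = get_brokermod('deribit')
# the loader monkey-patches a short `.name` onto the module (see above)
assert brokermod.name == 'deribit'
```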
|  | @ -23,6 +23,7 @@ from __future__ import annotations | |||
| from contextlib import ( | ||||
|     asynccontextmanager as acm, | ||||
| ) | ||||
| from functools import partial | ||||
| from types import ModuleType | ||||
| from typing import ( | ||||
|     TYPE_CHECKING, | ||||
|  | @ -60,12 +61,13 @@ async def _setup_persistent_brokerd( | |||
|     ctx: tractor.Context, | ||||
|     brokername: str, | ||||
|     loglevel: str | None = None, | ||||
|     debug_mode: bool = False, | ||||
| 
 | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Allocate a actor-wide service nursery in ``brokerd`` | ||||
|     such that feeds can be run in the background persistently by | ||||
|     the broker backend as needed. | ||||
|     Allocate an actor-wide service nursery in `brokerd` such that | ||||
|     feeds can be run in the background persistently by the broker | ||||
|     backend as needed. | ||||
| 
 | ||||
|     ''' | ||||
|     # NOTE: we only need to setup logging once (and only) here | ||||
|  | @ -86,6 +88,18 @@ async def _setup_persistent_brokerd( | |||
|     from piker.data import feed | ||||
|     assert not feed._bus | ||||
| 
 | ||||
|     if ( | ||||
|         debug_mode | ||||
|         and | ||||
|         tractor.current_actor().is_infected_aio() | ||||
|     ): | ||||
|         # NOTE, whenever running `asyncio` in provider's actor | ||||
|         # runtime be sure we enabled `breakpoint()` support | ||||
|         # for non-`trio.Task` usage. | ||||
|         from tractor.devx._debug import maybe_init_greenback | ||||
|         await maybe_init_greenback() | ||||
|         # breakpoint()  # XXX, SHOULD WORK from `trio.Task`! | ||||
| 
 | ||||
|     # allocate a nursery to the bus for spawning background | ||||
|     # tasks to service client IPC requests, normally | ||||
|     # `tractor.Context` connections to explicitly required | ||||
|  | @ -145,18 +159,21 @@ def broker_init( | |||
|       above. | ||||
| 
 | ||||
|     ''' | ||||
|     from ..brokers import get_brokermod | ||||
|     brokermod = get_brokermod(brokername) | ||||
|     brokermod: ModuleType = get_brokermod(brokername) | ||||
|     modpath: str = brokermod.__name__ | ||||
| 
 | ||||
|     start_actor_kwargs['name'] = f'brokerd.{brokername}' | ||||
|     start_actor_kwargs.update( | ||||
|         getattr( | ||||
|             brokermod, | ||||
|             '_spawn_kwargs', | ||||
|             {}, | ||||
|         ) | ||||
|     spawn_kws: dict = getattr( | ||||
|         brokermod, | ||||
|         '_spawn_kwargs', | ||||
|         {}, | ||||
|     ) | ||||
|     # ^^ NOTE, here we pull any runtime parameters specific | ||||
|     # to spawning the sub-actor for the backend. For ex. | ||||
|     # both `ib` and `deribit` rely on, | ||||
|     #  `'infect_asyncio': True,` since they both | ||||
|     #  use `tractor`'s "infected `asyncio` mode" | ||||
|     #  for their libs but you could also do something like | ||||
|     #  `'debug_mode': True` which would be like passing | ||||
|     #  `--pdb` for just that provider backend. | ||||
| 
 | ||||
|     # XXX TODO: make this not so hacky/monkeypatched.. | ||||
|     # -> we need a sane way to configure the logging level for all | ||||
|  | @ -166,8 +183,7 @@ def broker_init( | |||
| 
 | ||||
|     # lookup actor-enabled modules declared by the backend offering the | ||||
|     # `brokerd` endpoint(s). | ||||
|     enabled: list[str] | ||||
|     enabled = start_actor_kwargs['enable_modules'] = [ | ||||
|     enabled: list[str] = [ | ||||
|         __name__,  # so that eps from THIS mod can be invoked | ||||
|         modpath, | ||||
|     ] | ||||
|  | @ -179,9 +195,13 @@ def broker_init( | |||
|         subpath: str = f'{modpath}.{submodname}' | ||||
|         enabled.append(subpath) | ||||
| 
 | ||||
|     datad_kwargs: dict = { | ||||
|         'name': f'brokerd.{brokername}', | ||||
|         'enable_modules': enabled, | ||||
|     } | ||||
|     return ( | ||||
|         brokermod, | ||||
|         start_actor_kwargs,  # to `ActorNursery.start_actor()` | ||||
|         start_actor_kwargs | datad_kwargs | spawn_kws,  # to `ActorNursery.start_actor()` | ||||
| 
 | ||||
|         # XXX see impl above; contains all (actor global) | ||||
|         # setup/teardown expected in all `brokerd` actor instances. | ||||
|  | @ -190,14 +210,17 @@ def broker_init( | |||
| 
 | ||||
| 
 | ||||
| async def spawn_brokerd( | ||||
| 
 | ||||
|     brokername: str, | ||||
|     loglevel: str | None = None, | ||||
| 
 | ||||
|     **tractor_kwargs, | ||||
| 
 | ||||
| ) -> bool: | ||||
|     ''' | ||||
|     Spawn a `brokerd.<backendname>` subactor service daemon | ||||
|     using `pikerd`'s service mngr. | ||||
| 
 | ||||
|     ''' | ||||
|     from piker.service._util import log  # use service mngr log | ||||
|     log.info(f'Spawning {brokername} broker daemon') | ||||
| 
 | ||||
|  | @ -211,33 +234,41 @@ async def spawn_brokerd( | |||
|         **tractor_kwargs, | ||||
|     ) | ||||
| 
 | ||||
|     brokermod = get_brokermod(brokername) | ||||
|     extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {}) | ||||
|     tractor_kwargs.update(extra_tractor_kwargs) | ||||
| 
 | ||||
|     # ask `pikerd` to spawn a new sub-actor and manage it under its | ||||
|     # actor nursery | ||||
|     from piker.service import Services | ||||
| 
 | ||||
|     from piker.service import ( | ||||
|         get_service_mngr, | ||||
|         ServiceMngr, | ||||
|     ) | ||||
|     dname: str = tractor_kwargs.pop('name')  # f'brokerd.{brokername}' | ||||
|     portal = await Services.actor_n.start_actor( | ||||
|         dname, | ||||
|         enable_modules=_data_mods + tractor_kwargs.pop('enable_modules'), | ||||
|         debug_mode=Services.debug_mode, | ||||
|     mngr: ServiceMngr = get_service_mngr() | ||||
|     ctx: tractor.Context = await mngr.start_service( | ||||
|         daemon_name=dname, | ||||
|         ctx_ep=partial( | ||||
|             # signature of target root-task endpoint | ||||
|             daemon_fixture_ep, | ||||
| 
 | ||||
|             # passed to daemon_fixture_ep(**kwargs) | ||||
|             brokername=brokername, | ||||
|             loglevel=loglevel, | ||||
|             debug_mode=mngr.debug_mode, | ||||
|         ), | ||||
|         debug_mode=mngr.debug_mode, | ||||
|         # ^TODO, allow overriding this per-daemon from client side? | ||||
|         # |_ it's already supported in `tractor` so.. | ||||
| 
 | ||||
|         loglevel=loglevel, | ||||
|         enable_modules=( | ||||
|             _data_mods | ||||
|             + | ||||
|             tractor_kwargs.pop('enable_modules') | ||||
|         ), | ||||
|         **tractor_kwargs | ||||
|     ) | ||||
| 
 | ||||
|     # NOTE: the service mngr expects an already spawned actor + its | ||||
|     # portal ref in order to do non-blocking setup of brokerd | ||||
|     # service nursery. | ||||
|     await Services.start_service_task( | ||||
|         dname, | ||||
|         portal, | ||||
| 
 | ||||
|         # signature of target root-task endpoint | ||||
|         daemon_fixture_ep, | ||||
|         brokername=brokername, | ||||
|         loglevel=loglevel, | ||||
|     assert ( | ||||
|         not ctx.cancel_called | ||||
|         and ctx.portal  # parent side | ||||
|         and dname in ctx.chan.uid  # subactor is named as desired | ||||
|     ) | ||||
|     return True | ||||
| 
 | ||||
|  | @ -262,8 +293,7 @@ async def maybe_spawn_brokerd( | |||
|     from piker.service import maybe_spawn_daemon | ||||
| 
 | ||||
|     async with maybe_spawn_daemon( | ||||
| 
 | ||||
|         f'brokerd.{brokername}', | ||||
|         service_name=f'brokerd.{brokername}', | ||||
|         service_task_target=spawn_brokerd, | ||||
|         spawn_args={ | ||||
|             'brokername': brokername, | ||||
|  |  | |||
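Reviewer note: to make the `spawn_kws` plumbing in `broker_init()` above concrete, this is roughly what a backend module declares to opt into tractor's infected-`asyncio` mode; the module name below is purely illustrative, but per the comments above both `ib` and `deribit` do exactly this (deribit's own `_spawn_kwargs` appears later in this diff).

```python
# hypothetical backend pkg, e.g. piker/brokers/mybackend/__init__.py
# `broker_init()` pulls this dict and merges it into the kwargs that
# eventually reach `ActorNursery.start_actor()` for `brokerd.mybackend`.
_spawn_kwargs: dict = {
    # run the provider's asyncio-only SDK inside the subactor via
    # tractor's "infected asyncio" mode.
    'infect_asyncio': True,
}
```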
|  | @ -18,10 +18,11 @@ | |||
| Handy cross-broker utils. | ||||
| 
 | ||||
| """ | ||||
| from __future__ import annotations | ||||
| from functools import partial | ||||
| 
 | ||||
| import json | ||||
| import asks | ||||
| import httpx | ||||
| import logging | ||||
| 
 | ||||
| from ..log import ( | ||||
|  | @ -60,11 +61,11 @@ class NoData(BrokerError): | |||
|     def __init__( | ||||
|         self, | ||||
|         *args, | ||||
|         info: dict, | ||||
|         info: dict|None = None, | ||||
| 
 | ||||
|     ) -> None: | ||||
|         super().__init__(*args) | ||||
|         self.info: dict = info | ||||
|         self.info: dict|None = info | ||||
| 
 | ||||
|         # when raised, machinery can check if the backend | ||||
|         # set a "frame size" for doing datetime calcs. | ||||
|  | @ -90,16 +91,18 @@ class DataThrottle(BrokerError): | |||
| 
 | ||||
| 
 | ||||
| def resproc( | ||||
|     resp: asks.response_objects.Response, | ||||
|     resp: httpx.Response, | ||||
|     log: logging.Logger, | ||||
|     return_json: bool = True, | ||||
|     log_resp: bool = False, | ||||
| 
 | ||||
| ) -> asks.response_objects.Response: | ||||
|     """Process response and return its json content. | ||||
| ) -> httpx.Response: | ||||
|     ''' | ||||
|     Process response and return its json content. | ||||
| 
 | ||||
|     Raise the appropriate error on non-200 OK responses. | ||||
|     """ | ||||
| 
 | ||||
|     ''' | ||||
|     if not resp.status_code == 200: | ||||
|         raise BrokerError(resp.body) | ||||
|     try: | ||||
|  |  | |||
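Reviewer note: a quick sketch of how the `asks` -> `httpx` migration of `resproc()` is meant to be driven; the endpoint and base URL are illustrative only and the `piker.brokers._util` module path is assumed from the hunk context.

```python
import httpx
import trio

from piker.brokers._util import resproc  # module path assumed
from piker.log import get_logger

log = get_logger(__name__)


async def fetch_server_time() -> dict:
    # illustrative public endpoint; the binance venue clients further
    # down this diff are driven the same way.
    async with httpx.AsyncClient(base_url='https://api.binance.com') as client:
        resp: httpx.Response = await client.get(url='/api/v3/time')
        # raises `BrokerError` (and friends) on non-200s; returns the
        # decoded json body by default.
        return resproc(resp, log)


if __name__ == '__main__':
    trio.run(fetch_server_time)
```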
|  | @ -1,8 +1,8 @@ | |||
| # piker: trading gear for hackers | ||||
| # Copyright (C) | ||||
| #   Guillermo Rodriguez (aka ze jefe) | ||||
| #   Tyler Goodlet | ||||
| #   (in stewardship for pikers) | ||||
| #  Guillermo Rodriguez (aka ze jefe) | ||||
| #  Tyler Goodlet | ||||
| #  (in stewardship for pikers) | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
|  | @ -25,14 +25,13 @@ from __future__ import annotations | |||
| from collections import ChainMap | ||||
| from contextlib import ( | ||||
|     asynccontextmanager as acm, | ||||
|     AsyncExitStack, | ||||
| ) | ||||
| from datetime import datetime | ||||
| from pprint import pformat | ||||
| from typing import ( | ||||
|     Any, | ||||
|     Callable, | ||||
|     Hashable, | ||||
|     Sequence, | ||||
|     Type, | ||||
| ) | ||||
| import hmac | ||||
|  | @ -43,8 +42,7 @@ import trio | |||
| from pendulum import ( | ||||
|     now, | ||||
| ) | ||||
| import asks | ||||
| from rapidfuzz import process as fuzzy | ||||
| import httpx | ||||
| import numpy as np | ||||
| 
 | ||||
| from piker import config | ||||
|  | @ -54,6 +52,7 @@ from piker.clearing._messages import ( | |||
| from piker.accounting import ( | ||||
|     Asset, | ||||
|     digits_to_dec, | ||||
|     MktPair, | ||||
| ) | ||||
| from piker.types import Struct | ||||
| from piker.data import ( | ||||
|  | @ -69,7 +68,6 @@ from .venues import ( | |||
|     PAIRTYPES, | ||||
|     Pair, | ||||
|     MarketType, | ||||
| 
 | ||||
|     _spot_url, | ||||
|     _futes_url, | ||||
|     _testnet_futes_url, | ||||
|  | @ -79,19 +77,18 @@ from .venues import ( | |||
| log = get_logger('piker.brokers.binance') | ||||
| 
 | ||||
| 
 | ||||
| def get_config() -> dict: | ||||
| 
 | ||||
| def get_config() -> dict[str, Any]: | ||||
|     conf: dict | ||||
|     path: Path | ||||
|     conf, path = config.load( | ||||
|         conf_name='brokers', | ||||
|         touch_if_dne=True, | ||||
|     ) | ||||
| 
 | ||||
|     section = conf.get('binance') | ||||
| 
 | ||||
|     section: dict = conf.get('binance') | ||||
|     if not section: | ||||
|         log.warning(f'No config section found for binance in {path}') | ||||
|         log.warning( | ||||
|             f'No config section found for binance in {path}' | ||||
|         ) | ||||
|         return {} | ||||
| 
 | ||||
|     return section | ||||
|  | @ -147,7 +144,7 @@ def binance_timestamp( | |||
| 
 | ||||
| class Client: | ||||
|     ''' | ||||
|     Async ReST API client using ``trio`` + ``asks`` B) | ||||
|     Async ReST API client using `trio` + `httpx` B) | ||||
| 
 | ||||
|     Supports all of the spot, margin and futures endpoints depending | ||||
|     on method. | ||||
|  | @ -156,10 +153,17 @@ class Client: | |||
|     def __init__( | ||||
|         self, | ||||
| 
 | ||||
|         venue_sessions: dict[ | ||||
|             str,  # venue key | ||||
|             tuple[httpx.AsyncClient, str]  # session, eps path | ||||
|         ], | ||||
|         conf: dict[str, Any], | ||||
|         # TODO: change this to `Client.[mkt_]venue: MarketType`? | ||||
|         mkt_mode: MarketType = 'spot', | ||||
| 
 | ||||
|     ) -> None: | ||||
|         self.conf = conf | ||||
| 
 | ||||
|         # build out pair info tables for each market type | ||||
|         # and wrap in a chain-map view for search / query. | ||||
|         self._spot_pairs: dict[str, Pair] = {}  # spot info table | ||||
|  | @ -186,44 +190,13 @@ class Client: | |||
|         # market symbols for use by search. See `.exch_info()`. | ||||
|         self._pairs: ChainMap[str, Pair] = ChainMap() | ||||
| 
 | ||||
|         # spot EPs sesh | ||||
|         self._sesh = asks.Session(connections=4) | ||||
|         self._sesh.base_location: str = _spot_url | ||||
|         # spot testnet | ||||
|         self._test_sesh: asks.Session = asks.Session(connections=4) | ||||
|         self._test_sesh.base_location: str = _testnet_spot_url | ||||
| 
 | ||||
|         # margin and extended spot endpoints session. | ||||
|         self._sapi_sesh = asks.Session(connections=4) | ||||
|         self._sapi_sesh.base_location: str = _spot_url | ||||
| 
 | ||||
|         # futes EPs sesh | ||||
|         self._fapi_sesh = asks.Session(connections=4) | ||||
|         self._fapi_sesh.base_location: str = _futes_url | ||||
|         # futes testnet | ||||
|         self._test_fapi_sesh: asks.Session = asks.Session(connections=4) | ||||
|         self._test_fapi_sesh.base_location: str = _testnet_futes_url | ||||
| 
 | ||||
|         # global client "venue selection" mode. | ||||
|         # set this when you want to switch venues and not have to | ||||
|         # specify the venue for the next request. | ||||
|         self.mkt_mode: MarketType = mkt_mode | ||||
| 
 | ||||
|         # per 8 | ||||
|         self.venue_sesh: dict[ | ||||
|             str,  # venue key | ||||
|             tuple[asks.Session, str]  # session, eps path | ||||
|         ] = { | ||||
|             'spot': (self._sesh, '/api/v3/'), | ||||
|             'spot_testnet': (self._test_sesh, '/fapi/v1/'), | ||||
| 
 | ||||
|             'margin': (self._sapi_sesh, '/sapi/v1/'), | ||||
| 
 | ||||
|             'usdtm_futes': (self._fapi_sesh, '/fapi/v1/'), | ||||
|             'usdtm_futes_testnet': (self._test_fapi_sesh, '/fapi/v1/'), | ||||
| 
 | ||||
|             # 'futes_coin': self._dapi,  # TODO | ||||
|         } | ||||
|         # per-mkt-venue API client table | ||||
|         self.venue_sesh = venue_sessions | ||||
| 
 | ||||
|         # lookup for going from `.mkt_mode: str` to the config | ||||
|         # subsection `key: str` | ||||
|  | @ -238,40 +211,6 @@ class Client: | |||
|             'futes': ['usdtm_futes'], | ||||
|         } | ||||
| 
 | ||||
|         # for creating API keys see, | ||||
|         # https://www.binance.com/en/support/faq/how-to-create-api-keys-on-binance-360002502072 | ||||
|         self.conf: dict = get_config() | ||||
| 
 | ||||
|         for key, subconf in self.conf.items(): | ||||
|             if api_key := subconf.get('api_key', ''): | ||||
|                 venue_keys: list[str] = self.confkey2venuekeys[key] | ||||
| 
 | ||||
|                 venue_key: str | ||||
|                 sesh: asks.Session | ||||
|                 for venue_key in venue_keys: | ||||
|                     sesh, _ = self.venue_sesh[venue_key] | ||||
| 
 | ||||
|                     api_key_header: dict = { | ||||
|                         # taken from official: | ||||
|                         # https://github.com/binance/binance-futures-connector-python/blob/main/binance/api.py#L47 | ||||
|                         "Content-Type": "application/json;charset=utf-8", | ||||
| 
 | ||||
|                         # TODO: prolly should just always query and copy | ||||
|                         # in the real latest ver? | ||||
|                         "User-Agent": "binance-connector/6.1.6smbz6", | ||||
|                         "X-MBX-APIKEY": api_key, | ||||
|                     } | ||||
|                     sesh.headers.update(api_key_header) | ||||
| 
 | ||||
|                     # if `.use_tesnet = true` in the config then | ||||
|                     # also add headers for the testnet session which | ||||
|                     # will be used for all order control | ||||
|                     if subconf.get('use_testnet', False): | ||||
|                         testnet_sesh, _ = self.venue_sesh[ | ||||
|                             venue_key + '_testnet' | ||||
|                         ] | ||||
|                         testnet_sesh.headers.update(api_key_header) | ||||
| 
 | ||||
|     def _mk_sig( | ||||
|         self, | ||||
|         data: dict, | ||||
|  | @ -290,7 +229,6 @@ class Client: | |||
|                 'to define the creds for auth-ed endpoints!?' | ||||
|             ) | ||||
| 
 | ||||
| 
 | ||||
|         # XXX: Info on security and authentication | ||||
|         # https://binance-docs.github.io/apidocs/#endpoint-security-type | ||||
|         if not (api_secret := subconf.get('api_secret')): | ||||
|  | @ -319,7 +257,7 @@ class Client: | |||
|         params: dict, | ||||
| 
 | ||||
|         method: str = 'get', | ||||
|         venue: str | None = None,  # if None use `.mkt_mode` state | ||||
|         venue: str|None = None,  # if None use `.mkt_mode` state | ||||
|         signed: bool = False, | ||||
|         allow_testnet: bool = False, | ||||
| 
 | ||||
|  | @ -330,8 +268,9 @@ class Client: | |||
|         - /fapi/v3/ USD-M FUTURES, or | ||||
|         - /api/v3/ SPOT/MARGIN | ||||
| 
 | ||||
|         account/market endpoint request depending on either passed in `venue: str` | ||||
|         or the current setting `.mkt_mode: str` setting, default `'spot'`. | ||||
|         account/market endpoint request depending on either the passed in | ||||
|         `venue: str` or the current `.mkt_mode: str` setting, | ||||
|         default `'spot'`. | ||||
| 
 | ||||
| 
 | ||||
|         Docs per venue API: | ||||
|  | @ -360,9 +299,6 @@ class Client: | |||
|                 venue=venue_key, | ||||
|             ) | ||||
| 
 | ||||
|         sesh: asks.Session | ||||
|         path: str | ||||
| 
 | ||||
|         # Check if we're configured to route order requests to the | ||||
|         # venue equivalent's testnet. | ||||
|         use_testnet: bool = False | ||||
|  | @ -387,11 +323,12 @@ class Client: | |||
|             # ctl machinery B) | ||||
|             venue_key += '_testnet' | ||||
| 
 | ||||
|         sesh, path = self.venue_sesh[venue_key] | ||||
| 
 | ||||
|         meth: Callable = getattr(sesh, method) | ||||
|         client: httpx.AsyncClient | ||||
|         path: str | ||||
|         client, path = self.venue_sesh[venue_key] | ||||
|         meth: Callable = getattr(client, method) | ||||
|         resp = await meth( | ||||
|             path=path + endpoint, | ||||
|             url=path + endpoint, | ||||
|             params=params, | ||||
|             timeout=float('inf'), | ||||
|         ) | ||||
|  | @ -433,7 +370,15 @@ class Client: | |||
|                 item['filters'] = filters | ||||
| 
 | ||||
|             pair_type: Type = PAIRTYPES[venue] | ||||
|             pair: Pair = pair_type(**item) | ||||
|             try: | ||||
|                 pair: Pair = pair_type(**item) | ||||
|             except Exception as e: | ||||
|                 e.add_note( | ||||
|                     "\nDon't panic, prolly stupid binance changed their symbology schema again..\n" | ||||
|                     'Check out their API docs here:\n\n' | ||||
|                     'https://binance-docs.github.io/apidocs/spot/en/#exchange-information' | ||||
|                 ) | ||||
|                 raise | ||||
|             pair_table[pair.symbol.upper()] = pair | ||||
| 
 | ||||
|             # update an additional top-level-cross-venue-table | ||||
|  | @ -528,7 +473,9 @@ class Client: | |||
| 
 | ||||
|         ''' | ||||
|         pair_table: dict[str, Pair] = self._venue2pairs[ | ||||
|             venue or self.mkt_mode | ||||
|             venue | ||||
|             or | ||||
|             self.mkt_mode | ||||
|         ] | ||||
|         if ( | ||||
|             expiry | ||||
|  | @ -547,9 +494,9 @@ class Client: | |||
|             venues: list[str] = [venue] | ||||
| 
 | ||||
|         # batch per-venue download of all exchange infos | ||||
|         async with trio.open_nursery() as rn: | ||||
|         async with trio.open_nursery() as tn: | ||||
|             for ven in venues: | ||||
|                 rn.start_soon( | ||||
|                 tn.start_soon( | ||||
|                     self._cache_pairs, | ||||
|                     ven, | ||||
|                 ) | ||||
|  | @ -602,11 +549,11 @@ class Client: | |||
| 
 | ||||
|     ) -> dict[str, Any]: | ||||
| 
 | ||||
|         fq_pairs: dict = await self.exch_info() | ||||
|         fq_pairs: dict[str, Pair] = await self.exch_info() | ||||
| 
 | ||||
|         # TODO: cache this list like we were in | ||||
|         # `open_symbol_search()`? | ||||
|         keys: list[str] = list(fq_pairs) | ||||
|         # keys: list[str] = list(fq_pairs) | ||||
| 
 | ||||
|         return match_from_pairs( | ||||
|             pairs=fq_pairs, | ||||
|  | @ -614,9 +561,20 @@ class Client: | |||
|             score_cutoff=50, | ||||
|         ) | ||||
| 
 | ||||
|     def pair2venuekey( | ||||
|         self, | ||||
|         pair: Pair, | ||||
|     ) -> str: | ||||
|         return { | ||||
|             'USDTM': 'usdtm_futes', | ||||
|             'SPOT': 'spot', | ||||
|             # 'COINM': 'coin_futes', | ||||
|             # ^-TODO-^ bc someone might want it..? | ||||
|         }[pair.venue] | ||||
| 
 | ||||
|     async def bars( | ||||
|         self, | ||||
|         symbol: str, | ||||
|         mkt: MktPair, | ||||
| 
 | ||||
|         start_dt: datetime | None = None, | ||||
|         end_dt: datetime | None = None, | ||||
|  | @ -646,16 +604,20 @@ class Client: | |||
|         start_time = binance_timestamp(start_dt) | ||||
|         end_time = binance_timestamp(end_dt) | ||||
| 
 | ||||
|         bs_pair: Pair = self._pairs[mkt.bs_fqme.upper()] | ||||
| 
 | ||||
|         # https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-data | ||||
|         bars = await self._api( | ||||
|             'klines', | ||||
|             params={ | ||||
|                 'symbol': symbol.upper(), | ||||
|                 # NOTE: always query using their native symbology! | ||||
|                 'symbol': mkt.bs_mktid.upper(), | ||||
|                 'interval': '1m', | ||||
|                 'startTime': start_time, | ||||
|                 'endTime': end_time, | ||||
|                 'limit': limit | ||||
|             }, | ||||
|             venue=self.pair2venuekey(bs_pair), | ||||
|             allow_testnet=False, | ||||
|         ) | ||||
|         new_bars: list[tuple] = [] | ||||
|  | @ -972,17 +934,148 @@ class Client: | |||
|         await self.close_listen_key(key) | ||||
| 
 | ||||
| 
 | ||||
| _venue_urls: dict[str, str] = { | ||||
|     'spot': ( | ||||
|         _spot_url, | ||||
|         '/api/v3/', | ||||
|     ), | ||||
|     'spot_testnet': ( | ||||
|         _testnet_spot_url, | ||||
|         '/fapi/v1/' | ||||
|     ), | ||||
|     # margin and extended spot endpoints session. | ||||
|     # TODO: did this ever get implemented fully? | ||||
|     # 'margin': ( | ||||
|     #     _spot_url, | ||||
|     #     '/sapi/v1/' | ||||
|     # ), | ||||
| 
 | ||||
|     'usdtm_futes': ( | ||||
|         _futes_url, | ||||
|         '/fapi/v1/', | ||||
|     ), | ||||
| 
 | ||||
|     'usdtm_futes_testnet': ( | ||||
|         _testnet_futes_url, | ||||
|         '/fapi/v1/', | ||||
|     ), | ||||
| 
 | ||||
|     # TODO: for anyone who actually needs it ;P | ||||
|     # 'coin_futes': () | ||||
| } | ||||
| 
 | ||||
| 
 | ||||
| def init_api_keys( | ||||
|     client: Client, | ||||
|     conf: dict[str, Any], | ||||
| ) -> None: | ||||
|     ''' | ||||
|     Set up per-venue API keys for each http client according to the user's | ||||
|     `brokers.conf`. | ||||
| 
 | ||||
|     For ex, to use spot-testnet and live usdt futures APIs: | ||||
| 
 | ||||
|     ```toml | ||||
|         [binance] | ||||
|         # spot test net | ||||
|         spot.use_testnet = true | ||||
|         spot.api_key = '<spot_api_key_from_binance_account>' | ||||
|         spot.api_secret = '<spot_api_key_password>' | ||||
| 
 | ||||
|         # futes live | ||||
|         futes.use_testnet = false | ||||
|         accounts.usdtm = 'futes' | ||||
|         futes.api_key = '<futes_api_key_from_binance>' | ||||
|         futes.api_secret = '<futes_api_key_password>' | ||||
| 
 | ||||
|         # if uncommented will use the built-in paper engine and not | ||||
|         # connect to `binance` API servers for order ctl. | ||||
|         # accounts.paper = 'paper' | ||||
|     ``` | ||||
| 
 | ||||
|     ''' | ||||
|     for key, subconf in conf.items(): | ||||
|         if api_key := subconf.get('api_key', ''): | ||||
|             venue_keys: list[str] = client.confkey2venuekeys[key] | ||||
| 
 | ||||
|             venue_key: str | ||||
|             venue_api: httpx.AsyncClient | ||||
|             for venue_key in venue_keys: | ||||
|                 venue_api, _ = client.venue_sesh[venue_key] | ||||
| 
 | ||||
|                 api_key_header: dict = { | ||||
|                     # taken from official: | ||||
|                     # https://github.com/binance/binance-futures-connector-python/blob/main/binance/api.py#L47 | ||||
|                     "Content-Type": "application/json;charset=utf-8", | ||||
| 
 | ||||
|                     # TODO: prolly should just always query and copy | ||||
|                     # in the real latest ver? | ||||
|                     "User-Agent": "binance-connector/6.1.6smbz6", | ||||
|                     "X-MBX-APIKEY": api_key, | ||||
|                 } | ||||
|                 venue_api.headers.update(api_key_header) | ||||
| 
 | ||||
|                 # if `.use_testnet = true` in the config then | ||||
|                 # also add headers for the testnet session which | ||||
|                 # will be used for all order control | ||||
|                 if subconf.get('use_testnet', False): | ||||
|                     testnet_sesh, _ = client.venue_sesh[ | ||||
|                         venue_key + '_testnet' | ||||
|                     ] | ||||
|                     testnet_sesh.headers.update(api_key_header) | ||||
| 
 | ||||
| 
 | ||||
| @acm | ||||
| async def get_client() -> Client: | ||||
| async def get_client( | ||||
|     mkt_mode: MarketType = 'spot', | ||||
| ) -> Client: | ||||
|     ''' | ||||
|     Construct a single `piker` client which composes multiple underlying | ||||
|     venue-specific API clients for both live and test networks. | ||||
| 
 | ||||
|     client = Client() | ||||
|     await client.exch_info() | ||||
|     log.info( | ||||
|         f'{client} in {client.mkt_mode} mode: caching exchange infos..\n' | ||||
|         'Cached multi-market pairs:\n' | ||||
|         f'spot: {len(client._spot_pairs)}\n' | ||||
|         f'usdtm_futes: {len(client._ufutes_pairs)}\n' | ||||
|         f'Total: {len(client._pairs)}\n' | ||||
|     ) | ||||
|     ''' | ||||
|     venue_sessions: dict[ | ||||
|         str,  # venue key | ||||
|         tuple[httpx.AsyncClient, str]  # session, eps path | ||||
|     ] = {} | ||||
|     async with AsyncExitStack() as client_stack: | ||||
|         for name, (base_url, path) in _venue_urls.items(): | ||||
|             api: httpx.AsyncClient = await client_stack.enter_async_context( | ||||
|                 httpx.AsyncClient( | ||||
|                     base_url=base_url, | ||||
|                     # headers={}, | ||||
| 
 | ||||
|     yield client | ||||
|                     # TODO: is there a way to numerate this? | ||||
|                     # https://www.python-httpx.org/advanced/clients/#why-use-a-client | ||||
|                     # connections=4 | ||||
|                 ) | ||||
|             ) | ||||
|             venue_sessions[name] = ( | ||||
|                 api, | ||||
|                 path, | ||||
|             ) | ||||
| 
 | ||||
|         conf: dict[str, Any] = get_config() | ||||
|         # for creating API keys see, | ||||
|         # https://www.binance.com/en/support/faq/how-to-create-api-keys-on-binance-360002502072 | ||||
|         client = Client( | ||||
|             venue_sessions=venue_sessions, | ||||
|             conf=conf, | ||||
|             mkt_mode=mkt_mode, | ||||
|         ) | ||||
|         init_api_keys( | ||||
|             client=client, | ||||
|             conf=conf, | ||||
|         ) | ||||
|         fq_pairs: dict[str, Pair] = await client.exch_info() | ||||
|         assert fq_pairs | ||||
|         log.info( | ||||
|             f'Loaded multi-venue `Client` in mkt_mode={client.mkt_mode!r}\n\n' | ||||
|             f'Symbology Summary:\n' | ||||
|             f'------ - ------\n' | ||||
|             f'spot: {len(client._spot_pairs)}\n' | ||||
|             f'usdtm_futes: {len(client._ufutes_pairs)}\n' | ||||
|             '------ - ------\n' | ||||
|             f'total: {len(client._pairs)}\n' | ||||
|         ) | ||||
|         yield client | ||||
|  |  | |||
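Reviewer note: a minimal usage sketch of the reworked venue-composed constructor above (import path assumed from the hunk context; no API creds are needed just to pull public exchange info):

```python
import trio

# import path assumed (binance backend api module)
from piker.brokers.binance.api import get_client


async def main() -> None:
    # defaults to `mkt_mode='spot'`; the acm builds one `httpx.AsyncClient`
    # per venue in `_venue_urls`, installs any configured API keys, then
    # pre-caches exchange info across venues before yielding.
    async with get_client() as client:
        pairs = await client.exch_info()
        print(f'{len(pairs)} pairs cached in {client.mkt_mode!r} mode')


if __name__ == '__main__':
    trio.run(main)
```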
|  | @ -264,15 +264,20 @@ async def open_trade_dialog( | |||
|     # do a open_symcache() call.. though maybe we can hide | ||||
|     # this in a new async version of open_account()? | ||||
|     async with open_cached_client('binance') as client: | ||||
|         subconf: dict = client.conf[venue_name] | ||||
|         use_testnet = subconf.get('use_testnet', False) | ||||
|         subconf: dict|None = client.conf.get(venue_name) | ||||
| 
 | ||||
|         # XXX: if no futes.api_key or spot.api_key has been set we | ||||
|         # always fall back to the paper engine! | ||||
|         if not subconf.get('api_key'): | ||||
|         if ( | ||||
|             not subconf | ||||
|             or | ||||
|             not subconf.get('api_key') | ||||
|         ): | ||||
|             await ctx.started('paper') | ||||
|             return | ||||
| 
 | ||||
|         use_testnet: bool = subconf.get('use_testnet', False) | ||||
| 
 | ||||
|     async with ( | ||||
|         open_cached_client('binance') as client, | ||||
|     ): | ||||
|  |  | |||
|  | @ -42,12 +42,12 @@ from trio_typing import TaskStatus | |||
| from pendulum import ( | ||||
|     from_timestamp, | ||||
| ) | ||||
| from rapidfuzz import process as fuzzy | ||||
| import numpy as np | ||||
| import tractor | ||||
| 
 | ||||
| from piker.brokers import ( | ||||
|     open_cached_client, | ||||
|     NoData, | ||||
| ) | ||||
| from piker._cacheables import ( | ||||
|     async_lifo_cache, | ||||
|  | @ -252,24 +252,30 @@ async def open_history_client( | |||
|             else: | ||||
|                 client.mkt_mode = 'spot' | ||||
| 
 | ||||
|             # NOTE: always query using their native symbology! | ||||
|             mktid: str = mkt.bs_mktid | ||||
|             array = await client.bars( | ||||
|                 mktid, | ||||
|             array: np.ndarray = await client.bars( | ||||
|                 mkt=mkt, | ||||
|                 start_dt=start_dt, | ||||
|                 end_dt=end_dt, | ||||
|             ) | ||||
|             if array.size == 0: | ||||
|                 raise NoData( | ||||
|                     f'No frame for {start_dt} -> {end_dt}\n' | ||||
|                 ) | ||||
| 
 | ||||
|             times = array['time'] | ||||
|             if ( | ||||
|                 end_dt is None | ||||
|             ): | ||||
|                 inow = round(time.time()) | ||||
|             if not times.any(): | ||||
|                 raise ValueError( | ||||
|                     'Bad frame with null-times?\n\n' | ||||
|                     f'{times}' | ||||
|                 ) | ||||
| 
 | ||||
|             if end_dt is None: | ||||
|                 inow: int = round(time.time()) | ||||
|                 if (inow - times[-1]) > 60: | ||||
|                     await tractor.pause() | ||||
| 
 | ||||
|             start_dt = from_timestamp(times[0]) | ||||
|             end_dt = from_timestamp(times[-1]) | ||||
| 
 | ||||
|             return array, start_dt, end_dt | ||||
| 
 | ||||
|         yield get_ohlc, {'erlangs': 3, 'rate': 3} | ||||
|  | @ -540,7 +546,7 @@ async def open_symbol_search( | |||
|                 ) | ||||
| 
 | ||||
|                 # repack in fqme-keyed table | ||||
|                 byfqme: dict[start, Pair] = {} | ||||
|                 byfqme: dict[str, Pair] = {} | ||||
|                 for pair in pairs.values(): | ||||
|                     byfqme[pair.bs_fqme] = pair | ||||
| 
 | ||||
|  |  | |||
|  | @ -137,10 +137,12 @@ class SpotPair(Pair, frozen=True): | |||
|     quoteOrderQtyMarketAllowed: bool | ||||
|     isSpotTradingAllowed: bool | ||||
|     isMarginTradingAllowed: bool | ||||
|     otoAllowed: bool | ||||
| 
 | ||||
|     defaultSelfTradePreventionMode: str | ||||
|     allowedSelfTradePreventionModes: list[str] | ||||
|     permissions: list[str] | ||||
|     permissionSets: list[list[str]] | ||||
| 
 | ||||
|     # NOTE: see `.data._symcache.SymbologyCache.load()` for why | ||||
|     ns_path: str = 'piker.brokers.binance:SpotPair' | ||||
|  | @ -179,7 +181,6 @@ class FutesPair(Pair): | |||
|     quoteAsset: str  # 'USDT', | ||||
|     quotePrecision: int  # 8, | ||||
|     requiredMarginPercent: float  # '5.0000', | ||||
|     settlePlan: int  # 0, | ||||
|     timeInForce: list[str]  # ['GTC', 'IOC', 'FOK', 'GTX'], | ||||
|     triggerProtect: float  # '0.0500', | ||||
|     underlyingSubType: list[str]  # ['PoW'], | ||||
|  |  | |||
|  | @ -25,6 +25,7 @@ from .api import ( | |||
|     get_client, | ||||
| ) | ||||
| from .feed import ( | ||||
|     get_mkt_info, | ||||
|     open_history_client, | ||||
|     open_symbol_search, | ||||
|     stream_quotes, | ||||
|  | @ -34,15 +35,20 @@ from .feed import ( | |||
|     # open_trade_dialog, | ||||
|     # norm_trade_records, | ||||
| # ) | ||||
| from .venues import ( | ||||
|     OptionPair, | ||||
| ) | ||||
| 
 | ||||
| log = get_logger(__name__) | ||||
| 
 | ||||
| __all__ = [ | ||||
|     'get_client', | ||||
| #    'trades_dialogue', | ||||
|     'get_mkt_info', | ||||
|     'open_history_client', | ||||
|     'open_symbol_search', | ||||
|     'stream_quotes', | ||||
|     'OptionPair', | ||||
| #    'norm_trade_records', | ||||
| ] | ||||
| 
 | ||||
|  |  | |||
										
											
												File diff suppressed because it is too large
											
										
									
								
							|  | @ -18,38 +18,59 @@ | |||
| Deribit backend. | ||||
| 
 | ||||
| ''' | ||||
| from __future__ import annotations | ||||
| from contextlib import asynccontextmanager as acm | ||||
| from datetime import datetime | ||||
| from typing import Any, Optional, Callable | ||||
| from typing import ( | ||||
|     # Any, | ||||
|     # Optional, | ||||
|     Callable, | ||||
| ) | ||||
| # from pprint import pformat | ||||
| import time | ||||
| 
 | ||||
| import cryptofeed | ||||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| import pendulum | ||||
| from rapidfuzz import process as fuzzy | ||||
| from pendulum import ( | ||||
|     from_timestamp, | ||||
| ) | ||||
| import numpy as np | ||||
| import tractor | ||||
| 
 | ||||
| from piker.brokers import open_cached_client | ||||
| from piker.log import get_logger, get_console_log | ||||
| from piker.data import ShmArray | ||||
| from piker.brokers._util import ( | ||||
|     BrokerError, | ||||
| from piker.accounting import ( | ||||
|     Asset, | ||||
|     MktPair, | ||||
|     unpack_fqme, | ||||
| ) | ||||
| from piker.brokers import ( | ||||
|     open_cached_client, | ||||
|     NoData, | ||||
|     DataUnavailable, | ||||
| ) | ||||
| 
 | ||||
| from cryptofeed import FeedHandler | ||||
| from cryptofeed.defines import ( | ||||
|     DERIBIT, L1_BOOK, TRADES, OPTION, CALL, PUT | ||||
| from piker._cacheables import ( | ||||
|     async_lifo_cache, | ||||
| ) | ||||
| from cryptofeed.symbols import Symbol | ||||
| from piker.log import ( | ||||
|     get_logger, | ||||
|     mk_repr, | ||||
| ) | ||||
| from piker.data.validate import FeedInit | ||||
| 
 | ||||
| 
 | ||||
| from .api import ( | ||||
|     Client, Trade, | ||||
|     get_config, | ||||
|     str_to_cb_sym, piker_sym_to_cb_sym, cb_sym_to_deribit_inst, | ||||
|     Client, | ||||
|     # get_config, | ||||
|     piker_sym_to_cb_sym, | ||||
|     cb_sym_to_deribit_inst, | ||||
|     str_to_cb_sym, | ||||
|     maybe_open_price_feed | ||||
| ) | ||||
| from .venues import ( | ||||
|     Pair, | ||||
|     OptionPair, | ||||
|     Trade, | ||||
| ) | ||||
| 
 | ||||
| _spawn_kwargs = { | ||||
|     'infect_asyncio': True, | ||||
|  | @ -64,90 +85,215 @@ async def open_history_client( | |||
|     mkt: MktPair, | ||||
| ) -> tuple[Callable, int]: | ||||
| 
 | ||||
|     fnstrument: str = mkt.bs_fqme | ||||
|     # TODO implement history getter for the new storage layer. | ||||
|     async with open_cached_client('deribit') as client: | ||||
| 
 | ||||
|         pair: OptionPair = client._pairs[mkt.dst.name] | ||||
|         # XXX NOTE, the cuckers use ms !!! | ||||
|         creation_time_s: int = pair.creation_timestamp/1000 | ||||
| 
 | ||||
|         async def get_ohlc( | ||||
|             end_dt: Optional[datetime] = None, | ||||
|             start_dt: Optional[datetime] = None, | ||||
|             timeframe: float, | ||||
|             end_dt: datetime | None = None, | ||||
|             start_dt: datetime | None = None, | ||||
| 
 | ||||
|         ) -> tuple[ | ||||
|             np.ndarray, | ||||
|             datetime,  # start | ||||
|             datetime,  # end | ||||
|         ]: | ||||
|             if timeframe != 60: | ||||
|                 raise DataUnavailable('Only 1m bars are supported') | ||||
| 
 | ||||
|             array = await client.bars( | ||||
|                 instrument, | ||||
|             array: np.ndarray = await client.bars( | ||||
|                 mkt, | ||||
|                 start_dt=start_dt, | ||||
|                 end_dt=end_dt, | ||||
|             ) | ||||
|             if len(array) == 0: | ||||
|                 raise DataUnavailable | ||||
|                 if ( | ||||
|                     end_dt is None | ||||
|                 ): | ||||
|                     raise DataUnavailable( | ||||
|                         'No history seems to exist yet?\n\n' | ||||
|                         f'{mkt}' | ||||
|                     ) | ||||
|                 elif ( | ||||
|                     end_dt | ||||
|                     and | ||||
|                     end_dt.timestamp() < creation_time_s | ||||
|                 ): | ||||
|                     # the contract can't have history | ||||
|                     # before it was created. | ||||
|                     pair_type_str: str = type(pair).__name__ | ||||
|                     create_dt: datetime = from_timestamp(creation_time_s) | ||||
|                     raise DataUnavailable( | ||||
|                         f'No history prior to\n' | ||||
|                         f'`{pair_type_str}.creation_timestamp: int = ' | ||||
|                         f'{pair.creation_timestamp}\n\n' | ||||
|                         f'------ deribit sux ------\n' | ||||
|                         f'WHICH IN "NORMAL PEOPLE WHO USE EPOCH TIME" form is,\n' | ||||
|                         f'creation_time_s: {creation_time_s}\n' | ||||
|                         f'create_dt: {create_dt}\n' | ||||
|                     ) | ||||
|                 raise NoData( | ||||
|                     f'No frame for {start_dt} -> {end_dt}\n' | ||||
|                 ) | ||||
| 
 | ||||
|             start_dt = pendulum.from_timestamp(array[0]['time']) | ||||
|             end_dt = pendulum.from_timestamp(array[-1]['time']) | ||||
|             start_dt = from_timestamp(array[0]['time']) | ||||
|             end_dt = from_timestamp(array[-1]['time']) | ||||
| 
 | ||||
|             times = array['time'] | ||||
|             if not times.any(): | ||||
|                 raise ValueError( | ||||
|                     'Bad frame with null-times?\n\n' | ||||
|                     f'{times}' | ||||
|                 ) | ||||
| 
 | ||||
|             if end_dt is None: | ||||
|                 inow: int = round(time.time()) | ||||
|                 if (inow - times[-1]) > 60: | ||||
|                     await tractor.pause() | ||||
| 
 | ||||
|             return array, start_dt, end_dt | ||||
| 
 | ||||
|         yield get_ohlc, {'erlangs': 3, 'rate': 3} | ||||
|         yield ( | ||||
|             get_ohlc, | ||||
|             {  # backfill config | ||||
|                 'erlangs': 3, | ||||
|                 'rate': 3, | ||||
|             } | ||||
|         ) | ||||
| 
 | ||||
| 
 | ||||
| @async_lifo_cache() | ||||
| async def get_mkt_info( | ||||
|     fqme: str, | ||||
| 
 | ||||
| ) -> tuple[MktPair, Pair|OptionPair] | None: | ||||
| 
 | ||||
|     # uppercase since kraken bs_mktid is always upper | ||||
|     if 'deribit' not in fqme.lower(): | ||||
|         fqme += '.deribit' | ||||
| 
 | ||||
|     mkt_mode: str = '' | ||||
|     broker, mkt_ep, venue, expiry = unpack_fqme(fqme) | ||||
| 
 | ||||
|     # NOTE: we always upper case all tokens to be consistent with | ||||
|     # binance's symbology style for pairs, like `BTCUSDT`, but in | ||||
|     # theory we could also just keep things lower case; as long as | ||||
|     # we're consistent and the symcache matches whatever this func | ||||
|     # returns, always! | ||||
|     expiry: str = expiry.upper() | ||||
|     venue: str = venue.upper() | ||||
|     # venue_lower: str = venue.lower() | ||||
| 
 | ||||
|     mkt_mode: str = 'option' | ||||
| 
 | ||||
|     async with open_cached_client( | ||||
|         'deribit', | ||||
|     ) as client: | ||||
| 
 | ||||
|         assets: dict[str, Asset] = await client.get_assets() | ||||
|         pair_str: str = mkt_ep.lower() | ||||
| 
 | ||||
|         pair: Pair = await client.exch_info( | ||||
|             sym=pair_str, | ||||
|         ) | ||||
|         mkt_mode = pair.venue | ||||
|         client.mkt_mode = mkt_mode | ||||
| 
 | ||||
|         dst: Asset | None = assets.get(pair.bs_dst_asset) | ||||
|         src: Asset | None = assets.get(pair.bs_src_asset) | ||||
| 
 | ||||
|         mkt = MktPair( | ||||
|             dst=dst, | ||||
|             src=src, | ||||
|             price_tick=pair.price_tick, | ||||
|             size_tick=pair.size_tick, | ||||
|             bs_mktid=pair.symbol, | ||||
|             venue=mkt_mode, | ||||
|             broker='deribit', | ||||
|             _atype=mkt_mode, | ||||
|             _fqme_without_src=True, | ||||
| 
 | ||||
|             # expiry=pair.expiry, | ||||
|             # XXX TODO, currently we don't use it since it's | ||||
|             # already "described" in the `OptionPair.symbol: str` | ||||
|             # and if we slap in the ISO repr it's kinda hideous.. | ||||
|             # -[ ] figure out the best either std | ||||
|         ) | ||||
|         return mkt, pair | ||||
| 
 | ||||
| 
 | ||||
| async def stream_quotes( | ||||
| 
 | ||||
|     send_chan: trio.abc.SendChannel, | ||||
|     symbols: list[str], | ||||
|     feed_is_live: trio.Event, | ||||
|     loglevel: str = None, | ||||
| 
 | ||||
|     # startup sync | ||||
|     task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED, | ||||
| 
 | ||||
| ) -> None: | ||||
|     # XXX: required to propagate ``tractor`` loglevel to piker logging | ||||
|     get_console_log(loglevel or tractor.current_actor().loglevel) | ||||
|     ''' | ||||
|     Open a live quote stream for the market set defined by `symbols`. | ||||
| 
 | ||||
|     sym = symbols[0] | ||||
|     Internally this starts a `cryptofeed.FeedHandler` inside an `asyncio`-side | ||||
|     task and relays L1 book and `Trade` msgs through to our `trio.Task`. | ||||
| 
 | ||||
|     ''' | ||||
|     sym = symbols[0].split('.')[0] | ||||
|     init_msgs: list[FeedInit] = [] | ||||
| 
 | ||||
|     # multiline nested `dict` formatter (since rn quote-msgs are | ||||
|     # just that). | ||||
|     pfmt: Callable[[str], str] = mk_repr( | ||||
|         # so we can see `deribit`'s delightfully mega-long bs fields.. | ||||
|         maxstring=100, | ||||
|     ) | ||||
| 
 | ||||
|     async with ( | ||||
|         open_cached_client('deribit') as client, | ||||
|         send_chan as send_chan | ||||
|     ): | ||||
|         mkt: MktPair | ||||
|         pair: Pair | ||||
|         mkt, pair = await get_mkt_info(sym) | ||||
| 
 | ||||
|         init_msgs = { | ||||
|             # pass back token, and bool, signalling if we're the writer | ||||
|             # and that history has been written | ||||
|             sym: { | ||||
|                 'symbol_info': { | ||||
|                     'asset_type': 'option', | ||||
|                     'price_tick_size': 0.0005 | ||||
|                 }, | ||||
|                 'shm_write_opts': {'sum_tick_vml': False}, | ||||
|                 'fqsn': sym, | ||||
|             }, | ||||
|         } | ||||
|         # build out init msgs according to latest spec | ||||
|         init_msgs.append( | ||||
|             FeedInit( | ||||
|                 mkt_info=mkt, | ||||
|             ) | ||||
|         ) | ||||
|         # build `cryptofeed` feed-handle | ||||
|         cf_sym: cryptofeed.Symbol = piker_sym_to_cb_sym(sym) | ||||
| 
 | ||||
|         nsym = piker_sym_to_cb_sym(sym) | ||||
|         from_cf: tractor.to_asyncio.LinkedTaskChannel | ||||
|         async with maybe_open_price_feed(sym) as from_cf: | ||||
| 
 | ||||
|         async with maybe_open_price_feed(sym) as stream: | ||||
|             # load the "last trades" summary | ||||
|             last_trades_res: cryptofeed.LastTradesResult = await client.last_trades( | ||||
|                 cb_sym_to_deribit_inst(cf_sym), | ||||
|                 count=1, | ||||
|             ) | ||||
|             last_trades: list[Trade] = last_trades_res.trades | ||||
| 
 | ||||
|             cache = await client.cache_symbols() | ||||
|             # TODO, do we even need this or will the above always | ||||
|             # work? | ||||
|             # if not last_trades: | ||||
|             #     await tractor.pause() | ||||
|             #     async for typ, quote in from_cf: | ||||
|             #         if typ == 'trade': | ||||
|             #             last_trade = Trade(**(quote['data'])) | ||||
|             #             break | ||||
| 
 | ||||
|             last_trades = (await client.last_trades( | ||||
|                 cb_sym_to_deribit_inst(nsym), count=1)).trades | ||||
|             # else: | ||||
|             last_trade = Trade( | ||||
|                 **(last_trades[0]) | ||||
|             ) | ||||
| 
 | ||||
|             if len(last_trades) == 0: | ||||
|                 last_trade = None | ||||
|                 async for typ, quote in stream: | ||||
|                     if typ == 'trade': | ||||
|                         last_trade = Trade(**(quote['data'])) | ||||
|                         break | ||||
| 
 | ||||
|             else: | ||||
|                 last_trade = Trade(**(last_trades[0])) | ||||
| 
 | ||||
|             first_quote = { | ||||
|             first_quote: dict = { | ||||
|                 'symbol': sym, | ||||
|                 'last': last_trade.price, | ||||
|                 'brokerd_ts': last_trade.timestamp, | ||||
|  | @ -158,13 +304,84 @@ async def stream_quotes( | |||
|                     'broker_ts': last_trade.timestamp | ||||
|                 }] | ||||
|             } | ||||
|             task_status.started((init_msgs,  first_quote)) | ||||
|             task_status.started(( | ||||
|                 init_msgs, | ||||
|                 first_quote, | ||||
|             )) | ||||
| 
 | ||||
|             feed_is_live.set() | ||||
| 
 | ||||
|             async for typ, quote in stream: | ||||
|                 topic = quote['symbol'] | ||||
|                 await send_chan.send({topic: quote}) | ||||
|             # NOTE XXX, static for now! | ||||
|             # => since this only handles ONE mkt feed at a time we | ||||
|             # don't need a lookup table to map interleaved quotes | ||||
|             # from multiple possible mkt-pairs | ||||
|             topic: str = mkt.bs_fqme | ||||
| 
 | ||||
|             # deliver until cancelled | ||||
|             async for typ, ref in from_cf: | ||||
|                 match typ: | ||||
|                     case 'trade': | ||||
|                         trade: cryptofeed.types.Trade = ref | ||||
| 
 | ||||
|                         # TODO, re-impl this according to teh ideal | ||||
|                         # fqme for opts that we choose!! | ||||
|                         bs_fqme: str = cb_sym_to_deribit_inst( | ||||
|                             str_to_cb_sym(trade.symbol) | ||||
|                         ).lower() | ||||
| 
 | ||||
|                         piker_quote: dict = { | ||||
|                             'symbol': bs_fqme, | ||||
|                             'last': trade.price, | ||||
|                             'broker_ts': time.time(), | ||||
|                             # ^TODO, name this `brokerd/datad_ts` and | ||||
|                             # use `time.time_ns()` ?? | ||||
|                             'ticks': [{ | ||||
|                                 'type': 'trade', | ||||
|                                 'price': float(trade.price), | ||||
|                                 'size': float(trade.amount), | ||||
|                                 'broker_ts': trade.timestamp, | ||||
|                             }], | ||||
|                         } | ||||
|                         log.info( | ||||
|                             f'deribit {typ!r} quote for {sym!r}\n\n' | ||||
|                             f'{trade}\n\n' | ||||
|                             f'{pfmt(piker_quote)}\n' | ||||
|                         ) | ||||
| 
 | ||||
|                     case 'l1': | ||||
|                         book: cryptofeed.types.L1Book = ref | ||||
| 
 | ||||
|                         # TODO, so this is where we can possibly change things | ||||
|                         # and instead lever the `MktPair.bs_fqme: str` output? | ||||
|                         bs_fqme: str = cb_sym_to_deribit_inst( | ||||
|                             str_to_cb_sym(book.symbol) | ||||
|                         ).lower() | ||||
| 
 | ||||
|                         piker_quote: dict = { | ||||
|                             'symbol': bs_fqme, | ||||
|                             'ticks': [ | ||||
| 
 | ||||
|                                 {'type': 'bid', | ||||
|                                  'price': float(book.bid_price), | ||||
|                                  'size': float(book.bid_size)}, | ||||
| 
 | ||||
|                                 {'type': 'bsize', | ||||
|                                  'price': float(book.bid_price), | ||||
|                                  'size': float(book.bid_size),}, | ||||
| 
 | ||||
|                                 {'type': 'ask', | ||||
|                                  'price': float(book.ask_price), | ||||
|                                  'size': float(book.ask_size),}, | ||||
| 
 | ||||
|                                 {'type': 'asize', | ||||
|                                  'price': float(book.ask_price), | ||||
|                                  'size': float(book.ask_size),} | ||||
|                             ] | ||||
|                         } | ||||
| 
 | ||||
|                 await send_chan.send({ | ||||
|                     topic: piker_quote, | ||||
|                 }) | ||||
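As an aside on the quote shape being sent above: each cryptofeed event gets repacked into a flat dict with a 'ticks' list before hitting the send channel. A minimal, self-contained sketch of that trade-side repacking follows; the `FakeTrade` dataclass and `normalize_trade()` helper are illustrative stand-ins (not part of the backend) for the `cryptofeed.types.Trade` input.

import time
from dataclasses import dataclass


@dataclass
class FakeTrade:
    # illustrative stand-in for `cryptofeed.types.Trade`
    symbol: str
    price: float
    amount: float
    timestamp: float


def normalize_trade(
    trade: FakeTrade,
    bs_fqme: str,
) -> dict:
    # repack one trade event into the flat quote dict the feed
    # layer expects: a top-level 'last' plus a list of tick entries.
    return {
        'symbol': bs_fqme,
        'last': trade.price,
        'broker_ts': time.time(),
        'ticks': [{
            'type': 'trade',
            'price': float(trade.price),
            'size': float(trade.amount),
            'broker_ts': trade.timestamp,
        }],
    }


quote: dict = normalize_trade(
    FakeTrade('BTC-27DEC24-100000-C', 0.042, 1.0, time.time()),
    bs_fqme='btc-27dec24-100000-c',
)
assert quote['ticks'][0]['type'] == 'trade'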
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
|  | @ -174,12 +391,21 @@ async def open_symbol_search( | |||
|     async with open_cached_client('deribit') as client: | ||||
| 
 | ||||
|         # load all symbols locally for fast search | ||||
|         cache = await client.cache_symbols() | ||||
|         # cache = client._pairs | ||||
|         await ctx.started() | ||||
| 
 | ||||
|         async with ctx.open_stream() as stream: | ||||
| 
 | ||||
|             pattern: str | ||||
|             async for pattern in stream: | ||||
|                 # repack in dict form | ||||
|                 await stream.send( | ||||
|                     await client.search_symbols(pattern)) | ||||
| 
 | ||||
|                 # NOTE: pattern fuzzy-matching is done within | ||||
|                 # the method impl. | ||||
|                 pairs: dict[str, Pair] = await client.search_symbols( | ||||
|                     pattern, | ||||
|                 ) | ||||
|                 # repack in fqme-keyed table | ||||
|                 byfqme: dict[str, Pair] = {} | ||||
|                 for pair in pairs.values(): | ||||
|                     byfqme[pair.bs_fqme] = pair | ||||
| 
 | ||||
|                 await stream.send(byfqme) | ||||
|  |  | |||
|  | @ -0,0 +1,196 @@ | |||
| # piker: trading gear for hackers | ||||
| # Copyright (C) Tyler Goodlet (in stewardship for pikers) | ||||
| 
 | ||||
| # This program is free software: you can redistribute it and/or modify | ||||
| # it under the terms of the GNU Affero General Public License as published by | ||||
| # the Free Software Foundation, either version 3 of the License, or | ||||
| # (at your option) any later version. | ||||
| 
 | ||||
| # This program is distributed in the hope that it will be useful, | ||||
| # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||||
| # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the | ||||
| # GNU Affero General Public License for more details. | ||||
| 
 | ||||
| # You should have received a copy of the GNU Affero General Public License | ||||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| """ | ||||
| Per-market data-type definitions and schema types. | ||||
| 
 | ||||
| """ | ||||
| from __future__ import annotations | ||||
| import pendulum | ||||
| from typing import ( | ||||
|     Literal, | ||||
|     Optional, | ||||
| ) | ||||
| from decimal import Decimal | ||||
| 
 | ||||
| from piker.types import Struct | ||||
| 
 | ||||
| 
 | ||||
| # API endpoint paths by venue / sub-API | ||||
| _domain: str = 'deribit.com' | ||||
| _url = f'https://www.{_domain}' | ||||
| 
 | ||||
| # WEBsocketz | ||||
| _ws_url: str = f'wss://www.{_domain}/ws/api/v2' | ||||
| 
 | ||||
| # test nets | ||||
| _testnet_ws_url: str = f'wss://test.{_domain}/ws/api/v2' | ||||
| 
 | ||||
| MarketType = Literal[ | ||||
|     'option' | ||||
| ] | ||||
| 
 | ||||
| 
 | ||||
| def get_api_eps(venue: MarketType) -> tuple[str, ...]: | ||||
|     ''' | ||||
|     Return API ep root paths per venue. | ||||
| 
 | ||||
|     ''' | ||||
|     return { | ||||
|         'option': ( | ||||
|             _ws_url, | ||||
|         ), | ||||
|     }[venue] | ||||
| 
 | ||||
| 
 | ||||
| class Pair(Struct, frozen=True, kw_only=True): | ||||
| 
 | ||||
|     symbol: str | ||||
| 
 | ||||
|     # src | ||||
|     quote_currency: str # 'BTC' | ||||
| 
 | ||||
|     # dst | ||||
|     base_currency: str # "BTC", | ||||
| 
 | ||||
|     tick_size: float  # 0.0001 | ||||
|     # [{'above_price': 0.005, 'tick_size': 0.0005}] | ||||
|     tick_size_steps: list[dict[str, float]] | ||||
| 
 | ||||
|     @property | ||||
|     def price_tick(self) -> Decimal: | ||||
|         return Decimal(str(self.tick_size_steps[0]['above_price'])) | ||||
| 
 | ||||
|     @property | ||||
|     def size_tick(self) -> Decimal: | ||||
|         return Decimal(str(self.tick_size)) | ||||
| 
 | ||||
|     @property | ||||
|     def bs_fqme(self) -> str: | ||||
|         return f'{self.symbol}' | ||||
| 
 | ||||
|     @property | ||||
|     def bs_mktid(self) -> str: | ||||
|         return f'{self.symbol}.{self.venue}' | ||||
| 
 | ||||
| 
 | ||||
| class OptionPair(Pair, frozen=True): | ||||
| 
 | ||||
|     taker_commission: float # 0.0003 | ||||
|     strike: float # 5000.0 | ||||
|     settlement_period: str # 'day' | ||||
|     settlement_currency: str # "BTC", | ||||
|     rfq: bool # false | ||||
|     price_index: str # 'btc_usd' | ||||
|     option_type: str # 'call' | ||||
|     min_trade_amount: float # 0.1 | ||||
|     maker_commission: float # 0.0003 | ||||
|     kind: str # 'option' | ||||
|     is_active: bool # true | ||||
|     instrument_type: str # 'reversed' | ||||
|     instrument_name: str # 'BTC-1SEP24-55000-C' | ||||
|     instrument_id: int # 364671 | ||||
|     expiration_timestamp: int # 1725177600000 | ||||
|     creation_timestamp: int # 1724918461000 | ||||
|     counter_currency: str # 'USD'  | ||||
|     contract_size: float # '1.0' | ||||
|     block_trade_tick_size: float # '0.0001' | ||||
|     block_trade_min_trade_amount: int # '25' | ||||
|     block_trade_commission: float # '0.003' | ||||
| 
 | ||||
|     # NOTE: see `.data._symcache.SymbologyCache.load()` for why | ||||
|     ns_path: str = 'piker.brokers.deribit:OptionPair' | ||||
| 
 | ||||
|     # TODO, impl this without the MM:SS part of | ||||
|     # the `'THH:MM:SS..'` etc.. | ||||
|     @property | ||||
|     def expiry(self) -> str: | ||||
|         iso_date = pendulum.from_timestamp( | ||||
|             self.expiration_timestamp / 1000 | ||||
|         ).isoformat() | ||||
|         return iso_date  | ||||
| 
 | ||||
|     @property | ||||
|     def venue(self) -> str: | ||||
|         return f'{self.instrument_type}_option' | ||||
| 
 | ||||
|     @property | ||||
|     def bs_fqme(self) -> str: | ||||
|         return f'{self.symbol}' | ||||
| 
 | ||||
|     @property | ||||
|     def bs_src_asset(self) -> str: | ||||
|         return f'{self.quote_currency}' | ||||
| 
 | ||||
|     @property | ||||
|     def bs_dst_asset(self) -> str: | ||||
|         return f'{self.symbol}' | ||||
| 
 | ||||
| 
 | ||||
| PAIRTYPES: dict[MarketType, Pair] = { | ||||
|     'option': OptionPair, | ||||
| } | ||||
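A quick, standalone sanity check of the epoch-milliseconds handling behind `OptionPair.expiry` above; the literal timestamp is just the example value from the field comments.

import pendulum

# Deribit reports expiries as epoch-milliseconds (see the
# `expiration_timestamp` comment value above).
expiration_timestamp: int = 1725177600000

iso_expiry: str = pendulum.from_timestamp(
    expiration_timestamp / 1000
).isoformat()

# deribit options settle at 08:00 UTC on the expiry date
assert iso_expiry == '2024-09-01T08:00:00+00:00'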
| 
 | ||||
| 
 | ||||
| class JSONRPCResult(Struct): | ||||
|     id: int | ||||
|     usIn: int | ||||
|     usOut: int | ||||
|     usDiff: int | ||||
|     testnet: bool | ||||
|     jsonrpc: str = '2.0' | ||||
|     error: Optional[dict] = None | ||||
|     result: Optional[list[dict]] = None | ||||
| 
 | ||||
| 
 | ||||
| class JSONRPCChannel(Struct): | ||||
|     method: str | ||||
|     params: dict | ||||
|     jsonrpc: str = '2.0' | ||||
| 
 | ||||
| 
 | ||||
| class KLinesResult(Struct): | ||||
|     low: list[float] | ||||
|     cost: list[float] | ||||
|     high: list[float] | ||||
|     open: list[float] | ||||
|     close: list[float] | ||||
|     ticks: list[int] | ||||
|     status: str | ||||
|     volume: list[float] | ||||
| 
 | ||||
| 
 | ||||
| class Trade(Struct): | ||||
|     iv: float | ||||
|     price: float | ||||
|     amount: float | ||||
|     trade_id: str | ||||
|     contracts: float | ||||
|     direction: str | ||||
|     trade_seq: int | ||||
|     timestamp: int | ||||
|     mark_price: float | ||||
|     index_price: float | ||||
|     tick_direction: int | ||||
|     instrument_name: str | ||||
|     combo_id: Optional[str] = '' | ||||
|     combo_trade_id: Optional[int] = 0 | ||||
|     block_trade_id: Optional[str] = '' | ||||
|     block_trade_leg_count: Optional[int] = 0 | ||||
| 
 | ||||
| 
 | ||||
| class LastTradesResult(Struct): | ||||
|     trades: list[Trade] | ||||
|     has_more: bool | ||||
|  | @ -100,7 +100,7 @@ async def data_reset_hack( | |||
|         log.warning( | ||||
|             no_setup_msg | ||||
|             + | ||||
|             f'REQUIRES A `vnc_addrs: array` ENTRY' | ||||
|             'REQUIRES A `vnc_addrs: array` ENTRY' | ||||
|         ) | ||||
| 
 | ||||
|     vnc_host, vnc_port = vnc_sockaddr.get( | ||||
|  | @ -259,7 +259,7 @@ def i3ipc_xdotool_manual_click_hack() -> None: | |||
|                         timeout=timeout, | ||||
|                     ) | ||||
| 
 | ||||
|     # re-activate and focus original window | ||||
|         # re-activate and focus original window | ||||
|         subprocess.call([ | ||||
|             'xdotool', | ||||
|             'windowactivate', '--sync', str(orig_win_id), | ||||
|  |  | |||
|  | @ -287,9 +287,31 @@ class Client: | |||
|         self.conf = config | ||||
| 
 | ||||
|         # NOTE: the ib.client here is "throttled" to 45 rps by default | ||||
|         self.ib = ib | ||||
|         self.ib: IB = ib | ||||
|         self.ib.RaiseRequestErrors: bool = True | ||||
| 
 | ||||
|         # self._acnt_names: set[str] = {} | ||||
|         self._acnt_names: list[str] = [] | ||||
| 
 | ||||
|     @property | ||||
|     def acnts(self) -> list[str]: | ||||
|         # return list(self._acnt_names) | ||||
|         return self._acnt_names | ||||
| 
 | ||||
|     def __repr__(self) -> str: | ||||
|         return ( | ||||
|             f'<{type(self).__name__}(' | ||||
|             f'ib={self.ib} ' | ||||
|             f'acnts={self.acnts}' | ||||
| 
 | ||||
|             # TODO: we need to mask out acnt-#s and other private | ||||
|             # infos if we're going to console this! | ||||
|             # f' |_.conf:\n' | ||||
|             # f'    {pformat(self.conf)}\n' | ||||
| 
 | ||||
|             ')>' | ||||
|         ) | ||||
| 
 | ||||
|     async def get_fills(self) -> list[Fill]: | ||||
|         ''' | ||||
|         Return list of recent `Fill`s from the trading session. | ||||
|  | @ -376,55 +398,63 @@ class Client: | |||
|             # whatToShow='MIDPOINT', | ||||
|             # whatToShow='TRADES', | ||||
|         ) | ||||
|         log.info( | ||||
|             f'REQUESTING {ib_duration_str} worth {bar_size} BARS\n' | ||||
|             f'fqme: {fqme}\n' | ||||
|             f'global _enters: {_enters}\n' | ||||
|             f'kwargs: {pformat(kwargs)}\n' | ||||
|         ) | ||||
| 
 | ||||
|         bars = await self.ib.reqHistoricalDataAsync( | ||||
|             **kwargs, | ||||
|         ) | ||||
| 
 | ||||
|         query_info: str = ( | ||||
|             f'REQUESTING IB history BARS\n' | ||||
|             f'    ------ - ------\n' | ||||
|             f'dt_duration: {dt_duration}\n' | ||||
|             f'ib_duration_str: {ib_duration_str}\n' | ||||
|             f'bar_size: {bar_size}\n' | ||||
|             f'fqme: {fqme}\n' | ||||
|             f'actor-global _enters: {_enters}\n' | ||||
|             f'kwargs: {pformat(kwargs)}\n' | ||||
|         ) | ||||
|         # tail case if no history for range or none prior. | ||||
|         # NOTE: there's actually 3 cases here to handle (and | ||||
|         # this should be read alongside the implementation of | ||||
|         # `.reqHistoricalDataAsync()`): | ||||
|         # - a timeout occurred in which case insync internals return | ||||
|         #   an empty list thing with bars.clear()... | ||||
|         # - no data exists for the period likely due to | ||||
|         #   a weekend, holiday or other non-trading period prior to | ||||
|         #   ``end_dt`` which exceeds the ``duration``, | ||||
|         # - LITERALLY this is the start of the mkt's history! | ||||
|         if not bars: | ||||
|             # NOTE: there's actually 3 cases here to handle (and | ||||
|             # this should be read alongside the implementation of | ||||
|             # `.reqHistoricalDataAsync()`): | ||||
|             # - a timeout occurred in which case insync internals return | ||||
|             #   an empty list thing with bars.clear()... | ||||
|             # - no data exists for the period likely due to | ||||
|             #   a weekend, holiday or other non-trading period prior to | ||||
|             #   ``end_dt`` which exceeds the ``duration``, | ||||
|             # - LITERALLY this is the start of the mkt's history! | ||||
|             # TODO: figure out wut's going on here. | ||||
| 
 | ||||
|             # TODO: is this handy, a sync requester for tinkering | ||||
|             # with empty frame cases? | ||||
|             # def get_hist(): | ||||
|             #     return self.ib.reqHistoricalData(**kwargs) | ||||
|             # import pdbp | ||||
|             # pdbp.set_trace() | ||||
| 
 | ||||
|             # sync requester for debugging empty frame cases | ||||
|             def get_hist(): | ||||
|                 return self.ib.reqHistoricalData(**kwargs) | ||||
|             log.critical( | ||||
|                 'STUPID IB SAYS NO HISTORY\n\n' | ||||
|                 + query_info | ||||
|             ) | ||||
| 
 | ||||
|             assert get_hist | ||||
|             import pdbp | ||||
|             pdbp.set_trace() | ||||
| 
 | ||||
|             return [], np.empty(0), dt_duration | ||||
|             # TODO: we could maybe raise ``NoData`` instead if we | ||||
|             # rewrite the method in the first case? right now there's no | ||||
|             # way to detect a timeout. | ||||
|             # rewrite the method in the first case? | ||||
|             # right now there's no way to detect a timeout.. | ||||
|             return [], np.empty(0), dt_duration | ||||
| 
 | ||||
|         # NOTE XXX: ensure minimum duration in bars B) | ||||
|         # => we recursively call this method until we get at least | ||||
|         # as many bars such that they sum in aggregate to the | ||||
|         # desired total time (duration) at most. | ||||
|         # XXX XXX XXX | ||||
|         # WHY DID WE EVEN NEED THIS ORIGINALLY!? | ||||
|         # XXX XXX XXX | ||||
|         # - if you query over a gap and get no data | ||||
|         #   that may short circuit the history  | ||||
|         log.info(query_info) | ||||
|         # NOTE XXX: ensure minimum duration in bars? | ||||
|         # => recursively call this method until we get at least as | ||||
|         #   many bars such that they sum in aggregate to the | ||||
|         #   desired total time (duration) at most. | ||||
|         #  - if you query over a gap and get no data | ||||
|         #    that may short circuit the history | ||||
|         if ( | ||||
|             end_dt | ||||
|             and False | ||||
|             # XXX XXX XXX | ||||
|             # => WHY DID WE EVEN NEED THIS ORIGINALLY!? <= | ||||
|             # XXX XXX XXX | ||||
|             False | ||||
|             and end_dt | ||||
|         ): | ||||
|             nparr: np.ndarray = bars_to_np(bars) | ||||
|             times: np.ndarray = nparr['time'] | ||||
|  | @ -927,7 +957,10 @@ class Client: | |||
|                         warnset = True | ||||
| 
 | ||||
|             else: | ||||
|                 log.info(f'Got first quote for {contract}') | ||||
|                 log.info( | ||||
|                     'Got first quote for contract\n' | ||||
|                     f'{contract}\n' | ||||
|                 ) | ||||
|                 break | ||||
|         else: | ||||
|             if timeouterr and raise_on_timeout: | ||||
|  | @ -991,8 +1024,12 @@ class Client: | |||
|                     outsideRth=True, | ||||
| 
 | ||||
|                     optOutSmartRouting=True, | ||||
|                     # TODO: need to understand this setting better as | ||||
|                     # it pertains to shit ass mms.. | ||||
|                     routeMarketableToBbo=True, | ||||
| 
 | ||||
|                     designatedLocation='SMART', | ||||
| 
 | ||||
|                     # TODO: make all orders GTC? | ||||
|                     # https://interactivebrokers.github.io/tws-api/classIBApi_1_1Order.html#a95539081751afb9980f4c6bd1655a6ba | ||||
|                     # goodTillDate=f"yyyyMMdd-HH:mm:ss", | ||||
|  | @ -1120,8 +1157,8 @@ def get_config() -> dict[str, Any]: | |||
|     names = list(accounts.keys()) | ||||
|     accts = section['accounts'] = bidict(accounts) | ||||
|     log.info( | ||||
|         f'brokers.toml defines {len(accts)} accounts: ' | ||||
|         f'{pformat(names)}' | ||||
|         f'{path} defines {len(accts)} account aliases:\n' | ||||
|         f'{pformat(names)}\n' | ||||
|     ) | ||||
| 
 | ||||
|     if section is None: | ||||
|  | @ -1188,7 +1225,7 @@ async def load_aio_clients( | |||
|         try_ports = list(try_ports.values()) | ||||
| 
 | ||||
|     _err = None | ||||
|     accounts_def = config.load_accounts(['ib']) | ||||
|     accounts_def: dict[str, str] = config.load_accounts(['ib']) | ||||
|     ports = try_ports if port is None else [port] | ||||
|     combos = list(itertools.product(hosts, ports)) | ||||
|     accounts_found: dict[str, Client] = {} | ||||
|  | @ -1227,7 +1264,9 @@ async def load_aio_clients( | |||
|                 client = Client(ib=ib, config=conf) | ||||
| 
 | ||||
|                 # update all actor-global caches | ||||
|                 log.info(f"Caching client for {sockaddr}") | ||||
|                 log.runtime( | ||||
|                     f'Connected and caching `Client` @ {sockaddr!r}' | ||||
|                 ) | ||||
|                 _client_cache[sockaddr] = client | ||||
|                 break | ||||
| 
 | ||||
|  | @ -1242,37 +1281,59 @@ async def load_aio_clients( | |||
|                 OSError, | ||||
|             ) as ce: | ||||
|                 _err = ce | ||||
|                 log.warning( | ||||
|                     f'Failed to connect on {host}:{port} for {i} time with,\n' | ||||
|                     f'{ib.client.apiError.value()}\n' | ||||
|                     'retrying with a new client id..') | ||||
|                 message: str = ( | ||||
|                     f'Failed to connect on {host}:{port} after {i} tries with\n' | ||||
|                     f'{ib.client.apiError.value()!r}\n\n' | ||||
|                     'Retrying with a new client id..\n' | ||||
|                 ) | ||||
|                 log.runtime(message) | ||||
|         else: | ||||
|             # XXX report loudly if we never established after all | ||||
|             # re-tries | ||||
|             log.warning(message) | ||||
| 
 | ||||
|         # Pre-collect all accounts available for this | ||||
|         # connection and map account names to this client | ||||
|         # instance. | ||||
|         for value in ib.accountValues(): | ||||
|             acct_number = value.account | ||||
|             acct_number: str = value.account | ||||
| 
 | ||||
|             entry = accounts_def.inverse.get(acct_number) | ||||
|             if not entry: | ||||
|             acnt_alias: str = accounts_def.inverse.get(acct_number) | ||||
|             if not acnt_alias: | ||||
| 
 | ||||
|                 # TODO: should we construct the below reco-ex from | ||||
|                 # the existing config content? | ||||
|                 _, path = config.load( | ||||
|                     conf_name='brokers', | ||||
|                 ) | ||||
|                 raise ValueError( | ||||
|                     'No section in brokers.toml for account:' | ||||
|                     f' {acct_number}\n' | ||||
|                     f'Please add entry to continue using this API client' | ||||
|                     'No alias in account section for account!\n' | ||||
|                     f'Please add an acnt alias entry to your {path}\n' | ||||
|                     'For example,\n\n' | ||||
| 
 | ||||
|                     '[ib.accounts]\n' | ||||
|                     f'margin = {acct_number!r}\n' | ||||
|                     '^^^^^^ <- you need this part!\n\n' | ||||
| 
 | ||||
|                     'This ensures `piker` will not leak private acnt info ' | ||||
|                     'to console output by default!\n' | ||||
|                 ) | ||||
| 
 | ||||
|             # surjection of account names to operating clients. | ||||
|             if acct_number not in accounts_found: | ||||
|                 accounts_found[entry] = client | ||||
|             if acnt_alias not in accounts_found: | ||||
|                 accounts_found[acnt_alias] = client | ||||
|                 # client._acnt_names.add(acnt_alias) | ||||
|                 client._acnt_names.append(acnt_alias) | ||||
| 
 | ||||
|         log.info( | ||||
|             f'Loaded accounts for client @ {host}:{port}\n' | ||||
|             f'{pformat(accounts_found)}' | ||||
|         ) | ||||
|         if accounts_found: | ||||
|             log.info( | ||||
|                 f'Loaded accounts for api client\n\n' | ||||
|                 f'{pformat(accounts_found)}\n' | ||||
|             ) | ||||
| 
 | ||||
|         # XXX: why aren't we just updating this directy above | ||||
|         # instead of using the intermediary `accounts_found`? | ||||
|         _accounts2clients.update(accounts_found) | ||||
|             # XXX: why aren't we just updating this directy above | ||||
|             # instead of using the intermediary `accounts_found`? | ||||
|             _accounts2clients.update(accounts_found) | ||||
| 
 | ||||
|     # if we have no clients after the scan loop then error out. | ||||
|     if not _client_cache: | ||||
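To make the alias lookup in the hunk above concrete: `accounts_def` is a `bidict` of alias -> account-number parsed from `brokers.toml`, and its inverse view maps API-reported account numbers back to their aliases. A tiny sketch with made-up values:

from bidict import bidict

# alias -> account-number mapping as it would be parsed from the
# `[ib.accounts]` table in brokers.toml (values fabricated here).
accounts_def = bidict({
    'margin': 'DU1234567',
    'ira': 'DU7654321',
})

# given an account number reported by the API, recover the alias so
# raw account ids never leak into console output by default.
acct_number: str = 'DU1234567'
acnt_alias: str | None = accounts_def.inverse.get(acct_number)
assert acnt_alias == 'margin'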
|  | @ -1472,7 +1533,7 @@ async def open_aio_client_method_relay( | |||
|         msg: tuple[str, dict] | dict | None = await from_trio.get() | ||||
|         match msg: | ||||
|             case None:  # termination sentinel | ||||
|                 print('asyncio PROXY-RELAY SHUTDOWN') | ||||
|                 log.info('asyncio `Client` method-proxy SHUTDOWN!') | ||||
|                 break | ||||
| 
 | ||||
|             case (meth_name, kwargs): | ||||
|  |  | |||
|  | @ -1183,7 +1183,14 @@ async def deliver_trade_events( | |||
|                         pos | ||||
|                         and fill | ||||
|                     ): | ||||
|                         assert fill.commissionReport == cr | ||||
|                         now_cr: CommissionReport = fill.commissionReport | ||||
|                         if (now_cr != cr): | ||||
|                             log.warning( | ||||
|                                 'UhhHh ib updated the commission report mid-fill..?\n' | ||||
|                                 f'was: {pformat(cr)}\n' | ||||
|                                 f'now: {pformat(now_cr)}\n' | ||||
|                             ) | ||||
| 
 | ||||
|                         await emit_pp_update( | ||||
|                             ems_stream, | ||||
|                             accounts_def, | ||||
|  |  | |||
|  | @ -671,8 +671,8 @@ async def _setup_quote_stream( | |||
|         # making them mostly useless and explains why the scanner | ||||
|         # is always slow XD | ||||
|         # '293',  # Trade count for day | ||||
|         '294',  # Trade rate / minute | ||||
|         '295',  # Vlm rate / minute | ||||
|         # '294',  # Trade rate / minute | ||||
|         # '295',  # Vlm rate / minute | ||||
|     ), | ||||
|     contract: Contract | None = None, | ||||
| 
 | ||||
|  | @ -915,9 +915,13 @@ async def stream_quotes( | |||
| 
 | ||||
|         if first_ticker: | ||||
|             first_quote: dict = normalize(first_ticker) | ||||
|             log.info( | ||||
|                 'Rxed init quote:\n' | ||||
|                 f'{pformat(first_quote)}' | ||||
| 
 | ||||
|             # TODO: we need a stack-oriented log levels filters for | ||||
|             # this! | ||||
|             # log.info(message, filter={'stack': 'live_feed'}) ? | ||||
|             log.runtime( | ||||
|                 'Rxed init quote:\n\n' | ||||
|                 f'{pformat(first_quote)}\n' | ||||
|             ) | ||||
| 
 | ||||
|         # NOTE: it might be outside regular trading hours for | ||||
|  | @ -969,7 +973,11 @@ async def stream_quotes( | |||
|             raise_on_timeout=True, | ||||
|         ) | ||||
|         first_quote: dict = normalize(first_ticker) | ||||
|         log.info( | ||||
| 
 | ||||
|         # TODO: we need a stack-oriented log levels filters for | ||||
|         # this! | ||||
|         # log.info(message, filter={'stack': 'live_feed'}) ? | ||||
|         log.runtime( | ||||
|             'Rxed init quote:\n' | ||||
|             f'{pformat(first_quote)}' | ||||
|         ) | ||||
|  |  | |||
|  | @ -31,7 +31,11 @@ from typing import ( | |||
| ) | ||||
| 
 | ||||
| from bidict import bidict | ||||
| import pendulum | ||||
| from pendulum import ( | ||||
|     DateTime, | ||||
|     parse, | ||||
|     from_timestamp, | ||||
| ) | ||||
| from ib_insync import ( | ||||
|     Contract, | ||||
|     Commodity, | ||||
|  | @ -66,10 +70,11 @@ tx_sort: Callable = partial( | |||
|     iter_by_dt, | ||||
|     parsers={ | ||||
|         'dateTime': parse_flex_dt, | ||||
|         'datetime': pendulum.parse, | ||||
|         # for some fucking 2022 and | ||||
|         # back options records...fuck me. | ||||
|         'date': pendulum.parse, | ||||
|         'datetime': parse, | ||||
| 
 | ||||
|         # XXX: for some fucking 2022 and | ||||
|         # back options records.. f@#$ me.. | ||||
|         'date': parse, | ||||
|     } | ||||
| ) | ||||
| 
 | ||||
|  | @ -89,15 +94,38 @@ def norm_trade( | |||
| 
 | ||||
|     conid: str = str(record.get('conId') or record['conid']) | ||||
|     bs_mktid: str = str(conid) | ||||
|     comms = record.get('commission') | ||||
|     if comms is None: | ||||
|         comms = -1*record['ibCommission'] | ||||
| 
 | ||||
|     price = record.get('price') or record['tradePrice'] | ||||
|     # NOTE: sometimes weird records (like BTTX?) | ||||
|     # have no field for this? | ||||
|     comms: float = -1 * ( | ||||
|         record.get('commission') | ||||
|         or record.get('ibCommission') | ||||
|         or 0 | ||||
|     ) | ||||
|     if not comms: | ||||
|         log.warning( | ||||
|             'No commissions found for record?\n' | ||||
|             f'{pformat(record)}\n' | ||||
|         ) | ||||
| 
 | ||||
|     price: float = ( | ||||
|         record.get('price') | ||||
|         or record.get('tradePrice') | ||||
|     ) | ||||
|     if price is None: | ||||
|         log.warning( | ||||
|             'No `price` field found in record?\n' | ||||
|             'Skipping normalization..\n' | ||||
|             f'{pformat(record)}\n' | ||||
|         ) | ||||
|         return None | ||||
| 
 | ||||
|     # the api doesn't do the -/+ on the quantity for you but flex | ||||
|     # records do.. are you fucking serious ib...!? | ||||
|     size = record.get('quantity') or record['shares'] * { | ||||
|     size: float|int = ( | ||||
|         record.get('quantity') | ||||
|         or record['shares'] | ||||
|     ) * { | ||||
|         'BOT': 1, | ||||
|         'SLD': -1, | ||||
|     }[record['side']] | ||||
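A stand-alone sketch of the sign-normalization described in the comment above, using a fabricated API-style fill record; the point is just the side -> multiplier mapping, since API records carry unsigned quantities while flex records are already signed.

# fabricated API-style fill record: `quantity` is unsigned and the
# trade direction only lives in the `side` field.
record: dict = {
    'side': 'SLD',
    'quantity': 100,
    'price': 12.5,
}

size: float = record['quantity'] * {
    'BOT': 1,   # buy  -> positive size
    'SLD': -1,  # sell -> negative size
}[record['side']]

assert size == -100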
|  | @ -128,26 +156,31 @@ def norm_trade( | |||
|         # otype = tail[6] | ||||
|         # strike = tail[7:] | ||||
| 
 | ||||
|         print(f'skipping opts contract {symbol}') | ||||
|         log.warning( | ||||
|             f'Skipping option contract -> NO SUPPORT YET!\n' | ||||
|             f'{symbol}\n' | ||||
|         ) | ||||
|         return None | ||||
| 
 | ||||
|     # timestamping is way different in API records | ||||
|     dtstr = record.get('datetime') | ||||
|     date = record.get('date') | ||||
|     flex_dtstr = record.get('dateTime') | ||||
|     dtstr: str = record.get('datetime') | ||||
|     date: str = record.get('date') | ||||
|     flex_dtstr: str = record.get('dateTime') | ||||
| 
 | ||||
|     if dtstr or date: | ||||
|         dt = pendulum.parse(dtstr or date) | ||||
|         dt: DateTime = parse(dtstr or date) | ||||
| 
 | ||||
|     elif flex_dtstr: | ||||
|         # probably a flex record with a wonky non-std timestamp.. | ||||
|         dt = parse_flex_dt(record['dateTime']) | ||||
|         dt: DateTime = parse_flex_dt(record['dateTime']) | ||||
| 
 | ||||
|     # special handling of symbol extraction from | ||||
|     # flex records using some ad-hoc schema parsing. | ||||
|     asset_type: str = record.get( | ||||
|         'assetCategory' | ||||
|     ) or record.get('secType', 'STK') | ||||
|     asset_type: str = ( | ||||
|         record.get('assetCategory') | ||||
|         or record.get('secType') | ||||
|         or 'STK' | ||||
|     ) | ||||
| 
 | ||||
|     if (expiry := ( | ||||
|             record.get('lastTradeDateOrContractMonth') | ||||
|  | @ -357,6 +390,7 @@ def norm_trade_records( | |||
|         if txn is None: | ||||
|             continue | ||||
| 
 | ||||
|         # inject txns sorted by datetime | ||||
|         insort( | ||||
|             records, | ||||
|             txn, | ||||
|  | @ -405,7 +439,7 @@ def api_trades_to_ledger_entries( | |||
|                     txn_dict[attr_name] = val | ||||
| 
 | ||||
|         tid = str(txn_dict['execId']) | ||||
|         dt = pendulum.from_timestamp(txn_dict['time']) | ||||
|         dt = from_timestamp(txn_dict['time']) | ||||
|         txn_dict['datetime'] = str(dt) | ||||
|         acctid = accounts[txn_dict['acctNumber']] | ||||
| 
 | ||||
|  |  | |||
|  | @ -209,7 +209,10 @@ async def open_symbol_search(ctx: tractor.Context) -> None: | |||
|                 break | ||||
| 
 | ||||
|             ib_client = proxy._aio_ns.ib | ||||
|             log.info(f'Using {ib_client} for symbol search') | ||||
|             log.info( | ||||
|                 f'Using API client for symbol-search\n' | ||||
|                 f'{ib_client}\n' | ||||
|             ) | ||||
| 
 | ||||
|             last = time.time() | ||||
|             async for pattern in stream: | ||||
|  | @ -294,7 +297,7 @@ async def open_symbol_search(ctx: tractor.Context) -> None: | |||
|                     elif stock_results: | ||||
|                         break | ||||
|                     # else: | ||||
|                     await tractor.pause() | ||||
|                     # await tractor.pause() | ||||
| 
 | ||||
|                     # # match against our ad-hoc set immediately | ||||
|                     # adhoc_matches = fuzzy.extract( | ||||
|  | @ -522,7 +525,21 @@ async def get_mkt_info( | |||
|         venue = con.primaryExchange or con.exchange | ||||
| 
 | ||||
|     price_tick: Decimal = Decimal(str(details.minTick)) | ||||
|     # price_tick: Decimal = Decimal('0.01') | ||||
|     ib_min_tick_gt_2: Decimal = Decimal('0.01') | ||||
|     if ( | ||||
|         price_tick < ib_min_tick_gt_2 | ||||
|     ): | ||||
|         # TODO: we need to add some kinda dynamic rounding sys | ||||
|         # to our MktPair i guess? | ||||
|         # not sure where the logic should sit, but likely inside | ||||
|         # the `.clearing._ems` i suppose... | ||||
|         log.warning( | ||||
|             'IB seems to disallow a min price tick < 0.01 ' | ||||
|             'when the price is > 2.0..?\n' | ||||
|             f'Decreasing min tick precision for {fqme} to 0.01' | ||||
|         ) | ||||
|         # price_tick = ib_min_tick | ||||
|         # await tractor.pause() | ||||
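If the dynamic rounding mentioned in the TODO above ever lands, the core operation is presumably just a `Decimal.quantize()` against the coarser venue tick; a tiny sketch with illustrative values (not the actual `MktPair`/ems logic):

from decimal import Decimal, ROUND_HALF_EVEN

price_tick = Decimal('0.01')   # coarser tick forced by the venue
raw_price = Decimal('2.3456')  # a price quoted at finer precision

# snap the raw price onto the venue's tick grid
snapped: Decimal = raw_price.quantize(
    price_tick,
    rounding=ROUND_HALF_EVEN,
)
assert snapped == Decimal('2.35')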
| 
 | ||||
|     if atype == 'stock': | ||||
|         # XXX: GRRRR they don't support fractional share sizes for | ||||
|  |  | |||
|  | @ -27,8 +27,8 @@ from typing import ( | |||
| ) | ||||
| import time | ||||
| 
 | ||||
| import httpx | ||||
| import pendulum | ||||
| import asks | ||||
| import numpy as np | ||||
| import urllib.parse | ||||
| import hashlib | ||||
|  | @ -60,6 +60,11 @@ log = get_logger('piker.brokers.kraken') | |||
| 
 | ||||
| # <uri>/<version>/ | ||||
| _url = 'https://api.kraken.com/0' | ||||
| 
 | ||||
| _headers: dict[str, str] = { | ||||
|     'User-Agent': 'krakenex/2.1.0 (+https://github.com/veox/python3-krakenex)' | ||||
| } | ||||
| 
 | ||||
| # TODO: this is the only backend providing this right? | ||||
| # in which case we should drop it from the defaults and | ||||
| # instead make a custom fields descr in this module! | ||||
|  | @ -135,16 +140,15 @@ class Client: | |||
|     def __init__( | ||||
|         self, | ||||
|         config: dict[str, str], | ||||
|         httpx_client: httpx.AsyncClient, | ||||
| 
 | ||||
|         name: str = '', | ||||
|         api_key: str = '', | ||||
|         secret: str = '' | ||||
|     ) -> None: | ||||
|         self._sesh = asks.Session(connections=4) | ||||
|         self._sesh.base_location = _url | ||||
|         self._sesh.headers.update({ | ||||
|             'User-Agent': | ||||
|                 'krakenex/2.1.0 (+https://github.com/veox/python3-krakenex)' | ||||
|         }) | ||||
| 
 | ||||
|         self._sesh: httpx.AsyncClient = httpx_client | ||||
| 
 | ||||
|         self._name = name | ||||
|         self._api_key = api_key | ||||
|         self._secret = secret | ||||
|  | @ -166,10 +170,9 @@ class Client: | |||
|         method: str, | ||||
|         data: dict, | ||||
|     ) -> dict[str, Any]: | ||||
|         resp = await self._sesh.post( | ||||
|             path=f'/public/{method}', | ||||
|         resp: httpx.Response = await self._sesh.post( | ||||
|             url=f'/public/{method}', | ||||
|             json=data, | ||||
|             timeout=float('inf') | ||||
|         ) | ||||
|         return resproc(resp, log) | ||||
| 
 | ||||
|  | @ -180,18 +183,18 @@ class Client: | |||
|         uri_path: str | ||||
|     ) -> dict[str, Any]: | ||||
|         headers = { | ||||
|             'Content-Type': | ||||
|                 'application/x-www-form-urlencoded', | ||||
|             'API-Key': | ||||
|                 self._api_key, | ||||
|             'API-Sign': | ||||
|                 get_kraken_signature(uri_path, data, self._secret) | ||||
|             'Content-Type': 'application/x-www-form-urlencoded', | ||||
|             'API-Key': self._api_key, | ||||
|             'API-Sign': get_kraken_signature( | ||||
|                 uri_path, | ||||
|                 data, | ||||
|                 self._secret, | ||||
|             ), | ||||
|         } | ||||
|         resp = await self._sesh.post( | ||||
|             path=f'/private/{method}', | ||||
|         resp: httpx.Response = await self._sesh.post( | ||||
|             url=f'/private/{method}', | ||||
|             data=data, | ||||
|             headers=headers, | ||||
|             timeout=float('inf') | ||||
|         ) | ||||
|         return resproc(resp, log) | ||||
| 
 | ||||
|  | @ -665,24 +668,36 @@ class Client: | |||
| @acm | ||||
| async def get_client() -> Client: | ||||
| 
 | ||||
|     conf = get_config() | ||||
|     if conf: | ||||
|         client = Client( | ||||
|             conf, | ||||
|     conf: dict[str, Any] = get_config() | ||||
|     async with httpx.AsyncClient( | ||||
|         base_url=_url, | ||||
|         headers=_headers, | ||||
| 
 | ||||
|             # TODO: don't break these up and just do internal | ||||
|             # conf lookups instead.. | ||||
|             name=conf['key_descr'], | ||||
|             api_key=conf['api_key'], | ||||
|             secret=conf['secret'] | ||||
|         ) | ||||
|     else: | ||||
|         client = Client({}) | ||||
|         # TODO: is there a way to numerate this? | ||||
|         # https://www.python-httpx.org/advanced/clients/#why-use-a-client | ||||
|         # connections=4 | ||||
|     ) as trio_client: | ||||
|         if conf: | ||||
|             client = Client( | ||||
|                 conf, | ||||
|                 httpx_client=trio_client, | ||||
| 
 | ||||
|     # at startup, load all symbols, and asset info in | ||||
|     # batch requests. | ||||
|     async with trio.open_nursery() as nurse: | ||||
|         nurse.start_soon(client.get_assets) | ||||
|         await client.get_mkt_pairs() | ||||
|                 # TODO: don't break these up and just do internal | ||||
|                 # conf lookups instead.. | ||||
|                 name=conf['key_descr'], | ||||
|                 api_key=conf['api_key'], | ||||
|                 secret=conf['secret'] | ||||
|             ) | ||||
|         else: | ||||
|             client = Client( | ||||
|                 conf={}, | ||||
|                 httpx_client=trio_client, | ||||
|             ) | ||||
| 
 | ||||
|     yield client | ||||
|         # at startup, load all symbols, and asset info in | ||||
|         # batch requests. | ||||
|         async with trio.open_nursery() as nurse: | ||||
|             nurse.start_soon(client.get_assets) | ||||
|             await client.get_mkt_pairs() | ||||
| 
 | ||||
|         yield client | ||||
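A stripped-down sketch of the `asks` -> `httpx` pattern introduced above: one shared `AsyncClient` carrying the base url and default headers, reused for every endpoint call. The endpoint (`Time`) and empty payload are only for illustration, and network access is obviously required to actually run it.

import httpx
import trio

_url = 'https://api.kraken.com/0'
_headers: dict[str, str] = {
    'User-Agent': (
        'krakenex/2.1.0 (+https://github.com/veox/python3-krakenex)'
    ),
}


async def main() -> None:
    # one client instance owns the base url + headers instead of an
    # `asks.Session`; individual calls only pass the endpoint path.
    async with httpx.AsyncClient(
        base_url=_url,
        headers=_headers,
    ) as client:
        resp: httpx.Response = await client.post(
            url='/public/Time',
            json={},
        )
        print(resp.json())


trio.run(main)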
|  |  | |||
|  | @ -612,18 +612,18 @@ async def open_trade_dialog( | |||
| 
 | ||||
|                 # enter relay loop | ||||
|                 await handle_order_updates( | ||||
|                     client, | ||||
|                     ws, | ||||
|                     stream, | ||||
|                     ems_stream, | ||||
|                     apiflows, | ||||
|                     ids, | ||||
|                     reqids2txids, | ||||
|                     acnt, | ||||
|                     api_trans, | ||||
|                     acctid, | ||||
|                     acc_name, | ||||
|                     token, | ||||
|                     client=client, | ||||
|                     ws=ws, | ||||
|                     ws_stream=stream, | ||||
|                     ems_stream=ems_stream, | ||||
|                     apiflows=apiflows, | ||||
|                     ids=ids, | ||||
|                     reqids2txids=reqids2txids, | ||||
|                     acnt=acnt, | ||||
|                     ledger=ledger, | ||||
|                     acctid=acctid, | ||||
|                     acc_name=acc_name, | ||||
|                     token=token, | ||||
|                 ) | ||||
| 
 | ||||
| 
 | ||||
|  | @ -639,7 +639,8 @@ async def handle_order_updates( | |||
| 
 | ||||
|     # transaction records which will be updated | ||||
|     # on new trade clearing events (aka order "fills") | ||||
|     ledger_trans: dict[str, Transaction], | ||||
|     ledger: TransactionLedger, | ||||
|     # ledger_trans: dict[str, Transaction], | ||||
|     acctid: str, | ||||
|     acc_name: str, | ||||
|     token: str, | ||||
|  | @ -699,7 +700,8 @@ async def handle_order_updates( | |||
|                     # if tid not in ledger_trans | ||||
|                 } | ||||
|                 for tid, trade in trades.items(): | ||||
|                     assert tid not in ledger_trans | ||||
|                     # assert tid not in ledger_trans | ||||
|                     assert tid not in ledger | ||||
|                     txid = trade['ordertxid'] | ||||
|                     reqid = trade.get('userref') | ||||
| 
 | ||||
|  | @ -747,11 +749,17 @@ async def handle_order_updates( | |||
|                     client, | ||||
|                     api_name_set='wsname', | ||||
|                 ) | ||||
|                 ppmsgs = trades2pps( | ||||
|                     acnt, | ||||
|                     acctid, | ||||
|                     new_trans, | ||||
|                 ppmsgs: list[BrokerdPosition] = trades2pps( | ||||
|                     acnt=acnt, | ||||
|                     ledger=ledger, | ||||
|                     acctid=acctid, | ||||
|                     new_trans=new_trans, | ||||
|                 ) | ||||
|                 # ppmsgs = trades2pps( | ||||
|                 #     acnt, | ||||
|                 #     acctid, | ||||
|                 #     new_trans, | ||||
|                 # ) | ||||
|                 for pp_msg in ppmsgs: | ||||
|                     await ems_stream.send(pp_msg) | ||||
| 
 | ||||
|  |  | |||
|  | @ -16,10 +16,9 @@ | |||
| # along with this program.  If not, see <https://www.gnu.org/licenses/>. | ||||
| 
 | ||||
| ''' | ||||
| Kucoin broker backend | ||||
| Kucoin cex API backend. | ||||
| 
 | ||||
| ''' | ||||
| 
 | ||||
| from contextlib import ( | ||||
|     asynccontextmanager as acm, | ||||
|     aclosing, | ||||
|  | @ -42,7 +41,7 @@ import wsproto | |||
| from uuid import uuid4 | ||||
| 
 | ||||
| from trio_typing import TaskStatus | ||||
| import asks | ||||
| import httpx | ||||
| from bidict import bidict | ||||
| import numpy as np | ||||
| import pendulum | ||||
|  | @ -112,6 +111,10 @@ class KucoinMktPair(Struct, frozen=True): | |||
|     quoteMaxSize: float | ||||
|     quoteMinSize: float | ||||
|     symbol: str  # our bs_mktid, kucoin's internal id | ||||
|     feeCategory: int | ||||
|     makerFeeCoefficient: float | ||||
|     takerFeeCoefficient: float | ||||
|     st: bool | ||||
| 
 | ||||
| 
 | ||||
| class AccountTrade(Struct, frozen=True): | ||||
|  | @ -212,8 +215,12 @@ def get_config() -> BrokerConfig | None: | |||
| 
 | ||||
| class Client: | ||||
| 
 | ||||
|     def __init__(self) -> None: | ||||
|         self._config: BrokerConfig | None = get_config() | ||||
|     def __init__( | ||||
|         self, | ||||
|         httpx_client: httpx.AsyncClient, | ||||
|     ) -> None: | ||||
|         self._http: httpx.AsyncClient = httpx_client | ||||
|         self._config: BrokerConfig|None = get_config() | ||||
|         self._pairs: dict[str, KucoinMktPair] = {} | ||||
|         self._fqmes2mktids: bidict[str, str] = bidict() | ||||
|         self._bars: list[list[float]] = [] | ||||
|  | @ -227,18 +234,24 @@ class Client: | |||
| 
 | ||||
|     ) -> dict[str, str | bytes]: | ||||
|         ''' | ||||
|         Generate authenticated request headers | ||||
|         Generate authenticated request headers: | ||||
| 
 | ||||
|         https://docs.kucoin.com/#authentication | ||||
|         https://www.kucoin.com/docs/basic-info/connection-method/authentication/creating-a-request | ||||
|         https://www.kucoin.com/docs/basic-info/connection-method/authentication/signing-a-message | ||||
| 
 | ||||
|         ''' | ||||
| 
 | ||||
|         if not self._config: | ||||
|             raise ValueError( | ||||
|                 'No config found when trying to send authenticated request') | ||||
|                 'No config found when trying to send authenticated request' | ||||
|             ) | ||||
| 
 | ||||
|         str_to_sign = ( | ||||
|             str(int(time.time() * 1000)) | ||||
|             + action + f'/api/{api}/{endpoint.lstrip("/")}' | ||||
|             + | ||||
|             action | ||||
|             + | ||||
|             f'/api/{api}/{endpoint.lstrip("/")}' | ||||
|         ) | ||||
| 
 | ||||
|         signature = base64.b64encode( | ||||
|  | @ -249,6 +262,7 @@ class Client: | |||
|             ).digest() | ||||
|         ) | ||||
| 
 | ||||
|         # TODO: can we cache this between calls? | ||||
|         passphrase = base64.b64encode( | ||||
|             hmac.new( | ||||
|                 self._config.key_secret.encode('utf-8'), | ||||
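Since this kind of request signing is easy to get subtly wrong, here is a self-contained sketch of the same timestamp + verb + path HMAC-SHA256 scheme used above (KC-API key-version 2 style); the credentials are fake and the header names at the end are the standard Kucoin ones, shown only for illustration.

import base64
import hashlib
import hmac
import time

# fabricated credentials, never hard-code real ones
key_id = 'my-api-key'
key_secret = 'my-api-secret'
key_passphrase = 'my-passphrase'

action = 'GET'
api = 'v2'
endpoint = 'symbols'

# what gets signed: str(ms-timestamp) + HTTP-verb + request-path
now_ms: str = str(int(time.time() * 1000))
str_to_sign: str = (
    now_ms
    + action
    + f'/api/{api}/{endpoint.lstrip("/")}'
)

signature: bytes = base64.b64encode(
    hmac.new(
        key_secret.encode('utf-8'),
        str_to_sign.encode('utf-8'),
        hashlib.sha256,
    ).digest()
)

# v2 keys additionally require the passphrase itself to be signed
passphrase: bytes = base64.b64encode(
    hmac.new(
        key_secret.encode('utf-8'),
        key_passphrase.encode('utf-8'),
        hashlib.sha256,
    ).digest()
)

headers: dict[str, str | bytes] = {
    'KC-API-SIGN': signature,
    'KC-API-TIMESTAMP': now_ms,
    'KC-API-KEY': key_id,
    'KC-API-PASSPHRASE': passphrase,
    'KC-API-KEY-VERSION': '2',
}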
|  | @ -270,8 +284,10 @@ class Client: | |||
|         self, | ||||
|         action: Literal['POST', 'GET'], | ||||
|         endpoint: str, | ||||
| 
 | ||||
|         api: str = 'v2', | ||||
|         headers: dict = {}, | ||||
| 
 | ||||
|     ) -> Any: | ||||
|         ''' | ||||
|         Generic request wrapper for Kucoin API | ||||
|  | @ -284,14 +300,19 @@ class Client: | |||
|                 api, | ||||
|             ) | ||||
| 
 | ||||
|         api_url = f'https://api.kucoin.com/api/{api}/{endpoint}' | ||||
| 
 | ||||
|         res = await asks.request(action, api_url, headers=headers) | ||||
| 
 | ||||
|         json = res.json() | ||||
|         if 'data' in json: | ||||
|             return json['data'] | ||||
|         req_meth: Callable = getattr( | ||||
|             self._http, | ||||
|             action.lower(), | ||||
|         ) | ||||
|         res = await req_meth( | ||||
|             url=f'/{api}/{endpoint}', | ||||
|             headers=headers, | ||||
|         ) | ||||
|         json: dict = res.json() | ||||
|         if (data := json.get('data')) is not None: | ||||
|             return data | ||||
|         else: | ||||
|             api_url: str = self._http.base_url | ||||
|             log.error( | ||||
|                 f'Error making request to {api_url} ->\n' | ||||
|                 f'{pformat(res)}' | ||||
|  | @ -311,7 +332,7 @@ class Client: | |||
|         ''' | ||||
|         token_type = 'private' if private else 'public' | ||||
|         try: | ||||
|             data: dict[str, Any] | None = await self._request( | ||||
|             data: dict[str, Any]|None = await self._request( | ||||
|                 'POST', | ||||
|                 endpoint=f'bullet-{token_type}', | ||||
|                 api='v1' | ||||
|  | @ -349,8 +370,8 @@ class Client: | |||
|             currencies: dict[str, Currency] = {} | ||||
|             entries: list[dict] = await self._request( | ||||
|                 'GET', | ||||
|                 api='v1', | ||||
|                 endpoint='currencies', | ||||
|                 api='v1', | ||||
|             ) | ||||
|             for entry in entries: | ||||
|                 curr = Currency(**entry).copy() | ||||
|  | @ -366,7 +387,10 @@ class Client: | |||
|         dict[str, KucoinMktPair], | ||||
|         bidict[str, KucoinMktPair], | ||||
|     ]: | ||||
|         entries = await self._request('GET', 'symbols') | ||||
|         entries = await self._request( | ||||
|             'GET', | ||||
|             endpoint='symbols', | ||||
|         ) | ||||
|         log.info(f' {len(entries)} Kucoin market pairs fetched') | ||||
| 
 | ||||
|         pairs: dict[str, KucoinMktPair] = {} | ||||
|  | @ -567,13 +591,21 @@ def fqme_to_kucoin_sym( | |||
| 
 | ||||
| @acm | ||||
| async def get_client() -> AsyncGenerator[Client, None]: | ||||
|     client = Client() | ||||
|     ''' | ||||
|     Load an API `Client` preconfigured from user settings. | ||||
| 
 | ||||
|     async with trio.open_nursery() as n: | ||||
|         n.start_soon(client.get_mkt_pairs) | ||||
|         await client.get_currencies() | ||||
|     ''' | ||||
|     async with ( | ||||
|         httpx.AsyncClient( | ||||
|             base_url='https://api.kucoin.com/api', | ||||
|         ) as trio_client, | ||||
|     ): | ||||
|         client = Client(httpx_client=trio_client) | ||||
|         async with trio.open_nursery() as tn: | ||||
|             tn.start_soon(client.get_mkt_pairs) | ||||
|             await client.get_currencies() | ||||
| 
 | ||||
|     yield client | ||||
|         yield client | ||||
| 
 | ||||
| 
 | ||||
| @tractor.context | ||||
|  | @ -609,7 +641,7 @@ async def open_ping_task( | |||
|                 await trio.sleep((ping_interval - 1000) / 1000) | ||||
|                 await ws.send_msg({'id': connect_id, 'type': 'ping'}) | ||||
| 
 | ||||
|         log.info('Starting ping task for kucoin ws connection') | ||||
|         log.warning('Starting ping task for kucoin ws connection') | ||||
|         n.start_soon(ping_server) | ||||
| 
 | ||||
|         yield | ||||
|  | @ -621,9 +653,14 @@ async def open_ping_task( | |||
| async def get_mkt_info( | ||||
|     fqme: str, | ||||
| 
 | ||||
| ) -> tuple[MktPair, KucoinMktPair]: | ||||
| ) -> tuple[ | ||||
|     MktPair, | ||||
|     KucoinMktPair, | ||||
| ]: | ||||
|     ''' | ||||
|     Query for and return a `MktPair` and `KucoinMktPair`. | ||||
|     Query for and return both a `piker.accounting.MktPair` and | ||||
|     `KucoinMktPair` from provided `fqme: str` | ||||
|     (fully-qualified-market-endpoint). | ||||
| 
 | ||||
|     ''' | ||||
|     async with open_cached_client('kucoin') as client: | ||||
|  | @ -698,6 +735,8 @@ async def stream_quotes( | |||
| 
 | ||||
|         log.info(f'Starting up quote stream(s) for {symbols}') | ||||
|         for sym_str in symbols: | ||||
|             mkt: MktPair | ||||
|             pair: KucoinMktPair | ||||
|             mkt, pair = await get_mkt_info(sym_str) | ||||
|             init_msgs.append( | ||||
|                 FeedInit(mkt_info=mkt) | ||||
|  | @ -705,7 +744,11 @@ async def stream_quotes( | |||
| 
 | ||||
|         ws: NoBsWs | ||||
|         token, ping_interval = await client._get_ws_token() | ||||
|         connect_id = str(uuid4()) | ||||
|         log.info(f'API reported ping_interval: {ping_interval}\n') | ||||
| 
 | ||||
|         connect_id: str = str(uuid4()) | ||||
|         typ: str | ||||
|         quote: dict | ||||
|         async with ( | ||||
|             open_autorecon_ws( | ||||
|                 ( | ||||
|  | @ -719,20 +762,37 @@ async def stream_quotes( | |||
|                 ), | ||||
|             ) as ws, | ||||
|             open_ping_task(ws, ping_interval, connect_id), | ||||
|             aclosing(stream_messages(ws, sym_str)) as msg_gen, | ||||
|             aclosing( | ||||
|                 iter_normed_quotes( | ||||
|                     ws, sym_str | ||||
|                 ) | ||||
|             ) as iter_quotes, | ||||
|         ): | ||||
|             typ, quote = await anext(msg_gen) | ||||
|             typ, quote = await anext(iter_quotes) | ||||
| 
 | ||||
|             while typ != 'trade': | ||||
|                 # take care to not unblock here until we get a real | ||||
|                 # trade quote | ||||
|                 typ, quote = await anext(msg_gen) | ||||
|             # take care to not unblock here until we get a real | ||||
|             # trade quote? | ||||
|             # ^TODO, remove this right? | ||||
|             # -[ ] what often blocks chart boot/new-feed switching | ||||
|             #   since we're waiting for a live quote instead of just | ||||
|             #   loading history afap.. | ||||
|             #  |_ XXX, not sure if we require a bit of rework to core | ||||
|             #    feed init logic or if backends just gotta be | ||||
|             #    changed up.. feel like there was some causality | ||||
|             #    dilemma prolly only seen with IB too.. | ||||
|             # while typ != 'trade': | ||||
|             #     typ, quote = await anext(iter_quotes) | ||||
| 
 | ||||
|             task_status.started((init_msgs, quote)) | ||||
|             feed_is_live.set() | ||||
| 
 | ||||
|             async for typ, msg in msg_gen: | ||||
|                 await send_chan.send({sym_str: msg}) | ||||
|             # XXX NOTE, DO NOT include the `.<backend>` suffix! | ||||
|             # OW the sampling loop will not broadcast correctly.. | ||||
|             # since `bus._subscribers.setdefault(bs_fqme, set())` | ||||
|             # is used inside `.data.open_feed_bus()` !!! | ||||
|             topic: str = mkt.bs_fqme | ||||
|             async for typ, quote in iter_quotes: | ||||
|                 await send_chan.send({topic: quote}) | ||||
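Regarding the "no `.<backend>` suffix" note above, the gist is just that the feed-bus subscription key and the published quote topic must be the exact same string. A purely illustrative sketch of the difference (the real value comes from `MktPair.bs_fqme` in piker; the example strings here are assumed):

# assumed example: a fully-qualified market endpoint with its
# backend/broker suffix vs. the broker-specific (bus) key.
fqme: str = 'btcusdt.spot.kucoin'
backend: str = 'kucoin'

# the sampling/feed bus keys subscribers by the *suffix-less* form,
# so quotes must be published under that same topic string.
bs_fqme: str = fqme.removesuffix(f'.{backend}')
assert bs_fqme == 'btcusdt.spot'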
| 
 | ||||
| 
 | ||||
| @acm | ||||
|  | @ -787,7 +847,7 @@ async def subscribe( | |||
|             ) | ||||
| 
 | ||||
| 
 | ||||
| async def stream_messages( | ||||
| async def iter_normed_quotes( | ||||
|     ws: NoBsWs, | ||||
|     sym: str, | ||||
| 
 | ||||
|  | @ -818,6 +878,9 @@ async def stream_messages( | |||
| 
 | ||||
|                 yield 'trade', { | ||||
|                     'symbol': sym, | ||||
|                     # TODO, is 'last' even used elsewhere/a-good | ||||
|                     # semantic? can't we just read the ticks with our | ||||
|                     # `.data.ticktools.frame_ticks()`? | ||||
|                     'last': trade_data.price, | ||||
|                     'brokerd_ts': last_trade_ts, | ||||
|                     'ticks': [ | ||||
|  | @ -910,7 +973,7 @@ async def open_history_client( | |||
|             if end_dt is None: | ||||
|                 inow = round(time.time()) | ||||
| 
 | ||||
|                 print( | ||||
|                 log.debug( | ||||
|                     'difference in time between load and processing: ' | ||||
|                     f'{inow - times[-1]}' | ||||
|                 ) | ||||
|  |  | |||
|  | @ -653,7 +653,11 @@ class Router(Struct): | |||
|             flume = feed.flumes[fqme] | ||||
|             first_quote: dict = flume.first_quote | ||||
|             book: DarkBook = self.get_dark_book(broker) | ||||
|             book.lasts[fqme]: float = float(first_quote['last']) | ||||
| 
 | ||||
|             if not (last := first_quote.get('last')): | ||||
|                 last: float = flume.rt_shm.array[-1]['close'] | ||||
| 
 | ||||
|             book.lasts[fqme]: float = float(last) | ||||
| 
 | ||||
|             async with self.maybe_open_brokerd_dialog( | ||||
|                 brokermod=brokermod, | ||||
|  | @ -716,7 +720,7 @@ class Router(Struct): | |||
|             subs = self.subscribers[sub_key] | ||||
| 
 | ||||
|         sent_some: bool = False | ||||
|         for client_stream in subs: | ||||
|         for client_stream in subs.copy(): | ||||
|             try: | ||||
|                 await client_stream.send(msg) | ||||
|                 sent_some = True | ||||
|  | @ -1010,10 +1014,14 @@ async def translate_and_relay_brokerd_events( | |||
|                 status_msg.brokerd_msg = msg | ||||
|                 status_msg.src = msg.broker_details['name'] | ||||
| 
 | ||||
|                 await router.client_broadcast( | ||||
|                     status_msg.req.symbol, | ||||
|                     status_msg, | ||||
|                 ) | ||||
|                 if not status_msg.req: | ||||
|                     # likely some order change state? | ||||
|                     await tractor.pause() | ||||
|                 else: | ||||
|                     await router.client_broadcast( | ||||
|                         status_msg.req.symbol, | ||||
|                         status_msg, | ||||
|                     ) | ||||
| 
 | ||||
|                 if status == 'closed': | ||||
|                     log.info(f'Execution for {oid} is complete!') | ||||
|  |  | |||
|  | @ -335,7 +335,7 @@ def services(config, tl, ports): | |||
|                 name='service_query', | ||||
|                 loglevel=config['loglevel'] if tl else None, | ||||
|             ), | ||||
|             tractor.get_arbiter( | ||||
|             tractor.get_registry( | ||||
|                 host=host, | ||||
|                 port=ports[0] | ||||
|             ) as portal | ||||
|  |  | |||
|  | @ -25,6 +25,7 @@ from collections import ( | |||
|     defaultdict, | ||||
| ) | ||||
| from contextlib import asynccontextmanager as acm | ||||
| from functools import partial | ||||
| import time | ||||
| from typing import ( | ||||
|     Any, | ||||
|  | @ -42,7 +43,7 @@ from tractor.trionics import ( | |||
|     maybe_open_nursery, | ||||
| ) | ||||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| from trio import TaskStatus | ||||
| 
 | ||||
| from .ticktools import ( | ||||
|     frame_ticks, | ||||
|  | @ -53,6 +54,9 @@ from ._util import ( | |||
|     get_console_log, | ||||
| ) | ||||
| from ..service import maybe_spawn_daemon | ||||
| from piker.log import ( | ||||
|     mk_repr, | ||||
| ) | ||||
| 
 | ||||
| if TYPE_CHECKING: | ||||
|     from ._sharedmem import ( | ||||
|  | @ -70,6 +74,7 @@ if TYPE_CHECKING: | |||
| _default_delay_s: float = 1.0 | ||||
| 
 | ||||
| 
 | ||||
| # TODO: use new `tractor.singleton_acm` API for this! | ||||
| class Sampler: | ||||
|     ''' | ||||
|     Global sampling engine registry. | ||||
|  | @ -79,9 +84,9 @@ class Sampler: | |||
| 
 | ||||
|     This non-instantiated type is meant to be a singleton within | ||||
|     a `samplerd` actor-service spawned once by the user wishing to | ||||
|     time-step-sample (real-time) quote feeds, see | ||||
|     ``.service.maybe_open_samplerd()`` and the below | ||||
|     ``register_with_sampler()``. | ||||
|     time-step-sample (real-time) quote feeds, see | ||||
|     `.service.maybe_open_samplerd()` and the below | ||||
|     `register_with_sampler()`. | ||||
| 
 | ||||
|     ''' | ||||
|     service_nursery: None | trio.Nursery = None | ||||
|  | @ -95,6 +100,12 @@ class Sampler: | |||
|     # history loading. | ||||
|     incr_task_cs: trio.CancelScope | None = None | ||||
| 
 | ||||
|     bcast_errors: tuple[type[Exception], ...] = ( | ||||
|         trio.BrokenResourceError, | ||||
|         trio.ClosedResourceError, | ||||
|         trio.EndOfChannel, | ||||
|     ) | ||||
| 
 | ||||
|     # holds all the ``tractor.Context`` remote subscriptions for | ||||
|     # a particular sample period increment event: all subscribers are | ||||
|     # notified on a step. | ||||
|  | @ -258,14 +269,15 @@ class Sampler: | |||
|         subs: set | ||||
|         last_ts, subs = pair | ||||
| 
 | ||||
|         task = trio.lowlevel.current_task() | ||||
|         log.debug( | ||||
|             f'SUBS {self.subscribers}\n' | ||||
|             f'PAIR {pair}\n' | ||||
|             f'TASK: {task}: {id(task)}\n' | ||||
|             f'broadcasting {period_s} -> {last_ts}\n' | ||||
|             # f'consumers: {subs}' | ||||
|         ) | ||||
|         # NOTE, for debugging pub-sub issues | ||||
|         # task = trio.lowlevel.current_task() | ||||
|         # log.debug( | ||||
|         #     f'ALL-SUBS@{period_s!r}: {self.subscribers}\n' | ||||
|         #     f'PAIR: {pair}\n' | ||||
|         #     f'TASK: {task}: {id(task)}\n' | ||||
|         #     f'broadcasting {period_s} -> {last_ts}\n' | ||||
|         #     f'consumers: {subs}' | ||||
|         # ) | ||||
|         borked: set[MsgStream] = set() | ||||
|         sent: set[MsgStream] = set() | ||||
|         while True: | ||||
|  | @ -282,12 +294,11 @@ class Sampler: | |||
|                         await stream.send(msg) | ||||
|                         sent.add(stream) | ||||
| 
 | ||||
|                     except ( | ||||
|                         trio.BrokenResourceError, | ||||
|                         trio.ClosedResourceError | ||||
|                     ): | ||||
|                     except self.bcast_errors as err: | ||||
|                         log.error( | ||||
|                             f'{stream._ctx.chan.uid} dropped connection' | ||||
|                             f'Connection dropped for IPC ctx\n' | ||||
|                             f'{stream._ctx}\n\n' | ||||
|                             f'Due to {type(err)}' | ||||
|                         ) | ||||
|                         borked.add(stream) | ||||
|                 else: | ||||
|  | @ -375,7 +386,10 @@ async def register_with_sampler( | |||
|                 assert Sampler.ohlcv_shms | ||||
| 
 | ||||
|             # unblock caller | ||||
|             await ctx.started(set(Sampler.ohlcv_shms.keys())) | ||||
|             await ctx.started( | ||||
|                 # XXX bc msgpack only allows one array type! | ||||
|                 list(Sampler.ohlcv_shms.keys()) | ||||
|             ) | ||||
| 
 | ||||
|             if open_index_stream: | ||||
|                 try: | ||||
|  | @ -419,7 +433,6 @@ async def register_with_sampler( | |||
| 
 | ||||
| 
 | ||||
| async def spawn_samplerd( | ||||
| 
 | ||||
|     loglevel: str | None = None, | ||||
|     **extra_tractor_kwargs | ||||
| 
 | ||||
|  | @ -429,7 +442,10 @@ async def spawn_samplerd( | |||
|     update and increment count write and stream broadcasting. | ||||
| 
 | ||||
|     ''' | ||||
|     from piker.service import Services | ||||
|     from piker.service import ( | ||||
|         get_service_mngr, | ||||
|         ServiceMngr, | ||||
|     ) | ||||
| 
 | ||||
|     dname = 'samplerd' | ||||
|     log.info(f'Spawning `{dname}`') | ||||
|  | @ -437,26 +453,33 @@ async def spawn_samplerd( | |||
|     # singleton lock creation of ``samplerd`` since we only ever want | ||||
|     # one daemon per ``pikerd`` proc tree. | ||||
|     # TODO: make this built-into the service api? | ||||
|     async with Services.locks[dname + '_singleton']: | ||||
|     mngr: ServiceMngr = get_service_mngr() | ||||
|     already_started: bool = dname in mngr.service_tasks | ||||
| 
 | ||||
|         if dname not in Services.service_tasks: | ||||
| 
 | ||||
|             portal = await Services.actor_n.start_actor( | ||||
|                 dname, | ||||
|                 enable_modules=[ | ||||
|                     'piker.data._sampling', | ||||
|                 ], | ||||
|                 loglevel=loglevel, | ||||
|                 debug_mode=Services.debug_mode,  # set by pikerd flag | ||||
|                 **extra_tractor_kwargs | ||||
|             ) | ||||
| 
 | ||||
|             await Services.start_service_task( | ||||
|                 dname, | ||||
|                 portal, | ||||
|     async with mngr._locks[dname + '_singleton']: | ||||
|         ctx: Context = await mngr.start_service( | ||||
|             daemon_name=dname, | ||||
|             ctx_ep=partial( | ||||
|                 register_with_sampler, | ||||
|                 period_s=1, | ||||
|                 sub_for_broadcasts=False, | ||||
|             ), | ||||
|             debug_mode=mngr.debug_mode,  # set by pikerd flag | ||||
| 
 | ||||
|             # proxy-through to tractor | ||||
|             enable_modules=[ | ||||
|                 'piker.data._sampling', | ||||
|             ], | ||||
|             loglevel=loglevel, | ||||
|             **extra_tractor_kwargs | ||||
|         ) | ||||
|         if not already_started: | ||||
|             assert ( | ||||
|                 ctx | ||||
|                 and | ||||
|                 ctx.portal | ||||
|                 and | ||||
|                 not ctx.cancel_called | ||||
|             ) | ||||
|             return True | ||||
| 
 | ||||
|  | @ -561,8 +584,7 @@ async def open_sample_stream( | |||
| 
 | ||||
| 
 | ||||
| async def sample_and_broadcast( | ||||
| 
 | ||||
|     bus: _FeedsBus,  # noqa | ||||
|     bus: _FeedsBus, | ||||
|     rt_shm: ShmArray, | ||||
|     hist_shm: ShmArray, | ||||
|     quote_stream: trio.abc.ReceiveChannel, | ||||
|  | @ -582,11 +604,32 @@ async def sample_and_broadcast( | |||
| 
 | ||||
|     overruns = Counter() | ||||
| 
 | ||||
|     # NOTE, only used for debugging live-data-feed issues, though | ||||
|     # this should be resolved more correctly in the future using the | ||||
|     # new typed-msgspec feats of `tractor`! | ||||
|     # | ||||
|     # XXX, a multiline nested `dict` formatter (since rn quote-msgs | ||||
|     # are just that). | ||||
|     # pfmt: Callable[[str], str] = mk_repr() | ||||
| 
 | ||||
|     # iterate stream delivered by broker | ||||
|     async for quotes in quote_stream: | ||||
|         # print(quotes) | ||||
| 
 | ||||
|         # TODO: ``numba`` this! | ||||
|         # XXX WARNING XXX only enable for debugging bc ow can cost | ||||
|         # ALOT of perf with HF-feedz!!! | ||||
|         # | ||||
|         # log.info( | ||||
|         #     'Rx live quotes:\n' | ||||
|         #     f'{pfmt(quotes)}' | ||||
|         # ) | ||||
| 
 | ||||
|         # TODO, | ||||
|         # -[ ] `numba` or `cython`-nize this loop possibly? | ||||
|         #  |_alternatively could we do it in rust somehow by unpacking | ||||
|         #    arrow msgs instead of using `msgspec`? | ||||
|         # -[ ] use `msgspec.Struct` support in new typed-msging from | ||||
|         #     `tractor` to ensure only allowed msgs are transmitted? | ||||
|         # | ||||
|         for broker_symbol, quote in quotes.items(): | ||||
|             # TODO: in theory you can send the IPC msg *before* writing | ||||
|             # to the sharedmem array to decrease latency, however, that | ||||
|  | @ -659,6 +702,21 @@ async def sample_and_broadcast( | |||
|             sub_key: str = broker_symbol.lower() | ||||
|             subs: set[Sub] = bus.get_subs(sub_key) | ||||
| 
 | ||||
|             # TODO, figure out how to make this useful whilst | ||||
|             # incorporating feed "pausing" .. | ||||
|             # | ||||
|             # if not subs: | ||||
|             #     all_bs_fqmes: list[str] = list( | ||||
|             #         bus._subscribers.keys() | ||||
|             #     ) | ||||
|             #     log.warning( | ||||
|             #         f'No subscribers for {brokername!r} live-quote ??\n' | ||||
|             #         f'broker_symbol: {broker_symbol}\n\n' | ||||
| 
 | ||||
|             #         f'Maybe the backend-sys symbol does not match one of,\n' | ||||
|             #         f'{pfmt(all_bs_fqmes)}\n' | ||||
|             #     ) | ||||
| 
 | ||||
|             # NOTE: by default the broker backend doesn't append | ||||
|             # it's own "name" into the fqme schema (but maybe it | ||||
|             # should?) so we have to manually generate the correct | ||||
|  | @ -756,7 +814,6 @@ async def sample_and_broadcast( | |||
| 
 | ||||
| 
 | ||||
| async def uniform_rate_send( | ||||
| 
 | ||||
|     rate: float, | ||||
|     quote_stream: trio.abc.ReceiveChannel, | ||||
|     stream: MsgStream, | ||||
|  | @ -889,6 +946,7 @@ async def uniform_rate_send( | |||
|             # to consumers which crash or lose network connection. | ||||
|             # I.e. we **DO NOT** want to crash and propagate up to | ||||
|             # ``pikerd`` these kinds of errors! | ||||
|             trio.EndOfChannel, | ||||
|             trio.ClosedResourceError, | ||||
|             trio.BrokenResourceError, | ||||
|             ConnectionResetError, | ||||
|  |  | |||
|  | @ -273,7 +273,7 @@ async def _reconnect_forever( | |||
|                 nobsws._connected.set() | ||||
|                 await trio.sleep_forever() | ||||
|         except HandshakeError: | ||||
|             log.exception(f'Retrying connection') | ||||
|             log.exception('Retrying connection') | ||||
| 
 | ||||
|         # ws & nursery block ends | ||||
| 
 | ||||
|  | @ -359,8 +359,8 @@ async def open_autorecon_ws( | |||
| 
 | ||||
| 
 | ||||
| ''' | ||||
| JSONRPC response-request style machinery for transparent multiplexing of msgs | ||||
| over a NoBsWs. | ||||
| JSONRPC response-request style machinery for transparent multiplexing | ||||
| of msgs over a `NoBsWs`. | ||||
| 
 | ||||
| ''' | ||||
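A minimal usage sketch of the request-response API yielded by `open_jsonrpc_session()` follows; the import path, endpoint URL and RPC method are illustrative assumptions, not taken from this diff:

    import trio
    from piker.data._web_bs import open_jsonrpc_session  # module path assumed

    async def main():
        # multiplexed JSON-RPC over an auto-reconnecting `NoBsWs`
        async with open_jsonrpc_session(
            url='wss://www.deribit.com/ws/api/v2',  # example venue endpoint
            msg_recv_timeout=30.,
        ) as json_rpc:
            # block until the response matching this request's id arrives
            resp = await json_rpc('public/get_time', params={})
            print(resp)

    trio.run(main)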
| 
 | ||||
|  | @ -377,43 +377,77 @@ async def open_jsonrpc_session( | |||
|     url: str, | ||||
|     start_id: int = 0, | ||||
|     response_type: type = JSONRPCResult, | ||||
|     request_type: Optional[type] = None, | ||||
|     request_hook: Optional[Callable] = None, | ||||
|     error_hook: Optional[Callable] = None, | ||||
|     msg_recv_timeout: float = float('inf'), | ||||
|     # ^NOTE, since only `deribit` is using this jsonrpc stuff atm | ||||
|     # and options mkts are generally "slow moving".. | ||||
|     # | ||||
|     # FURTHER if we break the underlying ws connection then since we | ||||
|     # don't pass a `fixture` to the task that manages `NoBsWs`, i.e. | ||||
|     # `_reconnect_forever()`, the jsonrpc "transport pipe" gets | ||||
|     # broken and never restored with whatever init sequence is | ||||
|     # required to re-establish a working req-resp session. | ||||
| 
 | ||||
|     # request_type: Optional[type] = None, | ||||
|     # request_hook: Optional[Callable] = None, | ||||
|     # error_hook: Optional[Callable] = None, | ||||
| ) -> Callable[[str, dict], dict]: | ||||
| 
 | ||||
|     # NOTE, store all request msgs so we can raise errors on the | ||||
|     # caller side! | ||||
|     req_msgs: dict[int, dict] = {} | ||||
| 
 | ||||
|     async with ( | ||||
|         trio.open_nursery() as n, | ||||
|         open_autorecon_ws(url) as ws | ||||
|         open_autorecon_ws( | ||||
|             url=url, | ||||
|             msg_recv_timeout=msg_recv_timeout, | ||||
|         ) as ws | ||||
|     ): | ||||
|         rpc_id: Iterable = count(start_id) | ||||
|         rpc_id: Iterable[int] = count(start_id) | ||||
|         rpc_results: dict[int, dict] = {} | ||||
| 
 | ||||
|         async def json_rpc(method: str, params: dict) -> dict: | ||||
|         async def json_rpc( | ||||
|             method: str, | ||||
|             params: dict, | ||||
|         ) -> dict: | ||||
|             ''' | ||||
|             Perform a JSON-RPC call and wait for the result; raise an | ||||
|             exception if an error field is present in the response. | ||||
|             ''' | ||||
|             nonlocal req_msgs | ||||
| 
 | ||||
|             req_id: int = next(rpc_id) | ||||
|             msg = { | ||||
|                 'jsonrpc': '2.0', | ||||
|                 'id': next(rpc_id), | ||||
|                 'id': req_id, | ||||
|                 'method': method, | ||||
|                 'params': params | ||||
|             } | ||||
|             _id = msg['id'] | ||||
| 
 | ||||
|             rpc_results[_id] = { | ||||
|             result = rpc_results[_id] = { | ||||
|                 'result': None, | ||||
|                 'event': trio.Event() | ||||
|                 'error': None, | ||||
|                 'event': trio.Event(),  # signal caller resp arrived | ||||
|             } | ||||
|             req_msgs[_id] = msg | ||||
| 
 | ||||
|             await ws.send_msg(msg) | ||||
| 
 | ||||
|             # wait for response before unblocking requester code | ||||
|             await rpc_results[_id]['event'].wait() | ||||
| 
 | ||||
|             ret = rpc_results[_id]['result'] | ||||
|             if (maybe_result := result['result']): | ||||
|                 ret = maybe_result | ||||
|                 del rpc_results[_id] | ||||
| 
 | ||||
|             del rpc_results[_id] | ||||
|             else: | ||||
|                 err = result['error'] | ||||
|                 raise Exception( | ||||
|                     f'JSONRPC request failed\n' | ||||
|                     f'req: {msg}\n' | ||||
|                     f'resp: {err}\n' | ||||
|                 ) | ||||
| 
 | ||||
|             if ret.error is not None: | ||||
|                 raise Exception(json.dumps(ret.error, indent=4)) | ||||
|  | @ -428,6 +462,7 @@ async def open_jsonrpc_session( | |||
|             the server side. | ||||
| 
 | ||||
|             ''' | ||||
|             nonlocal req_msgs | ||||
|             async for msg in ws: | ||||
|                 match msg: | ||||
|                     case { | ||||
|  | @ -451,15 +486,29 @@ async def open_jsonrpc_session( | |||
|                         'params': _, | ||||
|                     }: | ||||
|                         log.debug(f'Received\n{msg}') | ||||
|                         if request_hook: | ||||
|                             await request_hook(request_type(**msg)) | ||||
|                         # if request_hook: | ||||
|                         #     await request_hook(request_type(**msg)) | ||||
| 
 | ||||
|                     case { | ||||
|                         'error': error | ||||
|                     }: | ||||
|                         log.warning(f'Received\n{error}') | ||||
|                         if error_hook: | ||||
|                             await error_hook(response_type(**msg)) | ||||
|                         # if error_hook: | ||||
|                         #     await error_hook(response_type(**msg)) | ||||
| 
 | ||||
|                         # retrieve orig request msg, set error | ||||
|                         # response in original "result" msg, | ||||
|                         # THEN FINALLY set the event to signal caller | ||||
|                         # to raise the error in the parent task. | ||||
|                         req_id: int = msg['id'] | ||||
|                         req_msg: dict = req_msgs[req_id] | ||||
|                         result: dict = rpc_results[req_id] | ||||
|                         result['error'] = error | ||||
|                         result['event'].set() | ||||
|                         log.error( | ||||
|                             f'JSONRPC request failed\n' | ||||
|                             f'req: {req_msg}\n' | ||||
|                             f'resp: {error}\n' | ||||
|                         ) | ||||
| 
 | ||||
|                     case _: | ||||
|                         log.warning(f'Unhandled JSON-RPC msg!?\n{msg}') | ||||
|  |  | |||
|  | @ -540,7 +540,10 @@ async def open_feed_bus( | |||
|         # subscription since the backend isn't (yet) expected to | ||||
|         # append it's own name to the fqme, so we filter on keys | ||||
|         # which *do not* include that name (e.g .ib) . | ||||
|         bus._subscribers.setdefault(bs_fqme, set()) | ||||
|         bus._subscribers.setdefault( | ||||
|             bs_fqme, | ||||
|             set(), | ||||
|         ) | ||||
| 
 | ||||
|     # sync feed subscribers with flume handles | ||||
|     await ctx.started( | ||||
|  |  | |||
							
								
								
									
piker/log.py | 30 ++++++++++++++++++++++++++++++
							|  | @ -18,7 +18,11 @@ | |||
| Log like a forester! | ||||
| """ | ||||
| import logging | ||||
| import reprlib | ||||
| import json | ||||
| from typing import ( | ||||
|     Callable, | ||||
| ) | ||||
| 
 | ||||
| import tractor | ||||
| from pygments import ( | ||||
|  | @ -84,3 +88,29 @@ def colorize_json( | |||
|         # likeable styles: algol_nu, tango, monokai | ||||
|         formatters.TerminalTrueColorFormatter(style=style) | ||||
|     ) | ||||
| 
 | ||||
| 
 | ||||
| # TODO, eventually defer to the version in `modden` once | ||||
| # it becomes a dep! | ||||
| def mk_repr( | ||||
|     **repr_kws, | ||||
| ) -> Callable[[str], str]: | ||||
|     ''' | ||||
|     Allocate and deliver a `reprlib.Repr` instance with provided input | ||||
|     settings using the std-lib's `reprlib` mod, | ||||
|      * https://docs.python.org/3/library/reprlib.html | ||||
| 
 | ||||
|     ------ Ex. ------ | ||||
|     An up to 6-layer-nested `dict` as multi-line: | ||||
|     - https://stackoverflow.com/a/79102479 | ||||
|     - https://docs.python.org/3/library/reprlib.html#reprlib.Repr.maxlevel | ||||
| 
 | ||||
|     ''' | ||||
|     def_kws: dict[str, int] = dict( | ||||
|         indent=2, | ||||
|         maxlevel=6,  # recursion levels | ||||
|         maxstring=66,  # match editor line-len limit | ||||
|     ) | ||||
|     def_kws |= repr_kws | ||||
|     reprr = reprlib.Repr(**def_kws) | ||||
|     return reprr.repr | ||||
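For reference, a small sketch of how the new helper might be used to pretty-print a nested quote msg; the sample dict is made up, and Python 3.12+ is assumed since `reprlib.Repr` only accepts constructor kwargs (incl. `indent`) from that version:

    from piker.log import mk_repr

    # a depth-limited, multi-line formatter callable
    pfmt = mk_repr()
    quote = {
        'btcusdt': {
            'last': 42000.1,
            'ticks': [{'type': 'trade', 'price': 42000.1, 'size': 0.5}],
        },
    }
    print(pfmt(quote))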
|  |  | |||
|  | @ -30,7 +30,11 @@ Actor runtime primitives and (distributed) service APIs for, | |||
|   => TODO: maybe to (re)move elsewhere? | ||||
| 
 | ||||
| ''' | ||||
| from ._mngr import Services as Services | ||||
| from ._mngr import ( | ||||
|     get_service_mngr as get_service_mngr, | ||||
|     open_service_mngr as open_service_mngr, | ||||
|     ServiceMngr as ServiceMngr, | ||||
| ) | ||||
| from ._registry import ( | ||||
|     _tractor_kwargs as _tractor_kwargs, | ||||
|     _default_reg_addr as _default_reg_addr, | ||||
|  |  | |||
|  | @ -21,7 +21,6 @@ | |||
| from __future__ import annotations | ||||
| import os | ||||
| from typing import ( | ||||
|     Optional, | ||||
|     Any, | ||||
|     ClassVar, | ||||
| ) | ||||
|  | @ -30,13 +29,13 @@ from contextlib import ( | |||
| ) | ||||
| 
 | ||||
| import tractor | ||||
| import trio | ||||
| 
 | ||||
| from ._util import ( | ||||
|     get_console_log, | ||||
| ) | ||||
| from ._mngr import ( | ||||
|     Services, | ||||
|     open_service_mngr, | ||||
|     ServiceMngr, | ||||
| ) | ||||
| from ._registry import (  # noqa | ||||
|     _tractor_kwargs, | ||||
|  | @ -59,7 +58,7 @@ async def open_piker_runtime( | |||
|     registry_addrs: list[tuple[str, int]] = [], | ||||
| 
 | ||||
|     enable_modules: list[str] = [], | ||||
|     loglevel: Optional[str] = None, | ||||
|     loglevel: str|None = None, | ||||
| 
 | ||||
|     # XXX NOTE XXX: you should pretty much never want debug mode | ||||
|     # for data daemons when running in production. | ||||
|  | @ -69,7 +68,7 @@ async def open_piker_runtime( | |||
|     # and spawn the service tree distributed per that. | ||||
|     start_method: str = 'trio', | ||||
| 
 | ||||
|     tractor_runtime_overrides: dict | None = None, | ||||
|     tractor_runtime_overrides: dict|None = None, | ||||
|     **tractor_kwargs, | ||||
| 
 | ||||
| ) -> tuple[ | ||||
|  | @ -119,6 +118,10 @@ async def open_piker_runtime( | |||
|                 # spawn other specialized daemons I think? | ||||
|                 enable_modules=enable_modules, | ||||
| 
 | ||||
|                 # TODO: how to configure this? | ||||
|                 # keep it on by default if debug mode is set? | ||||
|                 # maybe_enable_greenback=debug_mode, | ||||
| 
 | ||||
|                 **tractor_kwargs, | ||||
|             ) as actor, | ||||
| 
 | ||||
|  | @ -167,12 +170,13 @@ async def open_pikerd( | |||
| 
 | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> Services: | ||||
| ) -> ServiceMngr: | ||||
|     ''' | ||||
|     Start a root piker daemon with an indefinite lifetime. | ||||
|     Start a root piker daemon actor (aka `pikerd`) with an indefinite | ||||
|     lifetime. | ||||
| 
 | ||||
|     A root actor nursery is created which can be used to create and keep | ||||
|     alive underling services (see below). | ||||
|     A root actor-nursery is created which can be used to spawn and | ||||
|     supervise underling service sub-actors (see below). | ||||
| 
 | ||||
|     ''' | ||||
|     # NOTE: for the root daemon we always enable the root | ||||
|  | @ -199,8 +203,6 @@ async def open_pikerd( | |||
|             root_actor, | ||||
|             reg_addrs, | ||||
|         ), | ||||
|         tractor.open_nursery() as actor_nursery, | ||||
|         trio.open_nursery() as service_nursery, | ||||
|     ): | ||||
|         for addr in reg_addrs: | ||||
|             if addr not in root_actor.accept_addrs: | ||||
|  | @ -209,25 +211,17 @@ async def open_pikerd( | |||
|                     'Maybe you have another daemon already running?' | ||||
|                 ) | ||||
| 
 | ||||
|         # assign globally for future daemon/task creation | ||||
|         Services.actor_n = actor_nursery | ||||
|         Services.service_n = service_nursery | ||||
|         Services.debug_mode = debug_mode | ||||
| 
 | ||||
|         try: | ||||
|             yield Services | ||||
| 
 | ||||
|         finally: | ||||
|             # TODO: is this more clever/efficient? | ||||
|             # if 'samplerd' in Services.service_tasks: | ||||
|             #     await Services.cancel_service('samplerd') | ||||
|             service_nursery.cancel_scope.cancel() | ||||
|         mngr: ServiceMngr | ||||
|         async with open_service_mngr( | ||||
|             debug_mode=debug_mode, | ||||
|         ) as mngr: | ||||
|             yield mngr | ||||
| 
 | ||||
| 
 | ||||
| # TODO: do we even need this? | ||||
| # @acm | ||||
| # async def maybe_open_runtime( | ||||
| #     loglevel: Optional[str] = None, | ||||
| #     loglevel: str|None = None, | ||||
| #     **kwargs, | ||||
| 
 | ||||
| # ) -> None: | ||||
|  | @ -256,7 +250,7 @@ async def maybe_open_pikerd( | |||
|     loglevel: str | None = None, | ||||
|     **kwargs, | ||||
| 
 | ||||
| ) -> tractor._portal.Portal | ClassVar[Services]: | ||||
| ) -> tractor._portal.Portal | ClassVar[ServiceMngr]: | ||||
|     ''' | ||||
|     If no ``pikerd`` daemon-root-actor can be found start it and | ||||
|     yield up (we should probably figure out returning a portal to self | ||||
|  |  | |||
|  | @ -49,7 +49,7 @@ from requests.exceptions import ( | |||
|     ReadTimeout, | ||||
| ) | ||||
| 
 | ||||
| from ._mngr import Services | ||||
| from ._mngr import ServiceMngr | ||||
| from ._util import ( | ||||
|     log,  # sub-sys logger | ||||
|     get_console_log, | ||||
|  | @ -453,7 +453,7 @@ async def open_ahabd( | |||
| 
 | ||||
| @acm | ||||
| async def start_ahab_service( | ||||
|     services: Services, | ||||
|     services: ServiceMngr, | ||||
|     service_name: str, | ||||
| 
 | ||||
|     # endpoint config passed as **kwargs | ||||
|  | @ -549,7 +549,8 @@ async def start_ahab_service( | |||
|         log.warning('Failed to cancel root permsed container') | ||||
| 
 | ||||
|     except ( | ||||
|         trio.MultiError, | ||||
|         # trio.MultiError, | ||||
|         ExceptionGroup, | ||||
|     ) as err: | ||||
|         for subexc in err.exceptions: | ||||
|             if isinstance(subexc, PermissionError): | ||||
|  |  | |||
|  | @ -26,14 +26,17 @@ from typing import ( | |||
| from contextlib import ( | ||||
|     asynccontextmanager as acm, | ||||
| ) | ||||
| from collections import defaultdict | ||||
| 
 | ||||
| import tractor | ||||
| import trio | ||||
| 
 | ||||
| from ._util import ( | ||||
|     log,  # sub-sys logger | ||||
| ) | ||||
| from ._mngr import ( | ||||
|     Services, | ||||
|     get_service_mngr, | ||||
|     ServiceMngr, | ||||
| ) | ||||
| from ._actor_runtime import maybe_open_pikerd | ||||
| from ._registry import find_service | ||||
|  | @ -41,15 +44,14 @@ from ._registry import find_service | |||
| 
 | ||||
| @acm | ||||
| async def maybe_spawn_daemon( | ||||
| 
 | ||||
|     service_name: str, | ||||
|     service_task_target: Callable, | ||||
| 
 | ||||
|     spawn_args: dict[str, Any], | ||||
| 
 | ||||
|     loglevel: str | None = None, | ||||
|     singleton: bool = False, | ||||
| 
 | ||||
|     _locks = defaultdict(trio.Lock), | ||||
|     **pikerd_kwargs, | ||||
| 
 | ||||
| ) -> tractor.Portal: | ||||
|  | @ -67,7 +69,7 @@ async def maybe_spawn_daemon( | |||
|     ''' | ||||
|     # serialize access to this section to avoid | ||||
|     # 2 or more tasks racing to create a daemon | ||||
|     lock = Services.locks[service_name] | ||||
|     lock = _locks[service_name] | ||||
|     await lock.acquire() | ||||
| 
 | ||||
|     async with find_service( | ||||
|  | @ -132,7 +134,65 @@ async def maybe_spawn_daemon( | |||
|         async with tractor.wait_for_actor(service_name) as portal: | ||||
|             lock.release() | ||||
|             yield portal | ||||
|             await portal.cancel_actor() | ||||
|             # --- ---- --- | ||||
|             # XXX NOTE XXX | ||||
|             # --- ---- --- | ||||
|             # DO NOT PUT A `portal.cancel_actor()` here (as was prior)! | ||||
|             # | ||||
|             # Doing so will cause an "out-of-band" ctxc | ||||
|             # (`tractor.ContextCancelled`) to be raised inside the | ||||
|             # `ServiceMngr.open_context_in_task()`'s call to | ||||
|             # `ctx.wait_for_result()` AND the internal self-ctxc | ||||
|             # "graceful capture" WILL NOT CATCH IT! | ||||
|             # | ||||
|             # This can cause certain types of operations to raise | ||||
|             # that ctxc BEFORE THEY `return`, resulting in | ||||
|             # a "false-negative" ctxc being raised when really | ||||
|             # nothing actually failed, other than our semantic | ||||
|             # "failure" to suppress an expected, graceful, | ||||
|             # self-cancel scenario.. | ||||
|             # | ||||
|             # bUt wHy duZ It WorK lIKe dis.. | ||||
|             # ------------------------------ | ||||
|             # from the perspective of the `tractor.Context` this | ||||
|             # cancel request was conducted "out of band" since | ||||
|             # `Context.cancel()` was never called and thus the | ||||
|             # `._cancel_called: bool` was never set. Despite the | ||||
|             # remote `.canceller` being set to `pikerd` (i.e. the | ||||
|             # same `Actor.uid` of the raising service-mngr task) the | ||||
|             # service-task's ctx itself was never marked as having | ||||
|             # requested cancellation and thus still raises the ctxc | ||||
|             # bc it was unaware of any such request. | ||||
|             # | ||||
|             # How to make grokin these cases easier tho? | ||||
|             # ------------------------------------------ | ||||
|             # Because `Portal.cancel_actor()` was called it requests | ||||
|             # "full-`Actor`-runtime-cancellation" of it's peer | ||||
|             # process which IS NOT THE SAME as a single inter-actor | ||||
|             # RPC task cancelling its local context with a remote | ||||
|             # peer `Task` in that same peer process. | ||||
|             # | ||||
|             # ?TODO? It might be better if we do one (or all) of the | ||||
|             # following: | ||||
|             # | ||||
|             # -[ ] at least set a special message for the | ||||
|             #    `ContextCancelled` when raised locally by the | ||||
|             #    unaware ctx task such that we check for the | ||||
|             #    `.canceller` being *our `Actor`* and in the case | ||||
|             #    where `Context._cancel_called == False` we specially | ||||
|             #    note that this is likely an "out-of-band" | ||||
|             #    runtime-cancel request triggered by some call to | ||||
|             #    `Portal.cancel_actor()`, possibly even reporting the | ||||
|             #    exact LOC of that caller by tracking it inside our | ||||
|             #    portal-type? | ||||
|             # -[ ] possibly add another field `ContextCancelled` like | ||||
|             #    maybe a, | ||||
|             #    `.request_type: Literal['os', 'proc', 'actor', | ||||
|             #    'ctx']` type thing which would allow immediately | ||||
|             #    being able to tell what kind of cancellation caused | ||||
|             #    the unexpected ctxc? | ||||
|             # -[ ] REMOVE THIS COMMENT, once we've settled on how to | ||||
|             #     better augment `tractor` to be more explicit on this! | ||||
| 
 | ||||
| 
 | ||||
| async def spawn_emsd( | ||||
|  | @ -147,21 +207,22 @@ async def spawn_emsd( | |||
|     """ | ||||
|     log.info('Spawning emsd') | ||||
| 
 | ||||
|     portal = await Services.actor_n.start_actor( | ||||
|     smngr: ServiceMngr = get_service_mngr() | ||||
|     portal = await smngr.actor_n.start_actor( | ||||
|         'emsd', | ||||
|         enable_modules=[ | ||||
|             'piker.clearing._ems', | ||||
|             'piker.clearing._client', | ||||
|         ], | ||||
|         loglevel=loglevel, | ||||
|         debug_mode=Services.debug_mode,  # set by pikerd flag | ||||
|         debug_mode=smngr.debug_mode,  # set by pikerd flag | ||||
|         **extra_tractor_kwargs | ||||
|     ) | ||||
| 
 | ||||
|     # non-blocking setup of clearing service | ||||
|     from ..clearing._ems import _setup_persistent_emsd | ||||
| 
 | ||||
|     await Services.start_service_task( | ||||
|     await smngr.start_service_task( | ||||
|         'emsd', | ||||
|         portal, | ||||
| 
 | ||||
|  |  | |||
|  | @ -18,16 +18,29 @@ | |||
| daemon-service management API. | ||||
| 
 | ||||
| """ | ||||
| from __future__ import annotations | ||||
| from contextlib import ( | ||||
|     asynccontextmanager as acm, | ||||
|     # contextmanager as cm, | ||||
| ) | ||||
| from collections import defaultdict | ||||
| from dataclasses import ( | ||||
|     dataclass, | ||||
|     field, | ||||
| ) | ||||
| import functools | ||||
| import inspect | ||||
| from typing import ( | ||||
|     Callable, | ||||
|     Any, | ||||
| ) | ||||
| 
 | ||||
| import trio | ||||
| from trio_typing import TaskStatus | ||||
| import msgspec | ||||
| import tractor | ||||
| import trio | ||||
| from trio import TaskStatus | ||||
| from tractor import ( | ||||
|     ActorNursery, | ||||
|     current_actor, | ||||
|     ContextCancelled, | ||||
|     Context, | ||||
|  | @ -39,6 +52,130 @@ from ._util import ( | |||
| ) | ||||
| 
 | ||||
| 
 | ||||
| # TODO: implement a singleton deco-API for wrapping the below | ||||
| # factory's impl for general actor-singleton use? | ||||
| # | ||||
| # @singleton | ||||
| # async def open_service_mngr( | ||||
| #     **init_kwargs, | ||||
| # ) -> ServiceMngr: | ||||
| #     ''' | ||||
| #     Note this function body is invoked IFF no existing singleton instance already | ||||
| #     exists in this proc's memory. | ||||
| 
 | ||||
| #     ''' | ||||
| #     # setup | ||||
| #     yield ServiceMngr(**init_kwargs) | ||||
| #     # teardown | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| # TODO: singleton factory API instead of a class API | ||||
| @acm | ||||
| async def open_service_mngr( | ||||
|     *, | ||||
|     debug_mode: bool = False, | ||||
| 
 | ||||
|     # impl detail which ensures a single global instance | ||||
|     _singleton: list[ServiceMngr|None] = [None], | ||||
|     **init_kwargs, | ||||
| 
 | ||||
| ) -> ServiceMngr: | ||||
|     ''' | ||||
|     Open a multi-subactor-as-service-daemon tree supervisor. | ||||
| 
 | ||||
|     The delivered `ServiceMngr` is a singleton instance for each | ||||
|     actor-process and is allocated on first open and never | ||||
|     de-allocated unless explicitly deleted by a call to | ||||
|     `del_service_mngr()`. | ||||
| 
 | ||||
|     ''' | ||||
|     # TODO: factor this allocation into | ||||
|     # a `._mngr.open_service_mngr()` and put in the | ||||
|     # once-n-only-once setup/`.__aenter__()` part! | ||||
|     # -[ ] how to make this only happen on the `mngr == None` case? | ||||
|     #  |_ use `.trionics.maybe_open_context()` (for generic | ||||
|     #     async-with-style-only-once of the factory impl, though | ||||
|     #     what do we do for the allocation case? | ||||
|     #    / `.maybe_open_nursery()` (since for this specific case | ||||
|     #    it's simpler?) to activate | ||||
|     async with ( | ||||
|         tractor.open_nursery() as an, | ||||
|         trio.open_nursery() as tn, | ||||
|     ): | ||||
|         # impl specific obvi.. | ||||
|         init_kwargs.update({ | ||||
|             'actor_n': an, | ||||
|             'service_n': tn, | ||||
|         }) | ||||
| 
 | ||||
|         mngr: ServiceMngr|None | ||||
|         if (mngr := _singleton[0]) is None: | ||||
| 
 | ||||
|             log.info('Allocating a new service mngr!') | ||||
|             mngr = _singleton[0] = ServiceMngr(**init_kwargs) | ||||
| 
 | ||||
|             # TODO: put into `.__aenter__()` section of | ||||
|             # eventual `@singleton_acm` API wrapper. | ||||
|             # | ||||
|             # assign globally for future daemon/task creation | ||||
|             mngr.actor_n = an | ||||
|             mngr.service_n = tn | ||||
| 
 | ||||
|         else: | ||||
|             assert ( | ||||
|                 mngr.actor_n | ||||
|                 and | ||||
|                 mngr.service_n | ||||
|             ) | ||||
|             log.info( | ||||
|                 'Using extant service mngr!\n\n' | ||||
|                 f'{mngr!r}\n'  # it has a nice `.__repr__()` of services state | ||||
|             ) | ||||
| 
 | ||||
|         try: | ||||
|             # NOTE: this is a singleton factory impl specific detail | ||||
|             # which should be supported in the condensed | ||||
|             # `@singleton_acm` API? | ||||
|             mngr.debug_mode = debug_mode | ||||
| 
 | ||||
|             yield mngr | ||||
|         finally: | ||||
|             # TODO: is this more clever/efficient? | ||||
|             # if 'samplerd' in mngr.service_tasks: | ||||
|             #     await mngr.cancel_service('samplerd') | ||||
|             tn.cancel_scope.cancel() | ||||
| 
 | ||||
| 
 | ||||
| 
 | ||||
| def get_service_mngr() -> ServiceMngr: | ||||
|     ''' | ||||
|     Try to get the singleton service-mngr for this actor presuming it | ||||
|     has already been allocated using, | ||||
| 
 | ||||
|     .. code:: python | ||||
| 
 | ||||
|         async with open_<@singleton_acm(func)>() as mngr | ||||
|             ... this block kept open ... | ||||
| 
 | ||||
|     If not yet allocated raise a `ServiceError`. | ||||
| 
 | ||||
|     ''' | ||||
|     # https://stackoverflow.com/a/12627202 | ||||
|     # https://docs.python.org/3/library/inspect.html#inspect.Signature | ||||
|     maybe_mngr: ServiceMngr|None = inspect.signature( | ||||
|         open_service_mngr | ||||
|     ).parameters['_singleton'].default[0] | ||||
| 
 | ||||
|     if maybe_mngr is None: | ||||
|         raise RuntimeError( | ||||
|             'Someone must allocate a `ServiceMngr` using\n\n' | ||||
|             '`async with open_service_mngr()` beforehand!!\n' | ||||
|         ) | ||||
| 
 | ||||
|     return maybe_mngr | ||||
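Putting the two pieces together, a rough sketch of the allocate-then-lookup pattern described above; this is a fragment meant to run inside piker's actor runtime, not a standalone script:

    from piker.service import (
        open_service_mngr,
        get_service_mngr,
    )

    async def some_root_task():
        # allocate the per-actor singleton once, near the top of the
        # actor's task tree..
        async with open_service_mngr(debug_mode=False) as mngr:
            # ..then any other task in this actor can look it up
            # without threading the instance through call signatures.
            assert get_service_mngr() is mngr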
| 
 | ||||
| 
 | ||||
| # TODO: we need remote wrapping and a general soln: | ||||
| # - factor this into a ``tractor.highlevel`` extension # pack for the | ||||
| #   library. | ||||
|  | @ -46,31 +183,46 @@ from ._util import ( | |||
| #   to the pikerd actor for starting services remotely! | ||||
| # - prolly rename this to ActorServicesNursery since it spawns | ||||
| #   new actors and supervises them to completion? | ||||
| class Services: | ||||
| @dataclass | ||||
| class ServiceMngr: | ||||
| # class ServiceMngr(msgspec.Struct): | ||||
|     ''' | ||||
|     A multi-subactor-as-service manager. | ||||
| 
 | ||||
|     actor_n: tractor._supervise.ActorNursery | ||||
|     Spawn, supervise and monitor service/daemon subactors in a SC | ||||
|     process tree. | ||||
| 
 | ||||
|     ''' | ||||
|     actor_n: ActorNursery | ||||
|     service_n: trio.Nursery | ||||
|     debug_mode: bool  # tractor sub-actor debug mode flag | ||||
|     debug_mode: bool = False # tractor sub-actor debug mode flag | ||||
| 
 | ||||
|     service_tasks: dict[ | ||||
|         str, | ||||
|         tuple[ | ||||
|             trio.CancelScope, | ||||
|             Context, | ||||
|             Portal, | ||||
|             trio.Event, | ||||
|         ] | ||||
|     ] = {} | ||||
|     locks = defaultdict(trio.Lock) | ||||
|     ] = field(default_factory=dict) | ||||
| 
 | ||||
|     # internal per-service task mutexes | ||||
|     _locks = defaultdict(trio.Lock) | ||||
| 
 | ||||
|     @classmethod | ||||
|     async def start_service_task( | ||||
|         self, | ||||
|         name: str, | ||||
|         portal: Portal, | ||||
| 
 | ||||
|         # TODO: typevar for the return type of the target and then | ||||
|         # use it below for `ctx_res`? | ||||
|         target: Callable, | ||||
| 
 | ||||
|         allow_overruns: bool = False, | ||||
|         **ctx_kwargs, | ||||
| 
 | ||||
|     ) -> (trio.CancelScope, Context): | ||||
|     ) -> (trio.CancelScope, Context, Any): | ||||
|         ''' | ||||
|         Open a context in a service sub-actor, add to a stack | ||||
|         that gets unwound at ``pikerd`` teardown. | ||||
|  | @ -83,6 +235,7 @@ class Services: | |||
|             task_status: TaskStatus[ | ||||
|                 tuple[ | ||||
|                     trio.CancelScope, | ||||
|                     Context, | ||||
|                     trio.Event, | ||||
|                     Any, | ||||
|                 ] | ||||
|  | @ -90,64 +243,87 @@ class Services: | |||
| 
 | ||||
|         ) -> Any: | ||||
| 
 | ||||
|             # TODO: use the ctx._scope directly here instead? | ||||
|             # -[ ] actually what semantics do we expect for this | ||||
|             #   usage!? | ||||
|             with trio.CancelScope() as cs: | ||||
|                 try: | ||||
|                     async with portal.open_context( | ||||
|                         target, | ||||
|                         allow_overruns=allow_overruns, | ||||
|                         **ctx_kwargs, | ||||
| 
 | ||||
|                 async with portal.open_context( | ||||
|                     target, | ||||
|                     allow_overruns=allow_overruns, | ||||
|                     **ctx_kwargs, | ||||
|                     ) as (ctx, started): | ||||
| 
 | ||||
|                 ) as (ctx, first): | ||||
| 
 | ||||
|                     # unblock once the remote context has started | ||||
|                     complete = trio.Event() | ||||
|                     task_status.started((cs, complete, first)) | ||||
|                     log.info( | ||||
|                         f'`pikerd` service {name} started with value {first}' | ||||
|                     ) | ||||
|                     try: | ||||
|                         # unblock once the remote context has started | ||||
|                         complete = trio.Event() | ||||
|                         task_status.started(( | ||||
|                             cs, | ||||
|                             ctx, | ||||
|                             complete, | ||||
|                             started, | ||||
|                         )) | ||||
|                         log.info( | ||||
|                             f'`pikerd` service {name} started with value {started}' | ||||
|                         ) | ||||
|                         # wait on any context's return value | ||||
|                         # and any final portal result from the | ||||
|                         # sub-actor. | ||||
|                         ctx_res: Any = await ctx.result() | ||||
|                         ctx_res: Any = await ctx.wait_for_result() | ||||
| 
 | ||||
|                         # NOTE: blocks indefinitely until cancelled | ||||
|                         # either by error from the target context | ||||
|                         # function or by being cancelled here by the | ||||
|                         # surrounding cancel scope. | ||||
|                         return (await portal.result(), ctx_res) | ||||
|                     except ContextCancelled as ctxe: | ||||
|                         canceller: tuple[str, str] = ctxe.canceller | ||||
|                         our_uid: tuple[str, str] = current_actor().uid | ||||
|                         if ( | ||||
|                             canceller != portal.channel.uid | ||||
|                             and | ||||
|                             canceller != our_uid | ||||
|                         ): | ||||
|                             log.cancel( | ||||
|                                 f'Actor-service {name} was remotely cancelled?\n' | ||||
|                                 f'remote canceller: {canceller}\n' | ||||
|                                 f'Keeping {our_uid} alive, ignoring sub-actor cancel..\n' | ||||
|                             ) | ||||
|                         else: | ||||
|                             raise | ||||
|                         return ( | ||||
|                             await portal.wait_for_result(), | ||||
|                             ctx_res, | ||||
|                         ) | ||||
| 
 | ||||
|                 except ContextCancelled as ctxe: | ||||
|                     canceller: tuple[str, str] = ctxe.canceller | ||||
|                     our_uid: tuple[str, str] = current_actor().uid | ||||
|                     if ( | ||||
|                         canceller != portal.chan.uid | ||||
|                         and | ||||
|                         canceller != our_uid | ||||
|                     ): | ||||
|                         log.cancel( | ||||
|                             f'Actor-service `{name}` was remotely cancelled by a peer?\n' | ||||
| 
 | ||||
|                             # TODO: this would be a good spot to use | ||||
|                             # a respawn feature Bo | ||||
|                             f'-> Keeping `pikerd` service manager alive despite this inter-peer cancel\n\n' | ||||
| 
 | ||||
|                     finally: | ||||
|                         await portal.cancel_actor() | ||||
|                         complete.set() | ||||
|                         self.service_tasks.pop(name) | ||||
|                             f'cancellee: {portal.chan.uid}\n' | ||||
|                             f'canceller: {canceller}\n' | ||||
|                         ) | ||||
|                     else: | ||||
|                         raise | ||||
| 
 | ||||
|         cs, complete, first = await self.service_n.start(open_context_in_task) | ||||
|                 finally: | ||||
|                     # NOTE: the ctx MUST be cancelled first if we | ||||
|                     # don't want the above `ctx.wait_for_result()` to | ||||
|                     # raise a self-ctxc. WHY, well since from the ctx's | ||||
|                     # perspective the cancel request will have | ||||
|                     # arrived out-of-band at the `Actor.cancel()` | ||||
|                     # level, thus `Context.cancel_called == False`, | ||||
|                     # meaning `ctx._is_self_cancelled() == False`. | ||||
|                     # with trio.CancelScope(shield=True): | ||||
|                     # await ctx.cancel() | ||||
|                     await portal.cancel_actor() | ||||
|                     complete.set() | ||||
|                     self.service_tasks.pop(name) | ||||
| 
 | ||||
|         cs, sub_ctx, complete, started = await self.service_n.start( | ||||
|             open_context_in_task | ||||
|         ) | ||||
| 
 | ||||
|         # store the cancel scope and portal for later cancellation or | ||||
|         # restart if needed. | ||||
|         self.service_tasks[name] = (cs, portal, complete) | ||||
|         self.service_tasks[name] = (cs, sub_ctx, portal, complete) | ||||
|         return cs, sub_ctx, started | ||||
| 
 | ||||
|         return cs, first | ||||
| 
 | ||||
|     @classmethod | ||||
|     async def cancel_service( | ||||
|         self, | ||||
|         name: str, | ||||
|  | @ -158,8 +334,80 @@ class Services: | |||
| 
 | ||||
|         ''' | ||||
|         log.info(f'Cancelling `pikerd` service {name}') | ||||
|         cs, portal, complete = self.service_tasks[name] | ||||
|         cs.cancel() | ||||
|         cs, sub_ctx, portal, complete = self.service_tasks[name] | ||||
| 
 | ||||
|         # cs.cancel() | ||||
|         await sub_ctx.cancel() | ||||
|         await complete.wait() | ||||
|         assert name not in self.service_tasks, \ | ||||
|             f'Serice task for {name} not terminated?' | ||||
| 
 | ||||
|         if name in self.service_tasks: | ||||
|             # TODO: custom err? | ||||
|             # raise ServiceError( | ||||
|             raise RuntimeError( | ||||
|                 f'Service task for {name} not terminated?' | ||||
|             ) | ||||
| 
 | ||||
|         # assert name not in self.service_tasks, \ | ||||
|         #     f'Serice task for {name} not terminated?' | ||||
| 
 | ||||
|     async def start_service( | ||||
|         self, | ||||
|         daemon_name: str, | ||||
|         ctx_ep: Callable,  # kwargs must `partial`-ed in! | ||||
| 
 | ||||
|         debug_mode: bool = False, | ||||
|         **tractor_actor_kwargs, | ||||
| 
 | ||||
|     ) -> Context: | ||||
|         ''' | ||||
|         Start a "service" task in a new sub-actor (daemon) and manage it's lifetime | ||||
|         indefinitely. | ||||
| 
 | ||||
|         Services can be cancelled/shutdown using `.cancel_service()`. | ||||
| 
 | ||||
|         ''' | ||||
|         entry: tuple|None = self.service_tasks.get(daemon_name) | ||||
|         if entry: | ||||
|             (cs, sub_ctx, portal, complete) = entry | ||||
|             return sub_ctx | ||||
| 
 | ||||
|         if daemon_name not in self.service_tasks: | ||||
|             portal = await self.actor_n.start_actor( | ||||
|                 daemon_name, | ||||
|                 debug_mode=(  # maybe set globally during allocate | ||||
|                     debug_mode | ||||
|                     or | ||||
|                     self.debug_mode | ||||
|                 ), | ||||
|                 **tractor_actor_kwargs, | ||||
|             ) | ||||
|             ctx_kwargs: dict[str, Any] = {} | ||||
|             if isinstance(ctx_ep, functools.partial): | ||||
|                 ctx_kwargs: dict[str, Any] = ctx_ep.keywords | ||||
|                 ctx_ep: Callable = ctx_ep.func | ||||
| 
 | ||||
|             (cs, sub_ctx, started) = await self.start_service_task( | ||||
|                 daemon_name, | ||||
|                 portal, | ||||
|                 ctx_ep, | ||||
|                 **ctx_kwargs, | ||||
|             ) | ||||
| 
 | ||||
|             return sub_ctx | ||||
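As a usage reference, this mirrors how `spawn_samplerd()` (earlier in this diff) drives the new method; a condensed sketch assuming it runs inside the `pikerd` service tree:

    from functools import partial
    from piker.service import get_service_mngr, ServiceMngr
    from piker.data._sampling import register_with_sampler

    async def start_sampler_service() -> None:
        mngr: ServiceMngr = get_service_mngr()
        ctx = await mngr.start_service(
            daemon_name='samplerd',
            # ctx endpoint with kwargs `partial`-ed in as required
            ctx_ep=partial(
                register_with_sampler,
                period_s=1,
                sub_for_broadcasts=False,
            ),
            enable_modules=['piker.data._sampling'],
            loglevel='info',
        )
        # the already-started case simply returns the extant ctx
        assert ctx.portal and not ctx.cancel_called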
| 
 | ||||
| 
 | ||||
| # TODO: | ||||
| # -[ ] factor all the common shit from `.data._sampling` | ||||
| #   and `.brokers._daemon` into here / `ServiceMngr` | ||||
| #   in terms of allocating the `Portal` as part of the | ||||
| #   "service-in-subactor" starting! | ||||
| # -[ ] move to `tractor.hilevel._service`, import and use here! | ||||
| # NOTE: purposely leaks the ref to the mod-scope Bo | ||||
| # import tractor | ||||
| # from tractor.hilevel import ( | ||||
| #     open_service_mngr, | ||||
| #     ServiceMngr, | ||||
| # ) | ||||
| # mngr: ServiceMngr|None = None | ||||
| # with tractor.hilevel.open_service_mngr() as mngr: | ||||
| #     Services = proxy(mngr) | ||||
|  |  | |||
|  | @ -21,11 +21,13 @@ from typing import ( | |||
|     TYPE_CHECKING, | ||||
| ) | ||||
| 
 | ||||
| # TODO: oof, needs to be changed to `httpx`! | ||||
| import asks | ||||
| 
 | ||||
| if TYPE_CHECKING: | ||||
|     import docker | ||||
|     from ._ahab import DockerContainer | ||||
|     from . import ServiceMngr | ||||
| 
 | ||||
| from ._util import log  # sub-sys logger | ||||
| from ._util import ( | ||||
|  | @ -127,7 +129,7 @@ def start_elasticsearch( | |||
| 
 | ||||
| @acm | ||||
| async def start_ahab_daemon( | ||||
|     service_mngr: Services, | ||||
|     service_mngr: ServiceMngr, | ||||
|     user_config: dict | None = None, | ||||
|     loglevel: str | None = None, | ||||
| 
 | ||||
|  |  | |||
|  | @ -53,7 +53,7 @@ import pendulum | |||
| # import purerpc | ||||
| 
 | ||||
| from ..data.feed import maybe_open_feed | ||||
| from . import Services | ||||
| from . import ServiceMngr | ||||
| from ._util import ( | ||||
|     log,  # sub-sys logger | ||||
|     get_console_log, | ||||
|  | @ -233,7 +233,7 @@ def start_marketstore( | |||
| 
 | ||||
| @acm | ||||
| async def start_ahab_daemon( | ||||
|     service_mngr: Services, | ||||
|     service_mngr: ServiceMngr, | ||||
|     user_config: dict | None = None, | ||||
|     loglevel: str | None = None, | ||||
| 
 | ||||
|  |  | |||
|  | @ -161,7 +161,12 @@ class NativeStorageClient: | |||
| 
 | ||||
|     def index_files(self): | ||||
|         for path in self._datadir.iterdir(): | ||||
|             if path.name in {'borked', 'expired',}: | ||||
|             if ( | ||||
|                 path.name in {'borked', 'expired',} | ||||
|                 or | ||||
|                 '.parquet' not in str(path) | ||||
|             ): | ||||
|                 # ignore all non-apache files (for now) | ||||
|                 continue | ||||
| 
 | ||||
|             key: str = path.name.rstrip('.parquet') | ||||
|  |  | |||
|  | @ -44,8 +44,10 @@ import trio | |||
| from trio_typing import TaskStatus | ||||
| import tractor | ||||
| from pendulum import ( | ||||
|     Interval, | ||||
|     DateTime, | ||||
|     Duration, | ||||
|     duration as mk_duration, | ||||
|     from_timestamp, | ||||
| ) | ||||
| import numpy as np | ||||
|  | @ -214,7 +216,8 @@ async def maybe_fill_null_segments( | |||
|         # pair, immediately stop backfilling? | ||||
|         if ( | ||||
|             start_dt | ||||
|             and end_dt < start_dt | ||||
|             and | ||||
|             end_dt < start_dt | ||||
|         ): | ||||
|             await tractor.pause() | ||||
|             break | ||||
|  | @ -262,6 +265,7 @@ async def maybe_fill_null_segments( | |||
|         except tractor.ContextCancelled: | ||||
|             # log.exception | ||||
|             await tractor.pause() | ||||
|             raise | ||||
| 
 | ||||
|     null_segs_detected.set() | ||||
|     # RECHECK for more null-gaps | ||||
|  | @ -349,7 +353,7 @@ async def maybe_fill_null_segments( | |||
| 
 | ||||
| async def start_backfill( | ||||
|     get_hist, | ||||
|     frame_types: dict[str, Duration] | None, | ||||
|     def_frame_duration: Duration, | ||||
|     mod: ModuleType, | ||||
|     mkt: MktPair, | ||||
|     shm: ShmArray, | ||||
|  | @ -379,22 +383,23 @@ async def start_backfill( | |||
|         update_start_on_prepend: bool = False | ||||
|         if backfill_until_dt is None: | ||||
| 
 | ||||
|             # TODO: drop this right and just expose the backfill | ||||
|             # limits inside a [storage] section in conf.toml? | ||||
|             # when no tsdb "last datum" is provided, we just load | ||||
|             # some near-term history. | ||||
|             # periods = { | ||||
|             #     1: {'days': 1}, | ||||
|             #     60: {'days': 14}, | ||||
|             # } | ||||
| 
 | ||||
|             # do a decently sized backfill and load it into storage. | ||||
|             # TODO: per-provider default history-durations? | ||||
|             # -[ ] inside the `open_history_client()` config allow | ||||
|             #    declaring the history duration limits instead of | ||||
|             #    guessing and/or applying the same limits to all? | ||||
|             # | ||||
|             # -[ ] allow declaring (default) per-provider backfill | ||||
|             #     limits inside a [storage] sub-section in conf.toml? | ||||
|             # | ||||
|             # NOTE, when no tsdb "last datum" is provided, we just | ||||
|             # load some near-term history by presuming a "decently | ||||
|             # large" 60s duration limit and a much shorter 1s range. | ||||
|             periods = { | ||||
|                 1: {'days': 2}, | ||||
|                 60: {'years': 6}, | ||||
|             } | ||||
|             period_duration: int = periods[timeframe] | ||||
|             update_start_on_prepend = True | ||||
|             update_start_on_prepend: bool = True | ||||
| 
 | ||||
|             # NOTE: manually set the "latest" datetime which we intend to | ||||
|             # backfill history "until" so as to adhere to the history | ||||
|  | @ -416,7 +421,6 @@ async def start_backfill( | |||
|                 f'backfill_until_dt: {backfill_until_dt}\n' | ||||
|                 f'last_start_dt: {last_start_dt}\n' | ||||
|             ) | ||||
| 
 | ||||
|             try: | ||||
|                 ( | ||||
|                     array, | ||||
|  | @ -426,71 +430,114 @@ async def start_backfill( | |||
|                     timeframe, | ||||
|                     end_dt=last_start_dt, | ||||
|                 ) | ||||
| 
 | ||||
|             except NoData as _daterr: | ||||
|                 # 3 cases: | ||||
|                 # - frame in the middle of a legit venue gap | ||||
|                 # - history actually began at the `last_start_dt` | ||||
|                 # - some other unknown error (ib blocking the | ||||
|                 #   history bc they don't want you seeing how they | ||||
|                 #   cucked all the tinas..) | ||||
|                 if dur := frame_types.get(timeframe): | ||||
|                     # decrement by a frame's worth of duration and | ||||
|                     # retry a few times. | ||||
|                     last_start_dt.subtract( | ||||
|                         seconds=dur.total_seconds() | ||||
|                 orig_last_start_dt: datetime = last_start_dt | ||||
|                 gap_report: str = ( | ||||
|                     f'EMPTY FRAME for `end_dt: {last_start_dt}`?\n' | ||||
|                     f'{mod.name} -> tf@fqme: {timeframe}@{mkt.fqme}\n' | ||||
|                     f'last_start_dt: {orig_last_start_dt}\n\n' | ||||
|                     f'bf_until: {backfill_until_dt}\n' | ||||
|                 ) | ||||
|                 # EMPTY FRAME signal with 3 (likely) causes: | ||||
|                 # | ||||
|                 # 1. range contains legit gap in venue history | ||||
|                 # 2. history actually (edge case) **began** at the | ||||
|                 #    value `last_start_dt` | ||||
|                 # 3. some other unknown error (ib blocking the | ||||
|                 #    history-query bc they don't want you seeing how | ||||
|                 #    they cucked all the tinas.. like with options | ||||
|                 #    hist) | ||||
|                 # | ||||
|                 if def_frame_duration: | ||||
|                     # decrement by a duration's (frame) worth of time | ||||
|                     # as maybe indicated by the backend to see if we | ||||
|                     # can get older data before this possible | ||||
|                     # "history gap". | ||||
|                     last_start_dt: datetime = last_start_dt.subtract( | ||||
|                         seconds=def_frame_duration.total_seconds() | ||||
|                     ) | ||||
|                     log.warning( | ||||
|                         f'{mod.name} -> EMPTY FRAME for end_dt?\n' | ||||
|                         f'tf@fqme: {timeframe}@{mkt.fqme}\n' | ||||
|                         'bf_until <- last_start_dt:\n' | ||||
|                         f'{backfill_until_dt} <- {last_start_dt}\n' | ||||
|                         f'Decrementing `end_dt` by {dur} and retry..\n' | ||||
|                     gap_report += ( | ||||
|                         f'Decrementing `end_dt` and retrying with,\n' | ||||
|                         f'def_frame_duration: {def_frame_duration}\n' | ||||
|                         f'(new) last_start_dt: {last_start_dt}\n' | ||||
|                     ) | ||||
|                     log.warning(gap_report) | ||||
|                     # skip writing to shm/tsdb and try the next | ||||
|                     # duration's worth of prior history. | ||||
|                     continue | ||||
| 
 | ||||
|             # broker says there never was or is no more history to pull | ||||
|             except DataUnavailable: | ||||
|                 log.warning( | ||||
|                     f'NO-MORE-DATA in range?\n' | ||||
|                     f'`{mod.name}` halted history:\n' | ||||
|                     f'tf@fqme: {timeframe}@{mkt.fqme}\n' | ||||
|                     'bf_until <- last_start_dt:\n' | ||||
|                     f'{backfill_until_dt} <- {last_start_dt}\n' | ||||
|                 ) | ||||
|                 else: | ||||
|                     # await tractor.pause() | ||||
|                     raise DataUnavailable(gap_report) | ||||
| 
 | ||||
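A minimal sketch of the decrement-and-retry pattern described above; the query callable and the exception class here are stand-ins, not the real backend API:

    from datetime import timedelta
    import pendulum

    class NoData(Exception):
        '''Stand-in for the real empty-frame signal.'''

    def fetch_or_step_back(
        get_frame,                              # hypothetical query callable
        end_dt: pendulum.DateTime,
        def_frame_duration: timedelta | None,
        max_retries: int = 3,
    ):
        for _ in range(max_retries):
            try:
                return get_frame(end_dt=end_dt)
            except NoData:
                if def_frame_duration is None:
                    # no frame size known: nothing sensible to decrement by
                    raise
                # step the query end back one frame's worth and retry for
                # possibly older history beyond the apparent gap.
                end_dt = end_dt.subtract(
                    seconds=def_frame_duration.total_seconds(),
                )
        raise NoData(f'No data found before {end_dt}')

    # ex: succeeds on the 3rd attempt after two empty frames
    calls: list = []
    def fake_get_frame(end_dt):
        calls.append(end_dt)
        if len(calls) < 3:
            raise NoData
        return ['<frame>']

    print(fetch_or_step_back(
        fake_get_frame,
        end_dt=pendulum.now('UTC'),
        def_frame_duration=timedelta(minutes=1),
    ))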
|                 # ugh, what's a better way? | ||||
|                 # TODO: fwiw, we probably want a way to signal a throttle | ||||
|                 # condition (eg. with ib) so that we can halt the | ||||
|                 # request loop until the condition is resolved? | ||||
|                 if timeframe > 1: | ||||
|                     await tractor.pause() | ||||
|             # broker says there never was or is no more history to pull | ||||
|             except DataUnavailable as due: | ||||
|                 message: str = due.args[0] | ||||
|                 log.warning( | ||||
|                     f'Provider {mod.name!r} halted backfill due to,\n\n' | ||||
| 
 | ||||
|                     f'{message}\n' | ||||
| 
 | ||||
|                     f'fqme: {mkt.fqme}\n' | ||||
|                     f'timeframe: {timeframe}\n' | ||||
|                     f'last_start_dt: {last_start_dt}\n' | ||||
|                     f'bf_until: {backfill_until_dt}\n' | ||||
|                 ) | ||||
|                 # UGH: what's a better way? | ||||
|                 # TODO: backends are responsible for being correct on | ||||
|                 # this right!? | ||||
|                 # -[ ] in the `ib` case we could maybe offer some way | ||||
|                 #     to halt the request loop until the condition is | ||||
|                 #     resolved or should the backend be entirely in | ||||
|                 #     charge of solving such faults? yes, right? | ||||
|                 return | ||||
| 
 | ||||
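The handler above expects the provider's exception to carry a human-readable reason in `args[0]`; a hedged sketch of how a backend might raise it (hypothetical function, not any particular provider's code):

    class DataUnavailable(Exception):
        '''Stand-in for the real no-more-history signal.'''

    def query_hist(fqme: str, end_dt) -> list:
        frame: list = []  # pretend the venue returned nothing
        if not frame:
            # the message lands in `.args[0]` and is surfaced by the
            # backfill loop's warning log before it halts the task.
            raise DataUnavailable(
                f'No further history for {fqme!r} before {end_dt}'
            )
        return frame

    try:
        query_hist('btcusdt.binance', end_dt='2019-01-01')
    except DataUnavailable as due:
        print(due.args[0])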
|             time: np.ndarray = array['time'] | ||||
|             assert ( | ||||
|                 array['time'][0] | ||||
|                 time[0] | ||||
|                 == | ||||
|                 next_start_dt.timestamp() | ||||
|             ) | ||||
| 
 | ||||
|             diff = last_start_dt - next_start_dt | ||||
|             frame_time_diff_s = diff.seconds | ||||
|             assert time[-1] == next_end_dt.timestamp() | ||||
| 
 | ||||
|             expected_dur: Interval = last_start_dt - next_start_dt | ||||
| 
 | ||||
|             # frame's worth of sample-period-steps, in seconds | ||||
|             frame_size_s: float = len(array) * timeframe | ||||
|             expected_frame_size_s: float = frame_size_s + timeframe | ||||
|             if frame_time_diff_s > expected_frame_size_s: | ||||
| 
 | ||||
|             recv_frame_dur: Duration = ( | ||||
|                 from_timestamp(array[-1]['time']) | ||||
|                 - | ||||
|                 from_timestamp(array[0]['time']) | ||||
|             ) | ||||
|             if ( | ||||
|                 (lt_frame := (recv_frame_dur < expected_dur)) | ||||
|                 or | ||||
|                 (null_frame := (frame_size_s == 0)) | ||||
|                 # ^XXX, should NEVER hit now! | ||||
|             ): | ||||
|                 # XXX: query result includes a start point prior to our | ||||
|                 # expected "frame size" and thus is likely some kind of | ||||
|                 # history gap (eg. market closed period, outage, etc.) | ||||
|                 # so just report it to console for now. | ||||
|                 if lt_frame: | ||||
|                     reason = 'Possible GAP (or first-datum)' | ||||
|                 else: | ||||
|                     assert null_frame | ||||
|                     reason = 'NULL-FRAME' | ||||
| 
 | ||||
|                 missing_dur: Interval = expected_dur.end - recv_frame_dur.end | ||||
|                 log.warning( | ||||
|                     'GAP DETECTED:\n' | ||||
|                     f'last_start_dt: {last_start_dt}\n' | ||||
|                     f'diff: {diff}\n' | ||||
|                     f'frame_time_diff_s: {frame_time_diff_s}\n' | ||||
|                     f'{timeframe}s-series {reason} detected!\n' | ||||
|                     f'fqme: {mkt.fqme}\n' | ||||
|                     f'last_start_dt: {last_start_dt}\n\n' | ||||
|                     f'recv interval: {recv_frame_dur}\n' | ||||
|                     f'expected interval: {expected_dur}\n\n' | ||||
| 
 | ||||
|                     f'Missing duration of history of {missing_dur.in_words()!r}\n' | ||||
|                     f'{missing_dur}\n' | ||||
|                 ) | ||||
|                 # await tractor.pause() | ||||
| 
 | ||||
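To make the interval comparison above concrete, here is a self-contained sketch (the structured-array layout is assumed; pendulum does the interval math) of flagging a received frame that spans less wall-clock time than requested:

    import numpy as np
    from pendulum import from_timestamp

    def is_gappy_frame(
        array: np.ndarray,    # structured array with a 'time' field (epoch s)
        last_start_dt,        # requested frame end (pendulum DateTime)
        next_start_dt,        # start of the frame actually received
    ) -> bool:
        expected_dur = last_start_dt - next_start_dt
        recv_frame_dur = (
            from_timestamp(float(array[-1]['time']))
            - from_timestamp(float(array[0]['time']))
        )
        # a frame covering less time than requested implies a venue gap
        # (market closure, outage) or the true start of history.
        return recv_frame_dur < expected_dur

    bars = np.array(
        [(1_700_000_000.0, 1.0), (1_700_000_060.0, 1.1)],
        dtype=[('time', 'f8'), ('close', 'f8')],
    )
    print(is_gappy_frame(
        bars,
        last_start_dt=from_timestamp(1_700_000_300.0),
        next_start_dt=from_timestamp(1_700_000_000.0),
    ))  # -> True: only 60s received of the 300s requested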
|             to_push = diff_history( | ||||
|                 array, | ||||
|  | @ -565,7 +612,8 @@ async def start_backfill( | |||
|             # long-term storage. | ||||
|             if ( | ||||
|                 storage is not None | ||||
|                 and write_tsdb | ||||
|                 and | ||||
|                 write_tsdb | ||||
|             ): | ||||
|                 log.info( | ||||
|                     f'Writing {ln} frame to storage:\n' | ||||
|  | @ -578,6 +626,7 @@ async def start_backfill( | |||
|                     'crypto', | ||||
|                     'crypto_currency', | ||||
|                     'fiat',  # a "forex pair" | ||||
|                     'perpetual_future',  # stupid "perps" from cex land | ||||
|                 }: | ||||
|                     # for now, our table key schema is not including | ||||
|                     # the dst[/src] source asset token. | ||||
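A rough sketch of the key-schema rule described in that comment; the helper and the exact key layout are assumptions, not the actual storage API:

    def mkt_table_key(
        dst: str,
        src: str,
        atype: str,
        venue: str,
    ) -> str:
        # crypto-ish asset types key only on the dst token for now;
        # everything else keeps the full dst/src pair.
        if atype in {
            'crypto',
            'crypto_currency',
            'fiat',
            'perpetual_future',
        }:
            return f'{dst}.{venue}'
        return f'{dst}{src}.{venue}'

    print(mkt_table_key('btc', 'usdt', 'perpetual_future', 'binance'))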
|  | @ -685,7 +734,7 @@ async def back_load_from_tsdb( | |||
|         last_tsdb_dt | ||||
|         and latest_start_dt | ||||
|     ): | ||||
|         backfilled_size_s = ( | ||||
|         backfilled_size_s: Duration = ( | ||||
|             latest_start_dt - last_tsdb_dt | ||||
|         ).seconds | ||||
|         # if the shm buffer len is not large enough to contain | ||||
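For context on the size check that follows (a sketch under assumed names; the real shm buffer type isn't shown in this hunk), the backfilled span in seconds maps to a sample count that must fit in the allocated buffer:

    from pendulum import now

    def fits_in_shm(
        latest_start_dt,     # pendulum DateTime of newest provider frame
        last_tsdb_dt,        # pendulum DateTime of newest tsdb datum
        timeframe: int,      # sample period in seconds
        shm_len: int,        # allocated buffer length in samples
    ) -> bool:
        backfilled_size_s = (latest_start_dt - last_tsdb_dt).total_seconds()
        needed_samples = int(backfilled_size_s // timeframe)
        return needed_samples <= shm_len

    end = now('UTC')
    # 2 days of 60s bars -> 2880 samples, easily fits a 10k-slot buffer
    print(fits_in_shm(end, end.subtract(days=2), timeframe=60, shm_len=10_000))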
|  | @ -908,6 +957,8 @@ async def tsdb_backfill( | |||
|             f'{pformat(config)}\n' | ||||
|         ) | ||||
| 
 | ||||
|         # concurrently load the provider's most-recent-frame AND any | ||||
|         # pre-existing tsdb history already saved in `piker` storage. | ||||
|         dt_eps: list[DateTime, DateTime] = [] | ||||
|         async with trio.open_nursery() as tn: | ||||
|             tn.start_soon( | ||||
|  | @ -918,7 +969,6 @@ async def tsdb_backfill( | |||
|                 timeframe, | ||||
|                 config, | ||||
|             ) | ||||
| 
 | ||||
|             tsdb_entry: tuple = await load_tsdb_hist( | ||||
|                 storage, | ||||
|                 mkt, | ||||
|  | @ -947,6 +997,25 @@ async def tsdb_backfill( | |||
|                 mr_end_dt, | ||||
|             ) = dt_eps | ||||
| 
 | ||||
|             first_frame_dur_s: Duration = (mr_end_dt - mr_start_dt).seconds | ||||
|             calced_frame_size: Duration = mk_duration( | ||||
|                 seconds=first_frame_dur_s, | ||||
|             ) | ||||
|             # NOTE, attempt to use the backend declared default frame | ||||
|             # sizing (as allowed by their time-series query APIs) and | ||||
|             # if not provided try to construct a default from the | ||||
|             # first frame received above. | ||||
|             def_frame_durs: dict[ | ||||
|                 int, | ||||
|                 Duration, | ||||
|             ]|None = config.get('frame_types', None) | ||||
|             if def_frame_durs: | ||||
|                 def_frame_size: Duration = def_frame_durs[timeframe] | ||||
|                 assert def_frame_size == calced_frame_size | ||||
|             else: | ||||
|                 # use what we calced from first frame above. | ||||
|                 def_frame_size = calced_frame_size | ||||
| 
 | ||||
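The fallback above reduces to a few lines; this sketch assumes pendulum durations and a plain dict config (any names not in the hunk are hypothetical):

    from pendulum import DateTime, Duration, now

    def resolve_def_frame_size(
        config: dict,
        timeframe: int,
        mr_start_dt: DateTime,
        mr_end_dt: DateTime,
    ) -> Duration:
        # duration actually spanned by the most-recent (first) frame
        calced = Duration(seconds=(mr_end_dt - mr_start_dt).total_seconds())

        # prefer a backend-declared per-timeframe frame size if provided..
        frame_types: dict[int, Duration] | None = config.get('frame_types')
        if frame_types:
            return frame_types[timeframe]

        # ..otherwise fall back to what the first frame implies.
        return calced

    end = now('UTC')
    # a ~1000-bar 60s frame implies a ~60_000s default frame size
    print(resolve_def_frame_size({}, 60, end.subtract(seconds=60_000), end))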
|             # NOTE: when there's no offline data, there's 2 cases: | ||||
|             # - data backend doesn't support timeframe/sample | ||||
|             #   period (in which case `dt_eps` should be `None` and | ||||
|  | @ -977,7 +1046,7 @@ async def tsdb_backfill( | |||
|                     partial( | ||||
|                         start_backfill, | ||||
|                         get_hist=get_hist, | ||||
|                         frame_types=config.get('frame_types', None), | ||||
|                         def_frame_duration=def_frame_size, | ||||
|                         mod=mod, | ||||
|                         mkt=mkt, | ||||
|                         shm=shm, | ||||
|  |  | |||
										
											
File diff suppressed because it is too large
											
										
									
								
							|  | @ -25,11 +25,11 @@ build-backend = "poetry.core.masonry.api" | |||
| ignore = [] | ||||
| 
 | ||||
| # https://docs.astral.sh/ruff/settings/#lint_per-file-ignores | ||||
| "piker/ui/qt.py" = [ | ||||
|   "E402", | ||||
|   'F401',  # unused imports (without __all__ or blah as blah) | ||||
|   # "F841", # unused variable rules | ||||
| ] | ||||
| # "piker/ui/qt.py" = [ | ||||
| #   "E402", | ||||
| #   'F401',  # unused imports (without __all__ or blah as blah) | ||||
| #   # "F841", # unused variable rules | ||||
| # ] | ||||
| # ignore-init-module-imports = false | ||||
| 
 | ||||
| # ------ - ------ | ||||
|  | @ -50,10 +50,8 @@ attrs = "^23.1.0" | |||
| bidict = "^0.22.1" | ||||
| colorama = "^0.4.6" | ||||
| colorlog = "^6.7.0" | ||||
| cython = "^3.0.0" | ||||
| greenback = "^1.1.1" | ||||
| ib-insync = "^0.9.86" | ||||
| msgspec = "^0.18.0" | ||||
| msgspec = "^0.18.6" | ||||
| numba = "^0.59.0" | ||||
| numpy = "^1.25" | ||||
| polars = "^0.18.13" | ||||
|  | @ -71,13 +69,11 @@ pdbp = "^1.5.0" | |||
| trio = "^0.24" | ||||
| pendulum = "^3.0.0" | ||||
| httpx = "^0.27.0" | ||||
| cryptofeed = "^2.4.0" | ||||
| pyarrow = "^17.0.0" | ||||
| 
 | ||||
| [tool.poetry.dependencies.tractor] | ||||
| develop = true | ||||
| git = 'https://github.com/goodboy/tractor.git' | ||||
| branch = 'asyncio_debugger_support' | ||||
| # path = "../tractor" | ||||
| 
 | ||||
| tractor = {path = "../tractor", develop = true} | ||||
| websockets = "12.0" | ||||
| [tool.poetry.dependencies.asyncvnc] | ||||
| git = 'https://github.com/pikers/asyncvnc.git' | ||||
| branch = 'main' | ||||
|  | @ -109,6 +105,8 @@ pytest = "^6.0.0" | |||
| elasticsearch = "^8.9.0" | ||||
| xonsh = "^0.14.2" | ||||
| prompt-toolkit = "3.0.40" | ||||
| cython = "^3.0.0" | ||||
| greenback = "^1.1.1" | ||||
| 
 | ||||
| # console enhancements and eventually remote debugging | ||||
| # extras/helpers. | ||||
|  |  | |||
|  | @ -10,7 +10,7 @@ from piker import ( | |||
|     config, | ||||
| ) | ||||
| from piker.service import ( | ||||
|     Services, | ||||
|     get_service_mngr, | ||||
| ) | ||||
| from piker.log import get_console_log | ||||
| 
 | ||||
|  | @ -129,7 +129,7 @@ async def _open_test_pikerd( | |||
|         ) as service_manager, | ||||
|     ): | ||||
|         # this proc/actor is the pikerd | ||||
|         assert service_manager is Services | ||||
|         assert service_manager is get_service_mngr() | ||||
| 
 | ||||
|         async with tractor.wait_for_actor( | ||||
|             'pikerd', | ||||
|  |  | |||
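The test change above swaps the class-level `Services` registry for the instance returned by `get_service_mngr()`; a generic singleton-accessor sketch (not the actual `piker.service` internals) shows why an identity assertion like that can hold:

    class ServiceMngr:
        '''Per-process service registry (sketch only).'''
        def __init__(self) -> None:
            self.services: dict[str, object] = {}

    _mngr: ServiceMngr | None = None

    def get_service_mngr() -> ServiceMngr:
        # lazily create and always return the same instance so callers
        # (and tests) can compare by identity.
        global _mngr
        if _mngr is None:
            _mngr = ServiceMngr()
        return _mngr

    service_manager = get_service_mngr()
    assert service_manager is get_service_mngr()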
|  | @ -26,7 +26,7 @@ import pytest | |||
| import tractor | ||||
| from uuid import uuid4 | ||||
| 
 | ||||
| from piker.service import Services | ||||
| from piker.service import ServiceMngr | ||||
| from piker.log import get_logger | ||||
| from piker.clearing._messages import ( | ||||
|     Order, | ||||
|  | @ -158,7 +158,7 @@ def load_and_check_pos( | |||
| 
 | ||||
| 
 | ||||
| def test_ems_err_on_bad_broker( | ||||
|     open_test_pikerd: Services, | ||||
|     open_test_pikerd: ServiceMngr, | ||||
|     loglevel: str, | ||||
| ): | ||||
|     async def load_bad_fqme(): | ||||
|  |  | |||
|  | @ -15,7 +15,7 @@ import tractor | |||
| 
 | ||||
| from piker.service import ( | ||||
|     find_service, | ||||
|     Services, | ||||
|     ServiceMngr, | ||||
| ) | ||||
| from piker.data import ( | ||||
|     open_feed, | ||||
|  | @ -44,7 +44,7 @@ def test_runtime_boot( | |||
|     async def main(): | ||||
|         port = 6666 | ||||
|         daemon_addr = ('127.0.0.1', port) | ||||
|         services: Services | ||||
|         services: ServiceMngr | ||||
| 
 | ||||
|         async with ( | ||||
|             open_test_pikerd( | ||||
|  |  | |||