Compare commits
62 commits: 022432cce7 ... b4d3bcf240

Commit SHAs:
b4d3bcf240, 5cefe8bcdb, d96e9d4f11, a0dcf14aba, 1705afb607,
dafd5a3ca5, b9dde98d1e, 1616cc0e82, 0a2ed195a7, 28e8628c61,
b734245183, dc2c379d86, be84d0dae1, bdc3bc9219, 9232d09440,
f96bd51442, 6555ccfbba, 75d1d007fb, 2bdbe0f20e, a117177759,
30060a83c9, 156a35b606, 89e241c132, df8d1274ae, 0916b707e2,
45788b0b53, 38a1f0b9ee, f291654dbe, e9fa422916, 5304a36b87,
089c79e905, d848050b52, ddffe2bec6, 19b4ca9d85, f037f851d8,
a3ab8dd8fe, 6fa0d4bcf3, a4f7fa9c1a, 266ecf6206, ea6126d310,
1f4a5b80c4, ac6f52088a, 960298514c, 71f3a0a4cd, b25a7699ab,
b39affc96e, be8629929b, 4776be6736, 008e68174b, b4a9b86783,
d3ca571c0e, b3bbef30c0, 499b2d0090, 8b0f1e7045, b2cfa3444f,
0be454c3d6, de6189da4d, cc5b21a7e6, 35a9d8ec9d, a831212c86,
e987d7d7c4, 5ec756234a
@@ -0,0 +1,82 @@
with (import <nixpkgs> {});
with python312Packages;
let
  glibStorePath = lib.getLib glib;
  qtpyStorePath = lib.getLib qtpy;
  pyqt6StorePath = lib.getLib pyqt6;
  pyqt6SipStorePath = lib.getLib pyqt6-sip;
  qt6baseStorePath = lib.getLib qt6.qtbase;
  rapidfuzzStorePath = lib.getLib rapidfuzz;
  qdarkstyleStorePath = lib.getLib qdarkstyle;
in
stdenv.mkDerivation {
  name = "piker-qt6-poetry-shell";
  buildInputs = [
    # System requirements.
    glib
    qt6.qtbase
    libgcc.lib

    # Python requirements.
    python312Full
    poetry-core
    qdarkstyle
    rapidfuzz
    pyqt6
    qtpy
  ];
  src = null;
  shellHook = ''
    set -e

    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${libgcc.lib}/lib:${glibStorePath}/lib

    # Set the Qt plugin path
    # export QT_DEBUG_PLUGINS=1

    QTBASE_PATH="${qt6baseStorePath}"
    echo "qtbase path: $QTBASE_PATH"
    echo ""
    export QT_PLUGIN_PATH="$QTBASE_PATH/lib/qt-6/plugins"
    export QT_QPA_PLATFORM_PLUGIN_PATH="$QT_PLUGIN_PATH/platforms"
    echo "qt plugin path: $QT_PLUGIN_PATH"
    echo ""

    # Maybe create venv & install deps
    poetry install --with uis

    # Use pyqt6 from System, patch activate script
    ACTIVATE_SCRIPT_PATH="$(poetry env info --path)/bin/activate"

    export RPDFUZZ_PATH="${rapidfuzzStorePath}/lib/python3.12/site-packages"
    export QDRKSTYLE_PATH="${qdarkstyleStorePath}/lib/python3.12/site-packages"
    export QTPY_PATH="${qtpyStorePath}/lib/python3.12/site-packages"
    export PYQT6_PATH="${pyqt6StorePath}/lib/python3.12/site-packages"
    export PYQT6_SIP_PATH="${pyqt6SipStorePath}/lib/python3.12/site-packages"
    echo "rapidfuzz at: $RPDFUZZ_PATH"
    echo "qdarkstyle at: $QDRKSTYLE_PATH"
    echo "qtpy at: $QTPY_PATH"
    echo "pyqt6 at: $PYQT6_PATH"
    echo "pyqt6-sip at: $PYQT6_SIP_PATH"
    echo ""

    PATCH="export PYTHONPATH=\""

    PATCH="$PATCH\$RPDFUZZ_PATH"
    PATCH="$PATCH:\$QDRKSTYLE_PATH"
    PATCH="$PATCH:\$QTPY_PATH"
    PATCH="$PATCH:\$PYQT6_PATH"
    PATCH="$PATCH:\$PYQT6_SIP_PATH"

    PATCH="$PATCH\""

    if grep -q "$PATCH" "$ACTIVATE_SCRIPT_PATH"; then
      echo "venv is already patched."
    else
      echo "patching $ACTIVATE_SCRIPT_PATH to use pyqt6 from nixos..."
      sed -i "\$i$PATCH" $ACTIVATE_SCRIPT_PATH
    fi

    poetry shell
  '';
}
@@ -51,6 +51,7 @@ __brokers__: list[str] = [
    'ib',
    'kraken',
    'kucoin',
    'deribit',

    # broken but used to work
    # 'questrade',

@@ -61,7 +62,6 @@ __brokers__: list[str] = [
    # wstrade
    # iex

    # deribit
    # bitso
]
@@ -23,6 +23,7 @@ from __future__ import annotations
from contextlib import (
    asynccontextmanager as acm,
)
from functools import partial
from types import ModuleType
from typing import (
    TYPE_CHECKING,

@@ -60,12 +61,13 @@ async def _setup_persistent_brokerd(
    ctx: tractor.Context,
    brokername: str,
    loglevel: str | None = None,
    debug_mode: bool = False,

) -> None:
    '''
    Allocate a actor-wide service nursery in ``brokerd``
    such that feeds can be run in the background persistently by
    the broker backend as needed.
    Allocate a actor-wide service nursery in `brokerd` such that
    feeds can be run in the background persistently by the broker
    backend as needed.

    '''
    # NOTE: we only need to setup logging once (and only) here

@@ -86,6 +88,18 @@ async def _setup_persistent_brokerd(
    from piker.data import feed
    assert not feed._bus

    if (
        debug_mode
        and
        tractor.current_actor().is_infected_aio()
    ):
        # NOTE, whenever running `asyncio` in provider's actor
        # runtime be sure we enabled `breakpoint()` support
        # for non-`trio.Task` usage.
        from tractor.devx._debug import maybe_init_greenback
        await maybe_init_greenback()
        # breakpoint()  # XXX, SHOULD WORK from `trio.Task`!

    # allocate a nursery to the bus for spawning background
    # tasks to service client IPC requests, normally
    # `tractor.Context` connections to explicitly required

@@ -145,18 +159,21 @@ def broker_init(
    above.

    '''
    from ..brokers import get_brokermod
    brokermod = get_brokermod(brokername)
    brokermod: ModuleType = get_brokermod(brokername)
    modpath: str = brokermod.__name__

    start_actor_kwargs['name'] = f'brokerd.{brokername}'
    start_actor_kwargs.update(
        getattr(
            brokermod,
            '_spawn_kwargs',
            {},
        )
    spawn_kws: dict = getattr(
        brokermod,
        '_spawn_kwargs',
        {},
    )
    # ^^ NOTE, here we pull any runtime parameters specific
    # to spawning the sub-actor for the backend. For ex.
    # both `ib` and `deribit` rely on,
    # `'infect_asyncio': True,` since they both
    # use `tractor`'s "infected `asyncio` mode"
    # for their libs but you could also do something like
    # `'debug_mode: True` which would be like passing
    # `--pdb` for just that provider backend.

    # XXX TODO: make this not so hacky/monkeypatched..
    # -> we need a sane way to configure the logging level for all

@@ -166,8 +183,7 @@ def broker_init(

    # lookup actor-enabled modules declared by the backend offering the
    # `brokerd` endpoint(s).
    enabled: list[str]
    enabled = start_actor_kwargs['enable_modules'] = [
    enabled: list[str] = [
        __name__,  # so that eps from THIS mod can be invoked
        modpath,
    ]

@@ -179,9 +195,13 @@ def broker_init(
        subpath: str = f'{modpath}.{submodname}'
        enabled.append(subpath)

    datad_kwargs: dict = {
        'name': f'brokerd.{brokername}',
        'enable_modules': enabled,
    }
    return (
        brokermod,
        start_actor_kwargs,  # to `ActorNursery.start_actor()`
        start_actor_kwargs | datad_kwargs | spawn_kws,  # to `ActorNursery.start_actor()`

        # XXX see impl above; contains all (actor global)
        # setup/teardown expected in all `brokerd` actor instances.

@@ -190,14 +210,17 @@ def broker_init(


async def spawn_brokerd(

    brokername: str,
    loglevel: str | None = None,

    **tractor_kwargs,

) -> bool:
    '''
    Spawn a `brokerd.<backendname>` subactor service daemon
    using `pikerd`'s service mngr.

    '''
    from piker.service._util import log  # use service mngr log
    log.info(f'Spawning {brokername} broker daemon')

@@ -211,33 +234,41 @@ async def spawn_brokerd(
        **tractor_kwargs,
    )

    brokermod = get_brokermod(brokername)
    extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {})
    tractor_kwargs.update(extra_tractor_kwargs)

    # ask `pikerd` to spawn a new sub-actor and manage it under its
    # actor nursery
    from piker.service import Services

    from piker.service import (
        get_service_mngr,
        ServiceMngr,
    )
    dname: str = tractor_kwargs.pop('name')  # f'brokerd.{brokername}'
    portal = await Services.actor_n.start_actor(
        dname,
        enable_modules=_data_mods + tractor_kwargs.pop('enable_modules'),
        debug_mode=Services.debug_mode,
    mngr: ServiceMngr = get_service_mngr()
    ctx: tractor.Context = await mngr.start_service(
        daemon_name=dname,
        ctx_ep=partial(
            # signature of target root-task endpoint
            daemon_fixture_ep,

            # passed to daemon_fixture_ep(**kwargs)
            brokername=brokername,
            loglevel=loglevel,
            debug_mode=mngr.debug_mode,
        ),
        debug_mode=mngr.debug_mode,
        # ^TODO, allow overriding this per-daemon from client side?
        # |_ it's already supported in `tractor` so..

        loglevel=loglevel,
        enable_modules=(
            _data_mods
            +
            tractor_kwargs.pop('enable_modules')
        ),
        **tractor_kwargs
    )

    # NOTE: the service mngr expects an already spawned actor + its
    # portal ref in order to do non-blocking setup of brokerd
    # service nursery.
    await Services.start_service_task(
        dname,
        portal,

        # signature of target root-task endpoint
        daemon_fixture_ep,
        brokername=brokername,
        loglevel=loglevel,
    assert (
        not ctx.cancel_called
        and ctx.portal  # parent side
        and dname in ctx.chan.uid  # subactor is named as desired
    )
    return True


@@ -262,8 +293,7 @@ async def maybe_spawn_brokerd(
    from piker.service import maybe_spawn_daemon

    async with maybe_spawn_daemon(

        f'brokerd.{brokername}',
        service_name=f'brokerd.{brokername}',
        service_task_target=spawn_brokerd,
        spawn_args={
            'brokername': brokername,
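The `start_actor_kwargs | datad_kwargs | spawn_kws` return above leans on PEP 584 dict-union semantics where the right-most mapping wins on key collisions; a standalone sketch (values are illustrative, only `infect_asyncio` is taken from the backend `_spawn_kwargs` examples elsewhere in this changeset):

    # illustrative values; `_spawn_kwargs = {'infect_asyncio': True}` is
    # what e.g. the deribit backend declares (see its feed.py diff below)
    start_actor_kwargs: dict = {'loglevel': 'info'}
    datad_kwargs: dict = {
        'name': 'brokerd.deribit',
        'enable_modules': ['piker.brokers._daemon'],
    }
    spawn_kws: dict = {'infect_asyncio': True}

    merged: dict = start_actor_kwargs | datad_kwargs | spawn_kws
    assert merged['name'] == 'brokerd.deribit'  # right-most keys win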
@@ -567,6 +567,7 @@ class Client:
    ) -> str:
        return {
            'USDTM': 'usdtm_futes',
            'SPOT': 'spot',
            # 'COINM': 'coin_futes',
            # ^-TODO-^ bc someone might want it..?
        }[pair.venue]
@@ -181,7 +181,6 @@ class FutesPair(Pair):
    quoteAsset: str  # 'USDT',
    quotePrecision: int  # 8,
    requiredMarginPercent: float  # '5.0000',
    settlePlan: int  # 0,
    timeInForce: list[str]  # ['GTC', 'IOC', 'FOK', 'GTX'],
    triggerProtect: float  # '0.0500',
    underlyingSubType: list[str]  # ['PoW'],
@@ -25,6 +25,7 @@ from .api import (
    get_client,
)
from .feed import (
    get_mkt_info,
    open_history_client,
    open_symbol_search,
    stream_quotes,

@@ -34,15 +35,20 @@ from .feed import (
    # open_trade_dialog,
    # norm_trade_records,
# )
from .venues import (
    OptionPair,
)

log = get_logger(__name__)

__all__ = [
    'get_client',
    # 'trades_dialogue',
    'get_mkt_info',
    'open_history_client',
    'open_symbol_search',
    'stream_quotes',
    'OptionPair',
    # 'norm_trade_records',
]
(File diff suppressed because it is too large.)
@@ -18,38 +18,59 @@
Deribit backend.

'''
from __future__ import annotations
from contextlib import asynccontextmanager as acm
from datetime import datetime
from typing import Any, Optional, Callable
from typing import (
    # Any,
    # Optional,
    Callable,
)
# from pprint import pformat
import time

import cryptofeed
import trio
from trio_typing import TaskStatus
import pendulum
from rapidfuzz import process as fuzzy
from pendulum import (
    from_timestamp,
)
import numpy as np
import tractor

from piker.brokers import open_cached_client
from piker.log import get_logger, get_console_log
from piker.data import ShmArray
from piker.brokers._util import (
    BrokerError,
from piker.accounting import (
    Asset,
    MktPair,
    unpack_fqme,
)
from piker.brokers import (
    open_cached_client,
    NoData,
    DataUnavailable,
)

from cryptofeed import FeedHandler
from cryptofeed.defines import (
    DERIBIT, L1_BOOK, TRADES, OPTION, CALL, PUT
from piker._cacheables import (
    async_lifo_cache,
)
from cryptofeed.symbols import Symbol
from piker.log import (
    get_logger,
    mk_repr,
)
from piker.data.validate import FeedInit


from .api import (
    Client, Trade,
    get_config,
    str_to_cb_sym, piker_sym_to_cb_sym, cb_sym_to_deribit_inst,
    Client,
    # get_config,
    piker_sym_to_cb_sym,
    cb_sym_to_deribit_inst,
    str_to_cb_sym,
    maybe_open_price_feed
)
from .venues import (
    Pair,
    OptionPair,
    Trade,
)

_spawn_kwargs = {
    'infect_asyncio': True,

@@ -64,90 +85,215 @@ async def open_history_client(
    mkt: MktPair,
) -> tuple[Callable, int]:

    fnstrument: str = mkt.bs_fqme
    # TODO implement history getter for the new storage layer.
    async with open_cached_client('deribit') as client:

        pair: OptionPair = client._pairs[mkt.dst.name]
        # XXX NOTE, the cuckers use ms !!!
        creation_time_s: int = pair.creation_timestamp/1000

        async def get_ohlc(
            end_dt: Optional[datetime] = None,
            start_dt: Optional[datetime] = None,
            timeframe: float,
            end_dt: datetime | None = None,
            start_dt: datetime | None = None,

        ) -> tuple[
            np.ndarray,
            datetime,  # start
            datetime,  # end
        ]:
            if timeframe != 60:
                raise DataUnavailable('Only 1m bars are supported')

            array = await client.bars(
                instrument,
            array: np.ndarray = await client.bars(
                mkt,
                start_dt=start_dt,
                end_dt=end_dt,
            )
            if len(array) == 0:
                raise DataUnavailable
                if (
                    end_dt is None
                ):
                    raise DataUnavailable(
                        'No history seems to exist yet?\n\n'
                        f'{mkt}'
                    )
                elif (
                    end_dt
                    and
                    end_dt.timestamp() < creation_time_s
                ):
                    # the contract can't have history
                    # before it was created.
                    pair_type_str: str = type(pair).__name__
                    create_dt: datetime = from_timestamp(creation_time_s)
                    raise DataUnavailable(
                        f'No history prior to\n'
                        f'`{pair_type_str}.creation_timestamp: int = '
                        f'{pair.creation_timestamp}\n\n'
                        f'------ deribit sux ------\n'
                        f'WHICH IN "NORMAL PEOPLE WHO USE EPOCH TIME" form is,\n'
                        f'creation_time_s: {creation_time_s}\n'
                        f'create_dt: {create_dt}\n'
                    )
                raise NoData(
                    f'No frame for {start_dt} -> {end_dt}\n'
                )

            start_dt = pendulum.from_timestamp(array[0]['time'])
            end_dt = pendulum.from_timestamp(array[-1]['time'])
            start_dt = from_timestamp(array[0]['time'])
            end_dt = from_timestamp(array[-1]['time'])

            times = array['time']
            if not times.any():
                raise ValueError(
                    'Bad frame with null-times?\n\n'
                    f'{times}'
                )

            if end_dt is None:
                inow: int = round(time.time())
                if (inow - times[-1]) > 60:
                    await tractor.pause()

            return array, start_dt, end_dt

    yield get_ohlc, {'erlangs': 3, 'rate': 3}
    yield (
        get_ohlc,
        {  # backfill config
            'erlangs': 3,
            'rate': 3,
        }
    )


@async_lifo_cache()
async def get_mkt_info(
    fqme: str,

) -> tuple[MktPair, Pair|OptionPair] | None:

    # uppercase since kraken bs_mktid is always upper
    if 'deribit' not in fqme.lower():
        fqme += '.deribit'

    mkt_mode: str = ''
    broker, mkt_ep, venue, expiry = unpack_fqme(fqme)

    # NOTE: we always upper case all tokens to be consistent with
    # binance's symbology style for pairs, like `BTCUSDT`, but in
    # theory we could also just keep things lower case; as long as
    # we're consistent and the symcache matches whatever this func
    # returns, always!
    expiry: str = expiry.upper()
    venue: str = venue.upper()
    # venue_lower: str = venue.lower()

    mkt_mode: str = 'option'

    async with open_cached_client(
        'deribit',
    ) as client:

        assets: dict[str, Asset] = await client.get_assets()
        pair_str: str = mkt_ep.lower()

        pair: Pair = await client.exch_info(
            sym=pair_str,
        )
        mkt_mode = pair.venue
        client.mkt_mode = mkt_mode

        dst: Asset | None = assets.get(pair.bs_dst_asset)
        src: Asset | None = assets.get(pair.bs_src_asset)

        mkt = MktPair(
            dst=dst,
            src=src,
            price_tick=pair.price_tick,
            size_tick=pair.size_tick,
            bs_mktid=pair.symbol,
            venue=mkt_mode,
            broker='deribit',
            _atype=mkt_mode,
            _fqme_without_src=True,

            # expiry=pair.expiry,
            # XXX TODO, currently we don't use it since it's
            # already "described" in the `OptionPair.symbol: str`
            # and if we slap in the ISO repr it's kinda hideous..
            # -[ ] figure out the best either std
        )
        return mkt, pair


async def stream_quotes(

    send_chan: trio.abc.SendChannel,
    symbols: list[str],
    feed_is_live: trio.Event,
    loglevel: str = None,

    # startup sync
    task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,

) -> None:
    # XXX: required to propagate ``tractor`` loglevel to piker logging
    get_console_log(loglevel or tractor.current_actor().loglevel)
    '''
    Open a live quote stream for the market set defined by `symbols`.

    sym = symbols[0]
    Internally this starts a `cryptofeed.FeedHandler` inside an `asyncio`-side
    task and relays through L1 and `Trade` msgs here to our `trio.Task`.

    '''
    sym = symbols[0].split('.')[0]
    init_msgs: list[FeedInit] = []

    # multiline nested `dict` formatter (since rn quote-msgs are
    # just that).
    pfmt: Callable[[str], str] = mk_repr(
        # so we can see `deribit`'s delightfully mega-long bs fields..
        maxstring=100,
    )

    async with (
        open_cached_client('deribit') as client,
        send_chan as send_chan
    ):
        mkt: MktPair
        pair: Pair
        mkt, pair = await get_mkt_info(sym)

        init_msgs = {
            # pass back token, and bool, signalling if we're the writer
            # and that history has been written
            sym: {
                'symbol_info': {
                    'asset_type': 'option',
                    'price_tick_size': 0.0005
                },
                'shm_write_opts': {'sum_tick_vml': False},
                'fqsn': sym,
            },
        }
        # build out init msgs according to latest spec
        init_msgs.append(
            FeedInit(
                mkt_info=mkt,
            )
        )
        # build `cryptofeed` feed-handle
        cf_sym: cryptofeed.Symbol = piker_sym_to_cb_sym(sym)

        nsym = piker_sym_to_cb_sym(sym)
        from_cf: tractor.to_asyncio.LinkedTaskChannel
        async with maybe_open_price_feed(sym) as from_cf:

        async with maybe_open_price_feed(sym) as stream:
            # load the "last trades" summary
            last_trades_res: cryptofeed.LastTradesResult = await client.last_trades(
                cb_sym_to_deribit_inst(cf_sym),
                count=1,
            )
            last_trades: list[Trade] = last_trades_res.trades

            cache = await client.cache_symbols()
            # TODO, do we even need this or will the above always
            # work?
            # if not last_trades:
            #     await tractor.pause()
            #     async for typ, quote in from_cf:
            #         if typ == 'trade':
            #             last_trade = Trade(**(quote['data']))
            #             break

            last_trades = (await client.last_trades(
                cb_sym_to_deribit_inst(nsym), count=1)).trades
            # else:
            last_trade = Trade(
                **(last_trades[0])
            )

            if len(last_trades) == 0:
                last_trade = None
                async for typ, quote in stream:
                    if typ == 'trade':
                        last_trade = Trade(**(quote['data']))
                        break

            else:
                last_trade = Trade(**(last_trades[0]))

            first_quote = {
            first_quote: dict = {
                'symbol': sym,
                'last': last_trade.price,
                'brokerd_ts': last_trade.timestamp,

@@ -158,13 +304,84 @@ async def stream_quotes(
                    'broker_ts': last_trade.timestamp
                }]
            }
            task_status.started((init_msgs, first_quote))
            task_status.started((
                init_msgs,
                first_quote,
            ))

            feed_is_live.set()

            async for typ, quote in stream:
                topic = quote['symbol']
                await send_chan.send({topic: quote})
            # NOTE XXX, static for now!
            # => since this only handles ONE mkt feed at a time we
            # don't need a lookup table to map interleaved quotes
            # from multiple possible mkt-pairs
            topic: str = mkt.bs_fqme

            # deliver until cancelled
            async for typ, ref in from_cf:
                match typ:
                    case 'trade':
                        trade: cryptofeed.types.Trade = ref

                        # TODO, re-impl this according to teh ideal
                        # fqme for opts that we choose!!
                        bs_fqme: str = cb_sym_to_deribit_inst(
                            str_to_cb_sym(trade.symbol)
                        ).lower()

                        piker_quote: dict = {
                            'symbol': bs_fqme,
                            'last': trade.price,
                            'broker_ts': time.time(),
                            # ^TODO, name this `brokerd/datad_ts` and
                            # use `time.time_ns()` ??
                            'ticks': [{
                                'type': 'trade',
                                'price': float(trade.price),
                                'size': float(trade.amount),
                                'broker_ts': trade.timestamp,
                            }],
                        }
                        log.info(
                            f'deribit {typ!r} quote for {sym!r}\n\n'
                            f'{trade}\n\n'
                            f'{pfmt(piker_quote)}\n'
                        )

                    case 'l1':
                        book: cryptofeed.types.L1Book = ref

                        # TODO, so this is where we can possibly change things
                        # and instead lever the `MktPair.bs_fqme: str` output?
                        bs_fqme: str = cb_sym_to_deribit_inst(
                            str_to_cb_sym(book.symbol)
                        ).lower()

                        piker_quote: dict = {
                            'symbol': bs_fqme,
                            'ticks': [

                                {'type': 'bid',
                                 'price': float(book.bid_price),
                                 'size': float(book.bid_size)},

                                {'type': 'bsize',
                                 'price': float(book.bid_price),
                                 'size': float(book.bid_size),},

                                {'type': 'ask',
                                 'price': float(book.ask_price),
                                 'size': float(book.ask_size),},

                                {'type': 'asize',
                                 'price': float(book.ask_price),
                                 'size': float(book.ask_size),}
                            ]
                        }

                await send_chan.send({
                    topic: piker_quote,
                })


@tractor.context

@@ -174,12 +391,21 @@ async def open_symbol_search(
    async with open_cached_client('deribit') as client:

        # load all symbols locally for fast search
        cache = await client.cache_symbols()
        # cache = client._pairs
        await ctx.started()

        async with ctx.open_stream() as stream:

            pattern: str
            async for pattern in stream:
                # repack in dict form
                await stream.send(
                    await client.search_symbols(pattern))

                # NOTE: pattern fuzzy-matching is done within
                # the methd impl.
                pairs: dict[str, Pair] = await client.search_symbols(
                    pattern,
                )
                # repack in fqme-keyed table
                byfqme: dict[str, Pair] = {}
                for pair in pairs.values():
                    byfqme[pair.bs_fqme] = pair

                await stream.send(byfqme)
@@ -0,0 +1,196 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

"""
Per market data-type definitions and schemas types.

"""
from __future__ import annotations
import pendulum
from typing import (
    Literal,
    Optional,
)
from decimal import Decimal

from piker.types import Struct


# API endpoint paths by venue / sub-API
_domain: str = 'deribit.com'
_url = f'https://www.{_domain}'

# WEBsocketz
_ws_url: str = f'wss://www.{_domain}/ws/api/v2'

# test nets
_testnet_ws_url: str = f'wss://test.{_domain}/ws/api/v2'

MarketType = Literal[
    'option'
]


def get_api_eps(venue: MarketType) -> tuple[str, str]:
    '''
    Return API ep root paths per venue.

    '''
    return {
        'option': (
            _ws_url,
        ),
    }[venue]


class Pair(Struct, frozen=True, kw_only=True):

    symbol: str

    # src
    quote_currency: str  # 'BTC'

    # dst
    base_currency: str  # "BTC",

    tick_size: float  # 0.0001 # [{'above_price': 0.005, 'tick_size': 0.0005}]
    tick_size_steps: list[dict[str, float]]

    @property
    def price_tick(self) -> Decimal:
        return Decimal(str(self.tick_size_steps[0]['above_price']))

    @property
    def size_tick(self) -> Decimal:
        return Decimal(str(self.tick_size))

    @property
    def bs_fqme(self) -> str:
        return f'{self.symbol}'

    @property
    def bs_mktid(self) -> str:
        return f'{self.symbol}.{self.venue}'


class OptionPair(Pair, frozen=True):

    taker_commission: float  # 0.0003
    strike: float  # 5000.0
    settlement_period: str  # 'day'
    settlement_currency: str  # "BTC",
    rfq: bool  # false
    price_index: str  # 'btc_usd'
    option_type: str  # 'call'
    min_trade_amount: float  # 0.1
    maker_commission: float  # 0.0003
    kind: str  # 'option'
    is_active: bool  # true
    instrument_type: str  # 'reversed'
    instrument_name: str  # 'BTC-1SEP24-55000-C'
    instrument_id: int  # 364671
    expiration_timestamp: int  # 1725177600000
    creation_timestamp: int  # 1724918461000
    counter_currency: str  # 'USD'
    contract_size: float  # '1.0'
    block_trade_tick_size: float  # '0.0001'
    block_trade_min_trade_amount: int  # '25'
    block_trade_commission: float  # '0.003'

    # NOTE: see `.data._symcache.SymbologyCache.load()` for why
    ns_path: str = 'piker.brokers.deribit:OptionPair'

    # TODO, impl this without the MM:SS part of
    # the `'THH:MM:SS..'` etc..
    @property
    def expiry(self) -> str:
        iso_date = pendulum.from_timestamp(
            self.expiration_timestamp / 1000
        ).isoformat()
        return iso_date

    @property
    def venue(self) -> str:
        return f'{self.instrument_type}_option'

    @property
    def bs_fqme(self) -> str:
        return f'{self.symbol}'

    @property
    def bs_src_asset(self) -> str:
        return f'{self.quote_currency}'

    @property
    def bs_dst_asset(self) -> str:
        return f'{self.symbol}'


PAIRTYPES: dict[MarketType, Pair] = {
    'option': OptionPair,
}


class JSONRPCResult(Struct):
    id: int
    usIn: int
    usOut: int
    usDiff: int
    testnet: bool
    jsonrpc: str = '2.0'
    error: Optional[dict] = None
    result: Optional[list[dict]] = None


class JSONRPCChannel(Struct):
    method: str
    params: dict
    jsonrpc: str = '2.0'


class KLinesResult(Struct):
    low: list[float]
    cost: list[float]
    high: list[float]
    open: list[float]
    close: list[float]
    ticks: list[int]
    status: str
    volume: list[float]


class Trade(Struct):
    iv: float
    price: float
    amount: float
    trade_id: str
    contracts: float
    direction: str
    trade_seq: int
    timestamp: int
    mark_price: float
    index_price: float
    tick_direction: int
    instrument_name: str
    combo_id: Optional[str] = '',
    combo_trade_id: Optional[int] = 0,
    block_trade_id: Optional[str] = '',
    block_trade_leg_count: Optional[int] = 0,


class LastTradesResult(Struct):
    trades: list[Trade]
    has_more: bool
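As a worked check of the `OptionPair.expiry` conversion above (deribit's `expiration_timestamp` is in milliseconds; the value below is the one from the field comment, not new data):

    import pendulum

    # 1725177600000 ms -> seconds -> ISO 8601
    iso: str = pendulum.from_timestamp(1725177600000 / 1000).isoformat()
    print(iso)  # '2024-09-01T08:00:00+00:00'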
@@ -111,6 +111,10 @@ class KucoinMktPair(Struct, frozen=True):
    quoteMaxSize: float
    quoteMinSize: float
    symbol: str  # our bs_mktid, kucoin's internal id
    feeCategory: int
    makerFeeCoefficient: float
    takerFeeCoefficient: float
    st: bool


class AccountTrade(Struct, frozen=True):

@@ -593,7 +597,7 @@ async def get_client() -> AsyncGenerator[Client, None]:
    '''
    async with (
        httpx.AsyncClient(
            base_url=f'https://api.kucoin.com/api',
            base_url='https://api.kucoin.com/api',
        ) as trio_client,
    ):
        client = Client(httpx_client=trio_client)

@@ -637,7 +641,7 @@ async def open_ping_task(
            await trio.sleep((ping_interval - 1000) / 1000)
            await ws.send_msg({'id': connect_id, 'type': 'ping'})

    log.info('Starting ping task for kucoin ws connection')
    log.warning('Starting ping task for kucoin ws connection')
    n.start_soon(ping_server)

    yield

@@ -649,9 +653,14 @@ async def open_ping_task(
async def get_mkt_info(
    fqme: str,

) -> tuple[MktPair, KucoinMktPair]:
) -> tuple[
    MktPair,
    KucoinMktPair,
]:
    '''
    Query for and return a `MktPair` and `KucoinMktPair`.
    Query for and return both a `piker.accounting.MktPair` and
    `KucoinMktPair` from provided `fqme: str`
    (fully-qualified-market-endpoint).

    '''
    async with open_cached_client('kucoin') as client:

@@ -726,6 +735,8 @@ async def stream_quotes(

    log.info(f'Starting up quote stream(s) for {symbols}')
    for sym_str in symbols:
        mkt: MktPair
        pair: KucoinMktPair
        mkt, pair = await get_mkt_info(sym_str)
        init_msgs.append(
            FeedInit(mkt_info=mkt)

@@ -733,7 +744,11 @@ async def stream_quotes(

    ws: NoBsWs
    token, ping_interval = await client._get_ws_token()
    connect_id = str(uuid4())
    log.info('API reported ping_interval: {ping_interval}\n')

    connect_id: str = str(uuid4())
    typ: str
    quote: dict
    async with (
        open_autorecon_ws(
            (

@@ -747,20 +762,37 @@ async def stream_quotes(
            ),
        ) as ws,
        open_ping_task(ws, ping_interval, connect_id),
        aclosing(stream_messages(ws, sym_str)) as msg_gen,
        aclosing(
            iter_normed_quotes(
                ws, sym_str
            )
        ) as iter_quotes,
    ):
        typ, quote = await anext(msg_gen)
        typ, quote = await anext(iter_quotes)

        while typ != 'trade':
            # take care to not unblock here until we get a real
            # trade quote
            typ, quote = await anext(msg_gen)
        # take care to not unblock here until we get a real
        # trade quote?
        # ^TODO, remove this right?
        # -[ ] what often blocks chart boot/new-feed switching
        #     since we'ere waiting for a live quote instead of just
        #     loading history afap..
        #  |_ XXX, not sure if we require a bit of rework to core
        #     feed init logic or if backends justg gotta be
        #     changed up.. feel like there was some causality
        #     dilema prolly only seen with IB too..
        # while typ != 'trade':
        #     typ, quote = await anext(iter_quotes)

        task_status.started((init_msgs, quote))
        feed_is_live.set()

        async for typ, msg in msg_gen:
            await send_chan.send({sym_str: msg})
        # XXX NOTE, DO NOT include the `.<backend>` suffix!
        # OW the sampling loop will not broadcast correctly..
        # since `bus._subscribers.setdefault(bs_fqme, set())`
        # is used inside `.data.open_feed_bus()` !!!
        topic: str = mkt.bs_fqme
        async for typ, quote in iter_quotes:
            await send_chan.send({topic: quote})


@acm

@@ -815,7 +847,7 @@ async def subscribe(
    )


async def stream_messages(
async def iter_normed_quotes(
    ws: NoBsWs,
    sym: str,


@@ -846,6 +878,9 @@ async def stream_messages(

            yield 'trade', {
                'symbol': sym,
                # TODO, is 'last' even used elsewhere/a-good
                # semantic? can't we just read the ticks with our
                # .data.ticktools.frame_ticks()`/
                'last': trade_data.price,
                'brokerd_ts': last_trade_ts,
                'ticks': [

@@ -938,7 +973,7 @@ async def open_history_client(
            if end_dt is None:
                inow = round(time.time())

                print(
                log.debug(
                    f'difference in time between load and processing'
                    f'{inow - times[-1]}'
                )
@@ -653,7 +653,11 @@ class Router(Struct):
        flume = feed.flumes[fqme]
        first_quote: dict = flume.first_quote
        book: DarkBook = self.get_dark_book(broker)
        book.lasts[fqme]: float = float(first_quote['last'])

        if not (last := first_quote.get('last')):
            last: float = flume.rt_shm.array[-1]['close']

        book.lasts[fqme]: float = float(last)

        async with self.maybe_open_brokerd_dialog(
            brokermod=brokermod,

@@ -716,7 +720,7 @@ class Router(Struct):
        subs = self.subscribers[sub_key]

        sent_some: bool = False
        for client_stream in subs:
        for client_stream in subs.copy():
            try:
                await client_stream.send(msg)
                sent_some = True

@@ -1010,10 +1014,14 @@ async def translate_and_relay_brokerd_events(
                status_msg.brokerd_msg = msg
                status_msg.src = msg.broker_details['name']

                await router.client_broadcast(
                    status_msg.req.symbol,
                    status_msg,
                )
                if not status_msg.req:
                    # likely some order change state?
                    await tractor.pause()
                else:
                    await router.client_broadcast(
                        status_msg.req.symbol,
                        status_msg,
                    )

                if status == 'closed':
                    log.info(f'Execution for {oid} is complete!')
@@ -335,7 +335,7 @@ def services(config, tl, ports):
            name='service_query',
            loglevel=config['loglevel'] if tl else None,
        ),
        tractor.get_arbiter(
        tractor.get_registry(
            host=host,
            port=ports[0]
        ) as portal
@@ -25,10 +25,12 @@ from collections import (
    defaultdict,
)
from contextlib import asynccontextmanager as acm
from functools import partial
import time
from typing import (
    Any,
    AsyncIterator,
    Callable,
    TYPE_CHECKING,
)

@@ -42,7 +44,7 @@ from tractor.trionics import (
    maybe_open_nursery,
)
import trio
from trio_typing import TaskStatus
from trio import TaskStatus

from .ticktools import (
    frame_ticks,

@@ -53,6 +55,9 @@ from ._util import (
    get_console_log,
)
from ..service import maybe_spawn_daemon
from piker.log import (
    mk_repr,
)

if TYPE_CHECKING:
    from ._sharedmem import (

@@ -70,6 +75,7 @@ if TYPE_CHECKING:
_default_delay_s: float = 1.0


# TODO: use new `tractor.singleton_acm` API for this!
class Sampler:
    '''
    Global sampling engine registry.

@@ -79,9 +85,9 @@ class Sampler:

    This non-instantiated type is meant to be a singleton within
    a `samplerd` actor-service spawned once by the user wishing to
    time-step-sample (real-time) quote feeds, see
    ``.service.maybe_open_samplerd()`` and the below
    ``register_with_sampler()``.
    time-step-sample a (real-time) quote feeds, see
    `.service.maybe_open_samplerd()` and the below
    `register_with_sampler()`.

    '''
    service_nursery: None | trio.Nursery = None

@@ -375,7 +381,10 @@ async def register_with_sampler(
            assert Sampler.ohlcv_shms

        # unblock caller
        await ctx.started(set(Sampler.ohlcv_shms.keys()))
        await ctx.started(
            # XXX bc msgpack only allows one array type!
            list(Sampler.ohlcv_shms.keys())
        )

        if open_index_stream:
            try:
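For context on the `set(...)` -> `list(...)` change in `ctx.started()` above: msgpack defines a single array type, so a `set` sent over IPC comes back as a plain array on the peer side. A minimal sketch using `msgspec` (assumed here as the codec, per `tractor`'s msgspec-based IPC; the diff itself only carries the comment):

    import msgspec

    wire: bytes = msgspec.msgpack.encode({'btcusdt.binance', 'xbtusd.kraken'})
    # without a decode-type annotation the array decodes as a list
    assert isinstance(msgspec.msgpack.decode(wire), list)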
@@ -419,7 +428,6 @@ async def register_with_sampler(


async def spawn_samplerd(

    loglevel: str | None = None,
    **extra_tractor_kwargs

@@ -429,7 +437,10 @@ async def spawn_samplerd(
    update and increment count write and stream broadcasting.

    '''
    from piker.service import Services
    from piker.service import (
        get_service_mngr,
        ServiceMngr,
    )

    dname = 'samplerd'
    log.info(f'Spawning `{dname}`')

@@ -437,26 +448,33 @@ async def spawn_samplerd(
    # singleton lock creation of ``samplerd`` since we only ever want
    # one daemon per ``pikerd`` proc tree.
    # TODO: make this built-into the service api?
    async with Services.locks[dname + '_singleton']:
    mngr: ServiceMngr = get_service_mngr()
    already_started: bool = dname in mngr.service_tasks

        if dname not in Services.service_tasks:

            portal = await Services.actor_n.start_actor(
                dname,
                enable_modules=[
                    'piker.data._sampling',
                ],
                loglevel=loglevel,
                debug_mode=Services.debug_mode,  # set by pikerd flag
                **extra_tractor_kwargs
            )

            await Services.start_service_task(
                dname,
                portal,
    async with mngr._locks[dname + '_singleton']:
        ctx: Context = await mngr.start_service(
            daemon_name=dname,
            ctx_ep=partial(
                register_with_sampler,
                period_s=1,
                sub_for_broadcasts=False,
            ),
            debug_mode=mngr.debug_mode,  # set by pikerd flag

            # proxy-through to tractor
            enable_modules=[
                'piker.data._sampling',
            ],
            loglevel=loglevel,
            **extra_tractor_kwargs
        )
        if not already_started:
            assert (
                ctx
                and
                ctx.portal
                and
                not ctx.cancel_called
            )
    return True

@@ -561,7 +579,6 @@ async def open_sample_stream(


async def sample_and_broadcast(

    bus: _FeedsBus,  # noqa
    rt_shm: ShmArray,
    hist_shm: ShmArray,

@@ -582,11 +599,22 @@ async def sample_and_broadcast(

    overruns = Counter()

    # multiline nested `dict` formatter (since rn quote-msgs are
    # just that).
    pfmt: Callable[[str], str] = mk_repr()

    # iterate stream delivered by broker
    async for quotes in quote_stream:
        # print(quotes)

        # TODO: ``numba`` this!
        # XXX WARNING XXX only enable for debugging bc ow can cost
        # ALOT of perf with HF-feedz!!!
        #
        # log.info(
        #     'Rx live quotes:\n'
        #     f'{pfmt(quotes)}'
        # )

        # TODO: `numba` this!
        for broker_symbol, quote in quotes.items():
            # TODO: in theory you can send the IPC msg *before* writing
            # to the sharedmem array to decrease latency, however, that

@@ -659,6 +687,18 @@ async def sample_and_broadcast(
            sub_key: str = broker_symbol.lower()
            subs: set[Sub] = bus.get_subs(sub_key)

            if not subs:
                all_bs_fqmes: list[str] = list(
                    bus._subscribers.keys()
                )
                log.warning(
                    f'No subscribers for {brokername!r} live-quote ??\n'
                    f'broker_symbol: {broker_symbol}\n\n'

                    f'Maybe the backend-sys symbol does not match one of,\n'
                    f'{pfmt(all_bs_fqmes)}\n'
                )

            # NOTE: by default the broker backend doesn't append
            # it's own "name" into the fqme schema (but maybe it
            # should?) so we have to manually generate the correct

@@ -889,6 +929,7 @@ async def uniform_rate_send(
            # to consumers which crash or lose network connection.
            # I.e. we **DO NOT** want to crash and propagate up to
            # ``pikerd`` these kinds of errors!
            trio.EndOfChannel,
            trio.ClosedResourceError,
            trio.BrokenResourceError,
            ConnectionResetError,
@@ -273,7 +273,7 @@ async def _reconnect_forever(
            nobsws._connected.set()
            await trio.sleep_forever()
        except HandshakeError:
            log.exception(f'Retrying connection')
            log.exception('Retrying connection')

    # ws & nursery block ends


@@ -359,8 +359,8 @@ async def open_autorecon_ws(


'''
JSONRPC response-request style machinery for transparent multiplexing of msgs
over a NoBsWs.
JSONRPC response-request style machinery for transparent multiplexing
of msgs over a `NoBsWs`.

'''


@@ -377,43 +377,77 @@ async def open_jsonrpc_session(
    url: str,
    start_id: int = 0,
    response_type: type = JSONRPCResult,
    request_type: Optional[type] = None,
    request_hook: Optional[Callable] = None,
    error_hook: Optional[Callable] = None,
    msg_recv_timeout: float = float('inf'),
    # ^NOTE, since only `deribit` is using this jsonrpc stuff atm
    # and options mkts are generally "slow moving"..
    #
    # FURTHER if we break the underlying ws connection then since we
    # don't pass a `fixture` to the task that manages `NoBsWs`, i.e.
    # `_reconnect_forever()`, the jsonrpc "transport pipe" get's
    # broken and never restored with wtv init sequence is required to
    # re-establish a working req-resp session.

    # request_type: Optional[type] = None,
    # request_hook: Optional[Callable] = None,
    # error_hook: Optional[Callable] = None,
) -> Callable[[str, dict], dict]:

    # NOTE, store all request msgs so we can raise errors on the
    # caller side!
    req_msgs: dict[int, dict] = {}

    async with (
        trio.open_nursery() as n,
        open_autorecon_ws(url) as ws
        open_autorecon_ws(
            url=url,
            msg_recv_timeout=msg_recv_timeout,
        ) as ws
    ):
        rpc_id: Iterable = count(start_id)
        rpc_id: Iterable[int] = count(start_id)
        rpc_results: dict[int, dict] = {}

        async def json_rpc(method: str, params: dict) -> dict:
        async def json_rpc(
            method: str,
            params: dict,
        ) -> dict:
            '''
            perform a json rpc call and wait for the result, raise exception in
            case of error field present on response
            '''
            nonlocal req_msgs

            req_id: int = next(rpc_id)
            msg = {
                'jsonrpc': '2.0',
                'id': next(rpc_id),
                'id': req_id,
                'method': method,
                'params': params
            }
            _id = msg['id']

            rpc_results[_id] = {
            result = rpc_results[_id] = {
                'result': None,
                'event': trio.Event()
                'error': None,
                'event': trio.Event(),  # signal caller resp arrived
            }
            req_msgs[_id] = msg

            await ws.send_msg(msg)

            # wait for reponse before unblocking requester code
            await rpc_results[_id]['event'].wait()

            ret = rpc_results[_id]['result']
            if (maybe_result := result['result']):
                ret = maybe_result
                del rpc_results[_id]

            del rpc_results[_id]
            else:
                err = result['error']
                raise Exception(
                    f'JSONRPC request failed\n'
                    f'req: {msg}\n'
                    f'resp: {err}\n'
                )

            if ret.error is not None:
                raise Exception(json.dumps(ret.error, indent=4))

@@ -428,6 +462,7 @@ async def open_jsonrpc_session(
            the server side.

            '''
            nonlocal req_msgs
            async for msg in ws:
                match msg:
                    case {

@@ -451,15 +486,29 @@ async def open_jsonrpc_session(
                        'params': _,
                    }:
                        log.debug(f'Recieved\n{msg}')
                        if request_hook:
                            await request_hook(request_type(**msg))
                        # if request_hook:
                        #     await request_hook(request_type(**msg))

                    case {
                        'error': error
                    }:
                        log.warning(f'Recieved\n{error}')
                        if error_hook:
                            await error_hook(response_type(**msg))
                        # if error_hook:
                        #     await error_hook(response_type(**msg))

                        # retreive orig request msg, set error
                        # response in original "result" msg,
                        # THEN FINALLY set the event to signal caller
                        # to raise the error in the parent task.
                        req_id: int = msg['id']
                        req_msg: dict = req_msgs[req_id]
                        result: dict = rpc_results[req_id]
                        result['error'] = error
                        result['event'].set()
                        log.error(
                            f'JSONRPC request failed\n'
                            f'req: {req_msg}\n'
                            f'resp: {error}\n'
                        )

                    case _:
                        log.warning(f'Unhandled JSON-RPC msg!?\n{msg}')
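A sketch of driving the reworked session API above; the method name and params are illustrative of deribit's JSON-RPC v2 surface and are not part of this diff:

    async with open_jsonrpc_session(
        url='wss://www.deribit.com/ws/api/v2',
        msg_recv_timeout=float('inf'),  # new param, see sig above
    ) as json_rpc:
        # error responses now raise here in the caller task instead of
        # being swallowed by a (now commented-out) error hook
        resp: dict = await json_rpc(
            'public/ticker',  # hypothetical method for illustration
            {'instrument_name': 'BTC-PERPETUAL'},
        )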
@@ -540,7 +540,10 @@ async def open_feed_bus(
    # subscription since the backend isn't (yet) expected to
    # append it's own name to the fqme, so we filter on keys
    # which *do not* include that name (e.g .ib) .
    bus._subscribers.setdefault(bs_fqme, set())
    bus._subscribers.setdefault(
        bs_fqme,
        set(),
    )

    # sync feed subscribers with flume handles
    await ctx.started(
piker/log.py (+28 lines)
@@ -18,7 +18,11 @@
Log like a forester!
"""
import logging
import reprlib
import json
from typing import (
    Callable,
)

import tractor
from pygments import (

@@ -84,3 +88,27 @@ def colorize_json(
        # likeable styles: algol_nu, tango, monokai
        formatters.TerminalTrueColorFormatter(style=style)
    )


def mk_repr(
    **repr_kws,
) -> Callable[[str], str]:
    '''
    Allocate and deliver a `repr.Repr` instance with provided input
    settings using the std-lib's `reprlib` mod,
     * https://docs.python.org/3/library/reprlib.html

    ------ Ex. ------
    An up to 6-layer-nested `dict` as multi-line:
    - https://stackoverflow.com/a/79102479
    - https://docs.python.org/3/library/reprlib.html#reprlib.Repr.maxlevel

    '''
    def_kws: dict[str, int] = dict(
        indent=2,
        maxlevel=6,  # recursion levels
        maxstring=66,  # match editor line-len limit
    )
    def_kws |= repr_kws
    reprr = reprlib.Repr(**def_kws)
    return reprr.repr
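A quick usage sketch of the new `mk_repr()` helper (the quote-msg dict below is made up for illustration; note `reprlib.Repr`'s keyword-arg constructor and `indent` support appear to require Python 3.12+):

    pfmt = mk_repr(maxstring=100)  # as done in the deribit feed above
    print(pfmt({
        'symbol': 'btc-usdt',  # hypothetical nested quote-msg
        'ticks': [{'type': 'trade', 'price': 42000.5, 'size': 0.1}],
    }))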
@@ -30,7 +30,11 @@ Actor runtime primtives and (distributed) service APIs for,
=> TODO: maybe to (re)move elsewhere?

'''
from ._mngr import Services as Services
from ._mngr import (
    get_service_mngr as get_service_mngr,
    open_service_mngr as open_service_mngr,
    ServiceMngr as ServiceMngr,
)
from ._registry import (
    _tractor_kwargs as _tractor_kwargs,
    _default_reg_addr as _default_reg_addr,
@@ -21,7 +21,6 @@
from __future__ import annotations
import os
from typing import (
    Optional,
    Any,
    ClassVar,
)

@@ -30,13 +29,13 @@ from contextlib import (
)

import tractor
import trio

from ._util import (
    get_console_log,
)
from ._mngr import (
    Services,
    open_service_mngr,
    ServiceMngr,
)
from ._registry import (  # noqa
    _tractor_kwargs,

@@ -59,7 +58,7 @@ async def open_piker_runtime(
    registry_addrs: list[tuple[str, int]] = [],

    enable_modules: list[str] = [],
    loglevel: Optional[str] = None,
    loglevel: str|None = None,

    # XXX NOTE XXX: you should pretty much never want debug mode
    # for data daemons when running in production.

@@ -69,7 +68,7 @@ async def open_piker_runtime(
    # and spawn the service tree distributed per that.
    start_method: str = 'trio',

    tractor_runtime_overrides: dict | None = None,
    tractor_runtime_overrides: dict|None = None,
    **tractor_kwargs,

) -> tuple[

@@ -119,6 +118,10 @@ async def open_piker_runtime(
            # spawn other specialized daemons I think?
            enable_modules=enable_modules,

            # TODO: how to configure this?
            # keep it on by default if debug mode is set?
            # maybe_enable_greenback=debug_mode,

            **tractor_kwargs,
        ) as actor,


@@ -167,12 +170,13 @@ async def open_pikerd(

    **kwargs,

) -> Services:
) -> ServiceMngr:
    '''
    Start a root piker daemon with an indefinite lifetime.
    Start a root piker daemon actor (aka `pikerd`) with an indefinite
    lifetime.

    A root actor nursery is created which can be used to create and keep
    alive underling services (see below).
    A root actor-nursery is created which can be used to spawn and
    supervise underling service sub-actors (see below).

    '''
    # NOTE: for the root daemon we always enable the root

@@ -199,8 +203,6 @@ async def open_pikerd(
            root_actor,
            reg_addrs,
        ),
        tractor.open_nursery() as actor_nursery,
        trio.open_nursery() as service_nursery,
    ):
        for addr in reg_addrs:
            if addr not in root_actor.accept_addrs:

@@ -209,25 +211,17 @@ async def open_pikerd(
                    'Maybe you have another daemon already running?'
                )

        # assign globally for future daemon/task creation
        Services.actor_n = actor_nursery
        Services.service_n = service_nursery
        Services.debug_mode = debug_mode

        try:
            yield Services

        finally:
            # TODO: is this more clever/efficient?
            # if 'samplerd' in Services.service_tasks:
            #     await Services.cancel_service('samplerd')
            service_nursery.cancel_scope.cancel()
        mngr: ServiceMngr
        async with open_service_mngr(
            debug_mode=debug_mode,
        ) as mngr:
            yield mngr


# TODO: do we even need this?
# @acm
# async def maybe_open_runtime(
#     loglevel: Optional[str] = None,
#     loglevel: str|None = None,
#     **kwargs,

# ) -> None:

@@ -256,7 +250,7 @@ async def maybe_open_pikerd(
    loglevel: str | None = None,
    **kwargs,

) -> tractor._portal.Portal | ClassVar[Services]:
) -> tractor._portal.Portal | ClassVar[ServiceMngr]:
    '''
    If no ``pikerd`` daemon-root-actor can be found start it and
    yield up (we should probably figure out returning a portal to self
@@ -49,7 +49,7 @@ from requests.exceptions import (
    ReadTimeout,
)

from ._mngr import Services
from ._mngr import ServiceMngr
from ._util import (
    log,  # sub-sys logger
    get_console_log,

@@ -453,7 +453,7 @@ async def open_ahabd(

@acm
async def start_ahab_service(
    services: Services,
    services: ServiceMngr,
    service_name: str,

    # endpoint config passed as **kwargs

@@ -549,7 +549,8 @@ async def start_ahab_service(
            log.warning('Failed to cancel root permsed container')

        except (
            trio.MultiError,
            # trio.MultiError,
            ExceptionGroup,
        ) as err:
            for subexc in err.exceptions:
                if isinstance(subexc, PermissionError):
@@ -26,14 +26,17 @@ from typing import (
from contextlib import (
    asynccontextmanager as acm,
)
from collections import defaultdict

import tractor
import trio

from ._util import (
    log,  # sub-sys logger
)
from ._mngr import (
    Services,
    get_service_mngr,
    ServiceMngr,
)
from ._actor_runtime import maybe_open_pikerd
from ._registry import find_service

@@ -41,15 +44,14 @@ from ._registry import find_service

@acm
async def maybe_spawn_daemon(

    service_name: str,
    service_task_target: Callable,

    spawn_args: dict[str, Any],

    loglevel: str | None = None,
    singleton: bool = False,

    _locks = defaultdict(trio.Lock),
    **pikerd_kwargs,

) -> tractor.Portal:
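On the `_locks = defaultdict(trio.Lock)` parameter above: a mutable default is evaluated once at `def` time, so every call shares the same registry, giving a cheap per-process singleton without the old class-level `Services.locks`. A standalone sketch of the idiom (not piker code):

    from collections import defaultdict

    import trio

    def get_lock(
        name: str,
        _locks: defaultdict = defaultdict(trio.Lock),  # created once
    ) -> trio.Lock:
        return _locks[name]

    assert get_lock('samplerd') is get_lock('samplerd')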
@ -67,7 +69,7 @@ async def maybe_spawn_daemon(
|
|||
'''
|
||||
# serialize access to this section to avoid
|
||||
# 2 or more tasks racing to create a daemon
|
||||
lock = Services.locks[service_name]
|
||||
lock = _locks[service_name]
|
||||
await lock.acquire()
|
||||
|
||||
async with find_service(
|
||||
|
@@ -132,7 +134,65 @@ async def maybe_spawn_daemon(
     async with tractor.wait_for_actor(service_name) as portal:
         lock.release()
         yield portal
-        await portal.cancel_actor()
+        # --- ---- ---
+        # XXX NOTE XXX
+        # --- ---- ---
+        # DO NOT PUT A `portal.cancel_actor()` here (as was prior)!
+        #
+        # Doing so will cause an "out-of-band" ctxc
+        # (`tractor.ContextCancelled`) to be raised inside the
+        # `ServiceMngr.open_context_in_task()`'s call to
+        # `ctx.wait_for_result()` AND the internal self-ctxc
+        # "graceful capture" WILL NOT CATCH IT!
+        #
+        # This can cause certain types of operations to raise
+        # that ctxc BEFORE THEY `return`, resulting in
+        # a "false-negative" ctxc being raised when really
+        # nothing actually failed, other than our semantic
+        # "failure" to suppress an expected, graceful,
+        # self-cancel scenario..
+        #
+        # bUt wHy duZ It WorK lIKe dis..
+        # ------------------------------
+        # from the perspective of the `tractor.Context` this
+        # cancel request was conducted "out of band" since
+        # `Context.cancel()` was never called and thus the
+        # `._cancel_called: bool` was never set. Despite the
+        # remote `.canceller` being set to `pikerd` (i.e. the
+        # same `Actor.uid` of the raising service-mngr task) the
+        # service-task's ctx itself was never marked as having
+        # requested cancellation and thus still raises the ctxc
+        # bc it was unaware of any such request.
+        #
+        # How to make grokin these cases easier tho?
+        # ------------------------------------------
+        # Because `Portal.cancel_actor()` was called it requests
+        # "full-`Actor`-runtime-cancellation" of its peer
+        # process which IS NOT THE SAME as a single inter-actor
+        # RPC task cancelling its local context with a remote
+        # peer `Task` in that same peer process.
+        #
+        # ?TODO? It might be better if we do one (or all) of the
+        # following:
+        #
+        # -[ ] at least set a special message for the
+        #    `ContextCancelled` when raised locally by the
+        #    unaware ctx task such that we check for the
+        #    `.canceller` being *our `Actor`* and in the case
+        #    where `Context._cancel_called == False` we specially
+        #    note that this is likely an "out-of-band"
+        #    runtime-cancel request triggered by some call to
+        #    `Portal.cancel_actor()`, possibly even reporting the
+        #    exact LOC of that caller by tracking it inside our
+        #    portal-type?
+        # -[ ] possibly add another field `ContextCancelled` like
+        #    maybe a,
+        #    `.request_type: Literal['os', 'proc', 'actor',
+        #    'ctx']` type thing which would allow immediately
+        #    being able to tell what kind of cancellation caused
+        #    the unexpected ctxc?
+        # -[ ] REMOVE THIS COMMENT, once we've settled on how to
+        #    better augment `tractor` to be more explicit on this!

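That note is the crux of the whole change: a runtime-level cancel via `Portal.cancel_actor()` never marks the per-task `Context` as self-cancelled, so the ctxc surfaces as if a peer had failed. A hedged sketch of how calling code can classify such an "out-of-band" cancel, using only the `ContextCancelled.canceller` and `current_actor()` attributes this diff itself relies on:

from tractor import ContextCancelled, current_actor

async def wait_classified(ctx):
    try:
        return await ctx.wait_for_result()
    except ContextCancelled as ctxc:
        if ctxc.canceller == current_actor().uid:
            # some task in this same actor requested a runtime
            # cancel without ever calling `ctx.cancel()`: an
            # "out-of-band" self-cancel, most likely graceful.
            return None
        raise  # a real remote/peer cancellation
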
 async def spawn_emsd(
 
@@ -147,21 +207,22 @@ async def spawn_emsd(
     """
     log.info('Spawning emsd')
 
-    portal = await Services.actor_n.start_actor(
+    smngr: ServiceMngr = get_service_mngr()
+    portal = await smngr.actor_n.start_actor(
         'emsd',
         enable_modules=[
             'piker.clearing._ems',
             'piker.clearing._client',
         ],
         loglevel=loglevel,
-        debug_mode=Services.debug_mode,  # set by pikerd flag
+        debug_mode=smngr.debug_mode,  # set by pikerd flag
         **extra_tractor_kwargs
     )
 
     # non-blocking setup of clearing service
     from ..clearing._ems import _setup_persistent_emsd
 
-    await Services.start_service_task(
+    await smngr.start_service_task(
         'emsd',
         portal,

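This is the new access idiom used throughout the diff: fetch the process-local singleton via `get_service_mngr()` instead of touching the old class-level `Services` globals. A usage sketch mirroring `spawn_emsd()` (module list trimmed, the rest illustrative):

from piker.service import get_service_mngr, ServiceMngr

async def spawn_my_daemon(loglevel: str | None = None):
    mngr: ServiceMngr = get_service_mngr()  # raises if never allocated
    portal = await mngr.actor_n.start_actor(
        'emsd',
        enable_modules=['piker.clearing._ems'],
        loglevel=loglevel,
        debug_mode=mngr.debug_mode,  # set by the pikerd flag
    )
    return portal
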
@@ -18,16 +18,29 @@
 daemon-service management API.
 
 """
 from __future__ import annotations
 from contextlib import (
     asynccontextmanager as acm,
+    # contextmanager as cm,
 )
 from collections import defaultdict
+from dataclasses import (
+    dataclass,
+    field,
+)
+import functools
+import inspect
 from typing import (
     Callable,
     Any,
 )
 
-import trio
-from trio_typing import TaskStatus
+import msgspec
 import tractor
+import trio
+from trio import TaskStatus
+from tractor import (
+    ActorNursery,
+    current_actor,
+    ContextCancelled,
+    Context,

@@ -39,6 +52,130 @@ from ._util import (
 )
 
 
+# TODO: implement a singleton deco-API for wrapping the below
+# factory's impl for general actor-singleton use?
+#
+# @singleton
+# async def open_service_mngr(
+#     **init_kwargs,
+# ) -> ServiceMngr:
+#     '''
+#     Note this function body is invoked IFF no existing singleton
+#     instance already exists in this proc's memory.
+#
+#     '''
+#     # setup
+#     yield ServiceMngr(**init_kwargs)
+#     # teardown
+
+
+# TODO: singleton factory API instead of a class API
+@acm
+async def open_service_mngr(
+    *,
+    debug_mode: bool = False,
+
+    # impl detail which ensures a single global instance
+    _singleton: list[ServiceMngr|None] = [None],
+    **init_kwargs,
+
+) -> ServiceMngr:
+    '''
+    Open a multi-subactor-as-service-daemon tree supervisor.
+
+    The delivered `ServiceMngr` is a singleton instance for each
+    actor-process and is allocated on first open and never
+    de-allocated unless explicitly deleted by a call to
+    `del_service_mngr()`.
+
+    '''
+    # TODO: factor this allocation into
+    # a `._mngr.open_service_mngr()` and put in the
+    # once-n-only-once setup/`.__aenter__()` part!
+    # -[ ] how to make this only happen on the `mngr == None` case?
+    #  |_ use `.trionics.maybe_open_context()` (for generic
+    #     async-with-style-only-once of the factory impl, though
+    #     what do we do for the allocation case?
+    #  / `.maybe_open_nursery()` (since for this specific case
+    #    it's simpler?) to activate
+    async with (
+        tractor.open_nursery() as an,
+        trio.open_nursery() as tn,
+    ):
+        # impl specific obvi..
+        init_kwargs.update({
+            'actor_n': an,
+            'service_n': tn,
+        })
+
+        mngr: ServiceMngr|None
+        if (mngr := _singleton[0]) is None:
+
+            log.info('Allocating a new service mngr!')
+            mngr = _singleton[0] = ServiceMngr(**init_kwargs)
+
+            # TODO: put into `.__aenter__()` section of
+            # eventual `@singleton_acm` API wrapper.
+            #
+            # assign globally for future daemon/task creation
+            mngr.actor_n = an
+            mngr.service_n = tn
+
+        else:
+            assert (
+                mngr.actor_n
+                and
+                mngr.service_n
+            )
+            log.info(
+                'Using extant service mngr!\n\n'
+                f'{mngr!r}\n'  # it has a nice `.__repr__()` of services state
+            )
+
+        try:
+            # NOTE: this is a singleton factory impl specific detail
+            # which should be supported in the condensed
+            # `@singleton_acm` API?
+            mngr.debug_mode = debug_mode
+
+            yield mngr
+        finally:
+            # TODO: is this more clever/efficient?
+            # if 'samplerd' in mngr.service_tasks:
+            #     await mngr.cancel_service('samplerd')
+            tn.cancel_scope.cancel()
+
+
+def get_service_mngr() -> ServiceMngr:
+    '''
+    Try to get the singleton service-mngr for this actor presuming it
+    has already been allocated using,
+
+    .. code:: python
+
+        async with open_<@singleton_acm(func)>() as mngr`
+            ... this block kept open ...
+
+    If not yet allocated raise a `ServiceError`.
+
+    '''
+    # https://stackoverflow.com/a/12627202
+    # https://docs.python.org/3/library/inspect.html#inspect.Signature
+    maybe_mngr: ServiceMngr|None = inspect.signature(
+        open_service_mngr
+    ).parameters['_singleton'].default[0]
+
+    if maybe_mngr is None:
+        raise RuntimeError(
+            'Someone must allocate a `ServiceMngr` using\n\n'
+            '`async with open_service_mngr()` beforehand!!\n'
+        )
+
+    return maybe_mngr
+
+
+# TODO: we need remote wrapping and a general soln:
+# - factor this into a ``tractor.highlevel`` extension pack for the
+#   library.

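The `_singleton: list[ServiceMngr|None] = [None]` parameter is doing real work here: a mutable default argument is evaluated exactly once at import time, so it doubles as module-lifetime storage, and `inspect.signature()` (which follows `functools.wraps`-style `__wrapped__` chains, making the `@acm` decoration transparent) lets `get_service_mngr()` read it back without any module global. The trick in isolation:

import inspect
from contextlib import asynccontextmanager as acm

@acm
async def open_thing(
    _singleton: list = [None],  # evaluated once: doubles as storage
):
    if _singleton[0] is None:
        _singleton[0] = object()  # allocate on first entry only
    yield _singleton[0]

def get_thing():
    # read the default's *current* contents back via the signature
    cell = inspect.signature(open_thing).parameters['_singleton'].default
    if cell[0] is None:
        raise RuntimeError('`open_thing()` was never entered!')
    return cell[0]
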
@@ -46,31 +183,46 @@ from ._util import (
 # to the pikerd actor for starting services remotely!
 # - prolly rename this to ActorServicesNursery since it spawns
 #   new actors and supervises them to completion?
-class Services:
+@dataclass
+class ServiceMngr:
+# class ServiceMngr(msgspec.Struct):
+    '''
+    A multi-subactor-as-service manager.
 
-    actor_n: tractor._supervise.ActorNursery
+    Spawn, supervise and monitor service/daemon subactors in a SC
+    process tree.
+
+    '''
+    actor_n: ActorNursery
     service_n: trio.Nursery
-    debug_mode: bool  # tractor sub-actor debug mode flag
+    debug_mode: bool = False  # tractor sub-actor debug mode flag
+
     service_tasks: dict[
         str,
         tuple[
             trio.CancelScope,
+            Context,
             Portal,
             trio.Event,
         ]
-    ] = {}
-    locks = defaultdict(trio.Lock)
+    ] = field(default_factory=dict)
+
+    # internal per-service task mutexes
+    _locks = defaultdict(trio.Lock)
 
-    @classmethod
     async def start_service_task(
         self,
         name: str,
         portal: Portal,
+
+        # TODO: typevar for the return type of the target and then
+        # use it below for `ctx_res`?
         target: Callable,
+
         allow_overruns: bool = False,
         **ctx_kwargs,
 
-    ) -> (trio.CancelScope, Context):
+    ) -> (trio.CancelScope, Context, Any):
         '''
         Open a context in a service sub-actor, add to a stack
         that gets unwound at ``pikerd`` teardown.

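Note the `= {}` → `field(default_factory=dict)` swap: a bare mutable class attribute was tolerable on the old plain `Services` class (one class object, one shared dict), but `@dataclass` rejects mutable instance-field defaults outright. A quick illustration:

from dataclasses import dataclass, field

# @dataclass
# class Bad:
#     tasks: dict = {}  # raises ValueError at class-definition time!

@dataclass
class Good:
    tasks: dict = field(default_factory=dict)  # fresh dict per instance

a, b = Good(), Good()
assert a.tasks is not b.tasks
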
@@ -83,6 +235,7 @@ class Services:
         task_status: TaskStatus[
             tuple[
                 trio.CancelScope,
+                Context,
                 trio.Event,
                 Any,
             ]

@@ -90,64 +243,87 @@ class Services:
 
         ) -> Any:
 
+            # TODO: use the ctx._scope directly here instead?
+            # -[ ] actually what semantics do we expect for this
+            #    usage!?
             with trio.CancelScope() as cs:
-                async with portal.open_context(
-                    target,
-                    allow_overruns=allow_overruns,
-                    **ctx_kwargs,
-
-                ) as (ctx, first):
-
-                    # unblock once the remote context has started
-                    complete = trio.Event()
-                    task_status.started((cs, complete, first))
-                    log.info(
-                        f'`pikerd` service {name} started with value {first}'
-                    )
-                    try:
+                try:
+                    async with portal.open_context(
+                        target,
+                        allow_overruns=allow_overruns,
+                        **ctx_kwargs,
+
+                    ) as (ctx, started):
+
+                        # unblock once the remote context has started
+                        complete = trio.Event()
+                        task_status.started((
+                            cs,
+                            ctx,
+                            complete,
+                            started,
+                        ))
+                        log.info(
+                            f'`pikerd` service {name} started with value {started}'
+                        )
                         # wait on any context's return value
                         # and any final portal result from the
                         # sub-actor.
-                        ctx_res: Any = await ctx.result()
+                        ctx_res: Any = await ctx.wait_for_result()
 
                         # NOTE: blocks indefinitely until cancelled
                         # either by error from the target context
                         # function or by being cancelled here by the
                         # surrounding cancel scope.
-                        return (await portal.result(), ctx_res)
+                        return (
                            await portal.wait_for_result(),
                            ctx_res,
                        )
 
-                    except ContextCancelled as ctxe:
-                        canceller: tuple[str, str] = ctxe.canceller
-                        our_uid: tuple[str, str] = current_actor().uid
-                        if (
-                            canceller != portal.channel.uid
-                            and
-                            canceller != our_uid
-                        ):
-                            log.cancel(
-                                f'Actor-service {name} was remotely cancelled?\n'
-                                f'remote canceller: {canceller}\n'
-                                f'Keeping {our_uid} alive, ignoring sub-actor cancel..\n'
-                            )
-                        else:
-                            raise
+                except ContextCancelled as ctxe:
+                    canceller: tuple[str, str] = ctxe.canceller
+                    our_uid: tuple[str, str] = current_actor().uid
+                    if (
+                        canceller != portal.chan.uid
+                        and
+                        canceller != our_uid
+                    ):
+                        log.cancel(
+                            f'Actor-service `{name}` was remotely cancelled by a peer?\n'
+
+                            # TODO: this would be a good spot to use
+                            # a respawn feature Bo
+                            f'-> Keeping `pikerd` service manager alive despite this inter-peer cancel\n\n'
+
+                            f'cancellee: {portal.chan.uid}\n'
+                            f'canceller: {canceller}\n'
+                        )
+                    else:
+                        raise
 
-                    finally:
-                        await portal.cancel_actor()
-                        complete.set()
-                        self.service_tasks.pop(name)
+                finally:
+                    # NOTE: the ctx MUST be cancelled first if we
+                    # don't want the above `ctx.wait_for_result()` to
+                    # raise a self-ctxc. WHY, well since from the ctx's
+                    # perspective the cancel request will have
+                    # arrived out-of-band at the `Actor.cancel()`
+                    # level, thus `Context.cancel_called == False`,
+                    # meaning `ctx._is_self_cancelled() == False`.
+                    # with trio.CancelScope(shield=True):
+                    #     await ctx.cancel()
+                    await portal.cancel_actor()
+                    complete.set()
+                    self.service_tasks.pop(name)
 
-        cs, complete, first = await self.service_n.start(open_context_in_task)
+        cs, sub_ctx, complete, started = await self.service_n.start(
+            open_context_in_task
+        )
 
         # store the cancel scope and portal for later cancellation or
-        # retstart if needed.
-        self.service_tasks[name] = (cs, portal, complete)
+        # restart if needed.
+        self.service_tasks[name] = (cs, sub_ctx, portal, complete)
+        return cs, sub_ctx, started
 
-        return cs, first
-
-    @classmethod
     async def cancel_service(
         self,
         name: str,

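The `service_n.start()` / `task_status.started()` pair above is trio's standard startup handshake: the spawning task blocks until the service task reports its `(cs, sub_ctx, complete, started)` tuple. The bare pattern, independent of `tractor`:

import trio

async def service(task_status=trio.TASK_STATUS_IGNORED):
    with trio.CancelScope() as cs:
        # hand the caller our cancel scope once we're actually up
        task_status.started(cs)
        await trio.sleep_forever()

async def main():
    async with trio.open_nursery() as tn:
        cs = await tn.start(service)  # blocks until `.started(cs)`
        cs.cancel()  # cancel just that one service task

trio.run(main)
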
@@ -158,8 +334,80 @@ class Services:
 
         '''
         log.info(f'Cancelling `pikerd` service {name}')
-        cs, portal, complete = self.service_tasks[name]
-        cs.cancel()
+        cs, sub_ctx, portal, complete = self.service_tasks[name]
+
+        # cs.cancel()
+        await sub_ctx.cancel()
         await complete.wait()
-        assert name not in self.service_tasks, \
-            f'Service task for {name} not terminated?'
+
+        if name in self.service_tasks:
+            # TODO: custom err?
+            # raise ServiceError(
+            raise RuntimeError(
+                f'Service task for {name} not terminated?'
+            )
+
+        # assert name not in self.service_tasks, \
+        #     f'Service task for {name} not terminated?'
+
+    async def start_service(
+        self,
+        daemon_name: str,
+        ctx_ep: Callable,  # kwargs must be `partial`-ed in!
+
+        debug_mode: bool = False,
+        **tractor_actor_kwargs,
+
+    ) -> Context:
+        '''
+        Start a "service" task in a new sub-actor (daemon) and manage
+        its lifetime indefinitely.
+
+        Services can be cancelled/shutdown using `.cancel_service()`.
+
+        '''
+        entry: tuple|None = self.service_tasks.get(daemon_name)
+        if entry:
+            (cs, sub_ctx, portal, complete) = entry
+            return sub_ctx
+
+        if daemon_name not in self.service_tasks:
+            portal = await self.actor_n.start_actor(
+                daemon_name,
+                debug_mode=(  # maybe set globally during allocate
+                    debug_mode
+                    or
+                    self.debug_mode
+                ),
+                **tractor_actor_kwargs,
+            )
+            ctx_kwargs: dict[str, Any] = {}
+            if isinstance(ctx_ep, functools.partial):
+                ctx_kwargs: dict[str, Any] = ctx_ep.keywords
+                ctx_ep: Callable = ctx_ep.func
+
+            (cs, sub_ctx, started) = await self.start_service_task(
+                daemon_name,
+                portal,
+                ctx_ep,
+                **ctx_kwargs,
+            )
+
+            return sub_ctx
+
+
+# TODO:
+# -[ ] factor all the common shit from `.data._sampling`
+#    and `.brokers._daemon` into here / `ServiceMngr`
+#    in terms of allocating the `Portal` as part of the
+#    "service-in-subactor" starting!
+# -[ ] move to `tractor.hilevel._service`, import and use here!
+#    NOTE: purposely leaks the ref to the mod-scope Bo
+# import tractor
+# from tractor.hilevel import (
+#     open_service_mngr,
+#     ServiceMngr,
+# )
+# mngr: ServiceMngr|None = None
+# with tractor.hilevel.open_service_mngr() as mngr:
+#     Services = proxy(mngr)

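A hedged usage sketch of the new `start_service()` API above; the endpoint is hypothetical, but the `functools.partial` convention (kwargs must be pre-bound) is exactly what the method unwraps:

import functools

import tractor
import trio

@tractor.context
async def open_my_service(ctx: tractor.Context, port: int) -> None:
    # hypothetical endpoint: report readiness, then serve forever
    await ctx.started(port)
    await trio.sleep_forever()

async def demo(mngr) -> None:  # `mngr: ServiceMngr` from above
    sub_ctx = await mngr.start_service(
        daemon_name='my-daemon',
        ctx_ep=functools.partial(open_my_service, port=6666),
    )
    # idempotent: calling again just returns the extant ctx
    assert (await mngr.start_service(
        'my-daemon', open_my_service)) is sub_ctx
    await mngr.cancel_service('my-daemon')
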
@@ -21,11 +21,13 @@ from typing import (
     TYPE_CHECKING,
 )
 
+# TODO: oof, needs to be changed to `httpx`!
 import asks
 
 if TYPE_CHECKING:
     import docker
     from ._ahab import DockerContainer
+    from . import ServiceMngr
 
-from ._util import log  # sub-sys logger
+from ._util import (
 
@@ -127,7 +129,7 @@ def start_elasticsearch(
 
 @acm
 async def start_ahab_daemon(
-    service_mngr: Services,
+    service_mngr: ServiceMngr,
     user_config: dict | None = None,
     loglevel: str | None = None,

@@ -53,7 +53,7 @@ import pendulum
 # import purerpc
 
 from ..data.feed import maybe_open_feed
-from . import Services
+from . import ServiceMngr
 from ._util import (
     log,  # sub-sys logger
     get_console_log,
 
@@ -233,7 +233,7 @@ def start_marketstore(
 
 @acm
 async def start_ahab_daemon(
-    service_mngr: Services,
+    service_mngr: ServiceMngr,
     user_config: dict | None = None,
     loglevel: str | None = None,

@@ -161,7 +161,12 @@ class NativeStorageClient:
 
     def index_files(self):
         for path in self._datadir.iterdir():
-            if path.name in {'borked', 'expired',}:
+            if (
+                path.name in {'borked', 'expired',}
+                or
+                '.parquet' not in str(path)
+            ):
+                # ignore all non-apache files (for now)
                 continue
 
             key: str = path.name.rstrip('.parquet')

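One pre-existing wart visible in that trailing context line: `str.rstrip('.parquet')` strips a *character set*, not a suffix, so any key ending in one of those letters gets mangled. Not part of this diff, but for the record the stdlib alternatives:

from pathlib import Path

p = Path('btcusdt.parquet')
assert p.name.rstrip('.parquet') == 'btcusd'          # trailing 't' eaten!
assert p.stem == 'btcusdt'                            # suffix-aware
assert p.name.removesuffix('.parquet') == 'btcusdt'   # py3.9+
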
@@ -44,8 +44,10 @@ import trio
 from trio_typing import TaskStatus
 import tractor
 from pendulum import (
+    Interval,
     DateTime,
     Duration,
+    duration as mk_duration,
     from_timestamp,
 )
 import numpy as np

@@ -214,7 +216,8 @@ async def maybe_fill_null_segments(
     # pair, immediately stop backfilling?
     if (
         start_dt
-        and end_dt < start_dt
+        and
+        end_dt < start_dt
     ):
         await tractor.pause()
         break

@@ -262,6 +265,7 @@ async def maybe_fill_null_segments(
     except tractor.ContextCancelled:
+        # log.exception
         await tractor.pause()
         raise
 
     null_segs_detected.set()
     # RECHECK for more null-gaps

@@ -349,7 +353,7 @@ async def maybe_fill_null_segments(
 
 async def start_backfill(
     get_hist,
-    frame_types: dict[str, Duration] | None,
+    def_frame_duration: Duration,
     mod: ModuleType,
     mkt: MktPair,
     shm: ShmArray,

@@ -379,22 +383,23 @@ async def start_backfill(
     update_start_on_prepend: bool = False
     if backfill_until_dt is None:
 
-        # TODO: drop this right and just expose the backfill
-        # limits inside a [storage] section in conf.toml?
-        # when no tsdb "last datum" is provided, we just load
-        # some near-term history.
-        # periods = {
-        #     1: {'days': 1},
-        #     60: {'days': 14},
-        # }
-
-        # do a decently sized backfill and load it into storage.
+        # TODO: per-provider default history-durations?
+        # -[ ] inside the `open_history_client()` config allow
+        #    declaring the history duration limits instead of
+        #    guessing and/or applying the same limits to all?
+        #
+        # -[ ] allow declaring (default) per-provider backfill
+        #    limits inside a [storage] sub-section in conf.toml?
+        #
+        # NOTE, when no tsdb "last datum" is provided, we just
+        # load some near-term history by presuming a "decently
+        # large" 60s duration limit and a much shorter 1s range.
         periods = {
             1: {'days': 2},
             60: {'years': 6},
         }
         period_duration: int = periods[timeframe]
 
-        update_start_on_prepend = True
+        update_start_on_prepend: bool = True
 
     # NOTE: manually set the "latest" datetime which we intend to
     # backfill history "until" so as to adhere to the history

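The `periods` table maps a sample period in seconds to pendulum-duration kwargs; producing the actual backfill cutoff is then just a keyword-splat subtract. In isolation (values from the hunk, surrounding names assumed):

import pendulum

periods = {
    1: {'days': 2},      # 1s bars: ~2 days of near-term history
    60: {'years': 6},    # 60s bars: multi-year backfill
}
timeframe = 60
last_start_dt = pendulum.now('UTC')

# subtract the per-timeframe duration to get the backfill cutoff
backfill_until_dt = last_start_dt.subtract(**periods[timeframe])
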
@@ -416,7 +421,6 @@ async def start_backfill(
         f'backfill_until_dt: {backfill_until_dt}\n'
         f'last_start_dt: {last_start_dt}\n'
     )
-
     try:
         (
             array,

@@ -426,71 +430,114 @@ async def start_backfill(
                 timeframe,
                 end_dt=last_start_dt,
             )
 
         except NoData as _daterr:
-            # 3 cases:
-            # - frame in the middle of a legit venue gap
-            # - history actually began at the `last_start_dt`
-            # - some other unknown error (ib blocking the
-            #   history bc they don't want you seeing how they
-            #   cucked all the tinas..)
-            if dur := frame_types.get(timeframe):
-                # decrement by a frame's worth of duration and
-                # retry a few times.
-                last_start_dt.subtract(
-                    seconds=dur.total_seconds()
+            orig_last_start_dt: datetime = last_start_dt
+            gap_report: str = (
+                f'EMPTY FRAME for `end_dt: {last_start_dt}`?\n'
+                f'{mod.name} -> tf@fqme: {timeframe}@{mkt.fqme}\n'
+                f'last_start_dt: {orig_last_start_dt}\n\n'
+                f'bf_until: {backfill_until_dt}\n'
+            )
+            # EMPTY FRAME signal with 3 (likely) causes:
+            #
+            # 1. range contains legit gap in venue history
+            # 2. history actually (edge case) **began** at the
+            #    value `last_start_dt`
+            # 3. some other unknown error (ib blocking the
+            #    history-query bc they don't want you seeing how
+            #    they cucked all the tinas.. like with options
+            #    hist)
+            #
+            if def_frame_duration:
+                # decrement by a duration's (frame) worth of time
+                # as maybe indicated by the backend to see if we
+                # can get older data before this possible
+                # "history gap".
+                last_start_dt: datetime = last_start_dt.subtract(
+                    seconds=def_frame_duration.total_seconds()
                 )
-                log.warning(
-                    f'{mod.name} -> EMPTY FRAME for end_dt?\n'
-                    f'tf@fqme: {timeframe}@{mkt.fqme}\n'
-                    'bf_until <- last_start_dt:\n'
-                    f'{backfill_until_dt} <- {last_start_dt}\n'
-                    f'Decrementing `end_dt` by {dur} and retry..\n'
+                gap_report += (
+                    f'Decrementing `end_dt` and retrying with,\n'
+                    f'def_frame_duration: {def_frame_duration}\n'
+                    f'(new) last_start_dt: {last_start_dt}\n'
                 )
+                log.warning(gap_report)
+                # skip writing to shm/tsdb and try the next
+                # duration's worth of prior history.
                 continue
 
-        # broker says there never was or is no more history to pull
-        except DataUnavailable:
-            log.warning(
-                f'NO-MORE-DATA in range?\n'
-                f'`{mod.name}` halted history:\n'
-                f'tf@fqme: {timeframe}@{mkt.fqme}\n'
-                'bf_until <- last_start_dt:\n'
-                f'{backfill_until_dt} <- {last_start_dt}\n'
-            )
+            else:
+                # await tractor.pause()
+                raise DataUnavailable(gap_report)
 
-            # ugh, what's a better way?
-            # TODO: fwiw, we probably want a way to signal a throttle
-            # condition (eg. with ib) so that we can halt the
-            # request loop until the condition is resolved?
-            if timeframe > 1:
-                await tractor.pause()
+        # broker says there never was or is no more history to pull
+        except DataUnavailable as due:
+            message: str = due.args[0]
+            log.warning(
+                f'Provider {mod.name!r} halted backfill due to,\n\n'
+
+                f'{message}\n'
+
+                f'fqme: {mkt.fqme}\n'
+                f'timeframe: {timeframe}\n'
+                f'last_start_dt: {last_start_dt}\n'
+                f'bf_until: {backfill_until_dt}\n'
+            )
+            # UGH: what's a better way?
+            # TODO: backends are responsible for being correct on
+            # this right!?
+            # -[ ] in the `ib` case we could maybe offer some way
+            #    to halt the request loop until the condition is
+            #    resolved or should the backend be entirely in
+            #    charge of solving such faults? yes, right?
             return
 
+        time: np.ndarray = array['time']
         assert (
-            array['time'][0]
+            time[0]
             ==
             next_start_dt.timestamp()
         )
 
-        diff = last_start_dt - next_start_dt
-        frame_time_diff_s = diff.seconds
+        assert time[-1] == next_end_dt.timestamp()
+
+        expected_dur: Interval = last_start_dt - next_start_dt
 
         # frame's worth of sample-period-steps, in seconds
         frame_size_s: float = len(array) * timeframe
-        expected_frame_size_s: float = frame_size_s + timeframe
-        if frame_time_diff_s > expected_frame_size_s:
-
+        recv_frame_dur: Duration = (
+            from_timestamp(array[-1]['time'])
+            -
+            from_timestamp(array[0]['time'])
+        )
+        if (
+            (lt_frame := (recv_frame_dur < expected_dur))
+            or
+            (null_frame := (frame_size_s == 0))
+            # ^XXX, should NEVER hit now!
+        ):
             # XXX: query result includes a start point prior to our
             # expected "frame size" and thus is likely some kind of
             # history gap (eg. market closed period, outage, etc.)
             # so just report it to console for now.
+            if lt_frame:
+                reason = 'Possible GAP (or first-datum)'
+            else:
+                assert null_frame
+                reason = 'NULL-FRAME'
+
+            missing_dur: Interval = expected_dur.end - recv_frame_dur.end
             log.warning(
-                'GAP DETECTED:\n'
-                f'last_start_dt: {last_start_dt}\n'
-                f'diff: {diff}\n'
-                f'frame_time_diff_s: {frame_time_diff_s}\n'
+                f'{timeframe}s-series {reason} detected!\n'
+                f'fqme: {mkt.fqme}\n'
+                f'last_start_dt: {last_start_dt}\n\n'
+                f'recv interval: {recv_frame_dur}\n'
+                f'expected interval: {expected_dur}\n\n'
+
+                f'Missing duration of history of {missing_dur.in_words()!r}\n'
+                f'{missing_dur}\n'
             )
+            # await tractor.pause()
 
         to_push = diff_history(
             array,

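The rewritten gap check compares pendulum objects directly instead of hand-rolled second counts: subtracting two `DateTime`s yields an `Interval`, and `from_timestamp()` lifts the frame's edge stamps into the same comparable type. A compact sketch with synthetic numbers:

import pendulum
from pendulum import from_timestamp

last_start_dt = pendulum.datetime(2024, 1, 2)
next_start_dt = pendulum.datetime(2024, 1, 1)
expected_dur = last_start_dt - next_start_dt  # an `Interval`

# pretend the provider only returned 18 hours worth of bars
first_ts = next_start_dt.timestamp()
last_ts = next_start_dt.add(hours=18).timestamp()
recv_frame_dur = from_timestamp(last_ts) - from_timestamp(first_ts)

if recv_frame_dur < expected_dur:  # both are timedelta subclasses
    missing_s = expected_dur.total_seconds() - recv_frame_dur.total_seconds()
    print(f'possible gap (or first-datum): {missing_s}s missing')
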
@@ -565,7 +612,8 @@ async def start_backfill(
         # long-term storage.
         if (
             storage is not None
-            and write_tsdb
+            and
+            write_tsdb
         ):
             log.info(
                 f'Writing {ln} frame to storage:\n'

@@ -578,6 +626,7 @@ async def start_backfill(
             'crypto',
             'crypto_currency',
             'fiat',  # a "forex pair"
+            'perpetual_future',  # stupid "perps" from cex land
         }:
             # for now, our table key schema is not including
             # the dst[/src] source asset token.

@@ -685,7 +734,7 @@ async def back_load_from_tsdb(
         last_tsdb_dt
         and latest_start_dt
     ):
-        backfilled_size_s = (
+        backfilled_size_s: Duration = (
             latest_start_dt - last_tsdb_dt
         ).seconds
         # if the shm buffer len is not large enough to contain

@@ -908,6 +957,8 @@ async def tsdb_backfill(
         f'{pformat(config)}\n'
     )
 
+    # concurrently load the provider's most-recent-frame AND any
+    # pre-existing tsdb history already saved in `piker` storage.
     dt_eps: list[DateTime, DateTime] = []
     async with trio.open_nursery() as tn:
         tn.start_soon(

@@ -918,7 +969,6 @@ async def tsdb_backfill(
             timeframe,
             config,
         )
-
         tsdb_entry: tuple = await load_tsdb_hist(
             storage,
             mkt,

@@ -947,6 +997,25 @@ async def tsdb_backfill(
             mr_end_dt,
         ) = dt_eps
 
+        first_frame_dur_s: Duration = (mr_end_dt - mr_start_dt).seconds
+        calced_frame_size: Duration = mk_duration(
+            seconds=first_frame_dur_s,
+        )
+        # NOTE, attempt to use the backend declared default frame
+        # sizing (as allowed by their time-series query APIs) and
+        # if not provided try to construct a default from the
+        # first frame received above.
+        def_frame_durs: dict[
+            int,
+            Duration,
+        ]|None = config.get('frame_types', None)
+        if def_frame_durs:
+            def_frame_size: Duration = def_frame_durs[timeframe]
+            assert def_frame_size == calced_frame_size
+        else:
+            # use what we calced from first frame above.
+            def_frame_size = calced_frame_size
+
         # NOTE: when there's no offline data, there's 2 cases:
         # - data backend doesn't support timeframe/sample
         #   period (in which case `dt_eps` should be `None` and

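Deriving the fallback frame size from the first received frame reduces to a couple of pendulum calls; a sketch of the same computation (`.total_seconds()` used here, which for a sub-day span matches the `.seconds` component the hunk reads):

from pendulum import datetime, duration as mk_duration

mr_start_dt = datetime(2024, 1, 1, 0, 0)
mr_end_dt = datetime(2024, 1, 1, 1, 0)

# span of the most-recent frame -> a canonical Duration
first_frame_dur_s = (mr_end_dt - mr_start_dt).total_seconds()
calced_frame_size = mk_duration(seconds=first_frame_dur_s)
assert calced_frame_size.total_seconds() == 3600.0
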
@@ -977,7 +1046,7 @@ async def tsdb_backfill(
             partial(
                 start_backfill,
                 get_hist=get_hist,
-                frame_types=config.get('frame_types', None),
+                def_frame_duration=def_frame_size,
                 mod=mod,
                 mkt=mkt,
                 shm=shm,

File diff suppressed because it is too large

@@ -25,11 +25,11 @@ build-backend = "poetry.core.masonry.api"
 ignore = []
 
 # https://docs.astral.sh/ruff/settings/#lint_per-file-ignores
-"piker/ui/qt.py" = [
-    "E402",
-    'F401',  # unused imports (without __all__ or blah as blah)
-    # "F841",  # unused variable rules
-]
+# "piker/ui/qt.py" = [
+#     "E402",
+#     'F401',  # unused imports (without __all__ or blah as blah)
+#     # "F841",  # unused variable rules
+# ]
 # ignore-init-module-imports = false
 
 # ------ - ------

@@ -50,10 +50,8 @@ attrs = "^23.1.0"
 bidict = "^0.22.1"
 colorama = "^0.4.6"
 colorlog = "^6.7.0"
-cython = "^3.0.0"
-greenback = "^1.1.1"
 ib-insync = "^0.9.86"
-msgspec = "^0.18.0"
+msgspec = "^0.18.6"
 numba = "^0.59.0"
 numpy = "^1.25"
 polars = "^0.18.13"

@@ -71,13 +69,11 @@ pdbp = "^1.5.0"
 trio = "^0.24"
 pendulum = "^3.0.0"
 httpx = "^0.27.0"
+cryptofeed = "^2.4.0"
+pyarrow = "^17.0.0"
 
-[tool.poetry.dependencies.tractor]
-develop = true
-git = 'https://github.com/goodboy/tractor.git'
-branch = 'asyncio_debugger_support'
-# path = "../tractor"
-
+tractor = {path = "../tractor", develop = true}
+websockets = "12.0"
 [tool.poetry.dependencies.asyncvnc]
 git = 'https://github.com/pikers/asyncvnc.git'
 branch = 'main'

@@ -109,6 +105,8 @@ pytest = "^6.0.0"
 elasticsearch = "^8.9.0"
 xonsh = "^0.14.2"
 prompt-toolkit = "3.0.40"
+cython = "^3.0.0"
+greenback = "^1.1.1"
 
 # console enhancements and eventually remote debugging
 # extras/helpers.

@@ -10,7 +10,7 @@ from piker import (
     config,
 )
 from piker.service import (
-    Services,
+    get_service_mngr,
 )
 from piker.log import get_console_log
 
@@ -129,7 +129,7 @@ async def _open_test_pikerd(
     ) as service_manager,
 ):
     # this proc/actor is the pikerd
-    assert service_manager is Services
+    assert service_manager is get_service_mngr()
 
     async with tractor.wait_for_actor(
         'pikerd',

@@ -26,7 +26,7 @@ import pytest
 import tractor
 from uuid import uuid4
 
-from piker.service import Services
+from piker.service import ServiceMngr
 from piker.log import get_logger
 from piker.clearing._messages import (
     Order,
 
@@ -158,7 +158,7 @@ def load_and_check_pos(
 
 
 def test_ems_err_on_bad_broker(
-    open_test_pikerd: Services,
+    open_test_pikerd: ServiceMngr,
     loglevel: str,
 ):
     async def load_bad_fqme():

@@ -15,7 +15,7 @@ import tractor
 
 from piker.service import (
     find_service,
-    Services,
+    ServiceMngr,
 )
 from piker.data import (
     open_feed,
 
@@ -44,7 +44,7 @@ def test_runtime_boot(
     async def main():
         port = 6666
         daemon_addr = ('127.0.0.1', port)
-        services: Services
+        services: ServiceMngr
 
         async with (
             open_test_pikerd(