Compare commits


No commits in common. "daemon_sockaddr_config" and "310_plus" have entirely different histories.

79 changed files with 5546 additions and 12652 deletions

View File

@ -50,8 +50,3 @@ prefer_data_account = [
paper = "XX0000000"
margin = "X0000000"
ira = "X0000000"
[deribit]
key_id = 'XXXXXXXX'
key_secret = 'Xx_XxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXx'

View File

@ -3,12 +3,11 @@
version: "3.5"
services:
ib_gw_paper:
ib-gateway:
# other image tags available:
# https://github.com/waytrade/ib-gateway-docker#supported-tags
# image: waytrade/ib-gateway:981.3j
image: waytrade/ib-gateway:1012.2i
restart: always # restart whenever there's a crash or user clicks
image: waytrade/ib-gateway:981.3j
restart: always
network_mode: 'host'
volumes:
@ -40,12 +39,14 @@ services:
# this compose file which looks something like:
# TWS_USERID='myuser'
# TWS_PASSWORD='guest'
# TRADING_MODE=paper (or live)
# VNC_SERVER_PASSWORD='diggity'
environment:
TWS_USERID: ${TWS_USERID}
TWS_PASSWORD: ${TWS_PASSWORD}
TRADING_MODE: 'paper'
VNC_SERVER_PASSWORD: 'doggy'
VNC_SERVER_PORT: '3003'
TRADING_MODE: ${TRADING_MODE:-paper}
VNC_SERVER_PASSWORD: ${VNC_SERVER_PASSWORD:-}
# ports:
# - target: 4002
@ -61,40 +62,3 @@ services:
# - "127.0.0.1:4001:4001"
# - "127.0.0.1:4002:4002"
# - "127.0.0.1:5900:5900"
ib_gw_live:
image: waytrade/ib-gateway:1012.2i
restart: always
network_mode: 'host'
volumes:
- type: bind
source: ./jts_live.ini
target: /root/jts/jts.ini
# don't let ibc clobber this file for
# the main reason of not having a stupid
# timezone set..
read_only: true
# force our own ibc config
- type: bind
source: ./ibc.ini
target: /root/ibc/config.ini
# force our noop script - socat isn't needed in host mode.
- type: bind
source: ./fork_ports_delayed.sh
target: /root/scripts/fork_ports_delayed.sh
# force our noop script - socat isn't needed in host mode.
- type: bind
source: ./run_x11_vnc.sh
target: /root/scripts/run_x11_vnc.sh
read_only: true
# NOTE: to fill these out, define an `.env` file in the same dir as
# this compose file which looks something like:
environment:
TRADING_MODE: 'live'
VNC_SERVER_PASSWORD: 'doggy'
VNC_SERVER_PORT: '3004'

View File

@ -188,7 +188,7 @@ AcceptNonBrokerageAccountWarning=yes
#
# The default value is 60.
LoginDialogDisplayTimeout=20
LoginDialogDisplayTimeout = 60
@ -292,7 +292,7 @@ ExistingSessionDetectedAction=primary
# be set dynamically at run-time: most users will never need it,
# so don't use it unless you know you need it.
; OverrideTwsApiPort=4002
OverrideTwsApiPort=4002
# Read-only Login

View File

@ -1,33 +0,0 @@
[IBGateway]
ApiOnly=true
LocalServerPort=4001
# NOTE: must be set if using IBC's "reject" mode
TrustedIPs=127.0.0.1
; RemoteHostOrderRouting=ndc1.ibllc.com
; WriteDebug=true
; RemotePortOrderRouting=4001
; useRemoteSettings=false
; tradingMode=p
; Steps=8
; colorPalletName=dark
# window geo, this may be useful for sending `xdotool` commands?
; MainWindow.Width=1986
; screenHeight=3960
[Logon]
Locale=en
# most markets are oriented around this zone
# so might as well hard code it.
TimeZone=America/New_York
UseSSL=true
displayedproxymsg=1
os_titlebar=true
s3store=true
useRemoteSettings=false
[Communication]
ctciAutoEncrypt=true
Region=usr
; Peer=cdc1.ibllc.com:4001

View File

@ -1,35 +1,16 @@
#!/bin/sh
# start vnc server and listen for connections
# on port specced in `$VNC_SERVER_PORT`
# start VNC server
x11vnc \
-listen 127.0.0.1 \
-allow 127.0.0.1 \
-rfbport "${VNC_SERVER_PORT}" \
-ncache_cr \
-listen localhost \
-display :1 \
-forever \
-shared \
-logappend /var/log/x11vnc.log \
-bg \
-nowf \
-noxdamage \
-noxfixes \
-no6 \
-noipv6 \
# -nowcr \
# TODO: can't use this because of ``asyncvnc`` issue:
-autoport 3003 \
# can't use this because of ``asyncvnc`` issue:
# https://github.com/barneygale/asyncvnc/issues/1
# -passwd 'ibcansmbz'
# XXX: optional graphics caching flags that seem to rekt the overlay
# of the 2 gw windows? When running a single gateway
# this seems to maybe optimize some memory usage?
# -ncache_cr \
# -ncache \
# NOTE: this will prevent logs from going to the console.
# -logappend /var/log/x11vnc.log \
# where to start allocating ports
# -autoport "${VNC_SERVER_PORT}" \

View File

@ -18,10 +18,3 @@
piker: trading gear for hackers.
"""
from ._daemon import open_piker_runtime
from .data.feed import open_feed
__all__ = [
'open_piker_runtime',
'open_feed',
]

View File

@ -22,10 +22,10 @@ from typing import Optional, Union, Callable, Any
from contextlib import asynccontextmanager as acm
from collections import defaultdict
from msgspec import Struct
import tractor
from pydantic import BaseModel
import trio
from trio_typing import TaskStatus
import tractor
from .log import get_logger, get_console_log
from .brokers import get_brokermod
@ -35,12 +35,7 @@ log = get_logger(__name__)
_root_dname = 'pikerd'
_registry_host: str = '127.0.0.1'
_registry_port: int = 6116
_registry_addr = (
_registry_host,
_registry_port,
)
_registry_addr = ('127.0.0.1', 6116)
_tractor_kwargs: dict[str, Any] = {
# use a different registry addr than tractor's default
'arbiter_addr': _registry_addr
@ -52,13 +47,16 @@ _root_modules = [
]
class Services(Struct):
class Services(BaseModel):
actor_n: tractor._supervise.ActorNursery
service_n: trio.Nursery
debug_mode: bool # tractor sub-actor debug mode flag
service_tasks: dict[str, tuple[trio.CancelScope, tractor.Portal]] = {}
class Config:
arbitrary_types_allowed = True
async def start_service_task(
self,
name: str,
@ -140,7 +138,6 @@ async def open_pikerd(
# XXX: you should pretty much never want debug mode
# for data daemons when running in production.
debug_mode: bool = False,
registry_addr: None | tuple[str, int] = None,
) -> Optional[tractor._portal.Portal]:
'''
@ -152,13 +149,14 @@ async def open_pikerd(
'''
global _services
assert _services is None
# XXX: this may open a root actor as well
async with (
tractor.open_root_actor(
# passed through to ``open_root_actor``
arbiter_addr=registry_addr or _registry_addr,
arbiter_addr=_registry_addr,
name=_root_dname,
loglevel=loglevel,
debug_mode=debug_mode,
@ -197,22 +195,23 @@ async def open_piker_runtime(
# XXX: you should pretty much never want debug mode
# for data daemons when running in production.
debug_mode: bool = False,
registry_addr: None | tuple[str, int] = _registry_addr,
) -> tractor.Actor:
) -> Optional[tractor._portal.Portal]:
'''
Start a piker actor whose runtime will automatically sync with
existing piker actors on the local link based on configuration.
Start a piker actor whose runtime will automatically
sync with existing piker actors on the local network
based on configuration.
'''
global _services
assert _services is None
# XXX: this may open a root actor as well
async with (
tractor.open_root_actor(
# passed through to ``open_root_actor``
arbiter_addr=registry_addr,
arbiter_addr=_registry_addr,
name=name,
loglevel=loglevel,
debug_mode=debug_mode,
@ -221,7 +220,7 @@ async def open_piker_runtime(
# TODO: eventually we should be able to avoid
# having the root have more than permissions to
# spawn other specialized daemons I think?
enable_modules=_root_modules + enable_modules,
enable_modules=_root_modules,
) as _,
):
yield tractor.current_actor()
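# NB: a short usage sketch of the ``registry_addr`` knob threaded
# through above; it assumes the first positional arg is the actor
# name, and both the name and the alternate port here are made up:
import trio

async def main():
    # start a piker actor against a non-default registry socket
    async with open_piker_runtime(
        'my_actor',  # hypothetical actor name
        registry_addr=('127.0.0.1', 6666),  # illustrative alternate port
    ) as actor:
        print(actor.uid)  # the running ``tractor`` actor

trio.run(main)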
@ -253,7 +252,6 @@ async def maybe_open_runtime(
@acm
async def maybe_open_pikerd(
loglevel: Optional[str] = None,
registry_addr: None | tuple = None,
**kwargs,
) -> Union[tractor._portal.Portal, Services]:
@ -266,19 +264,11 @@ async def maybe_open_pikerd(
get_console_log(loglevel)
# subtle, we must have the runtime up here or portal lookup will fail
async with (
maybe_open_runtime(loglevel, **kwargs),
tractor.find_actor(_root_dname) as portal
):
# connect to any existing daemon presuming
# its registry socket was selected.
if (
portal is not None
and (
registry_addr is None
or portal.channel.raddr == registry_addr
)
):
async with maybe_open_runtime(loglevel, **kwargs):
async with tractor.find_actor(_root_dname) as portal:
# assert portal is not None
if portal is not None:
yield portal
return
@ -288,7 +278,6 @@ async def maybe_open_pikerd(
loglevel=loglevel,
debug_mode=kwargs.get('debug_mode', False),
registry_addr=registry_addr,
) as _:
# in the case where we're starting up the

View File

@ -18,10 +18,7 @@
Profiling wrappers for internal libs.
"""
import os
import sys
import time
from time import perf_counter
from functools import wraps
# NOTE: you can pass a flag to enable this:
@ -47,184 +44,3 @@ def timeit(fn):
return res
return wrapper
# Modified version of ``pyqtgraph.debug.Profiler`` that
# core seems hesitant to land in:
# https://github.com/pyqtgraph/pyqtgraph/pull/2281
class Profiler(object):
'''
Simple profiler allowing measurement of multiple time intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
'''
_profilers = os.environ.get("PYQTGRAPHPROFILE", None)
_profilers = _profilers.split(",") if _profilers is not None else []
_depth = 0
# NOTE: without this defined at the class level
# you won't see appropriately "nested" sub-profiler
# instance calls.
_msgs = []
# set this flag to disable all or individual profilers at runtime
disable = False
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabledProfiler = DisabledProfiler()
def __new__(
cls,
msg=None,
disabled='env',
delayed=True,
ms_threshold: float = 0.0,
):
"""Optionally create a new profiler based on caller's qualname.
``ms_threshold`` can be set to value in ms for which, if the
total measured time of the lifetime of this profiler is **less
than** this value, then no profiling messages will be printed.
Setting ``delayed=False`` disables this feature since messages
are emitted immediately.
"""
if (
disabled is True
or (
disabled == 'env'
and len(cls._profilers) == 0
)
):
return cls._disabledProfiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[-1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if disabled == 'env' and func_qualname not in cls._profilers:
# don't do anything
return cls._disabledProfiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
obj._finished = False
obj._firstTime = obj._lastTime = perf_counter()
obj._mt = ms_threshold
obj._newMsg("> Entering " + obj._name)
return obj
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = perf_counter()
ms = (newTime - self._lastTime) * 1000
self._newMsg(" %s: %0.4f ms", msg, ms)
self._lastTime = newTime
def mark(self, msg=None):
self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
tot_ms = (perf_counter() - self._firstTime) * 1000
self._newMsg(
"< Exiting %s, total time: %0.4f ms",
self._name,
tot_ms,
)
if tot_ms < self._mt:
# print(f'{tot_ms} < {self._mt}, clearing')
# NOTE: this list **must** be an instance var to avoid
# deleting common messages during GC I think?
self._msgs.clear()
# else:
# print(f'{tot_ms} > {self._mt}, not clearing')
# XXX: why is this needed?
# don't we **want to show** nested profiler messages?
if self._msgs: # and self._depth < 1:
# if self._msgs:
print("\n".join([m[0] % m[1] for m in self._msgs]))
# clear all entries
self._msgs.clear()
# type(self)._msgs = []
type(self)._depth -= 1
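# NB: a brief usage sketch for the class above; the module qualname
# ``myapp.render`` and the 5ms threshold are made up for illustration.
# The env var must be set before the module defining ``Profiler`` is
# imported, since ``PYQTGRAPHPROFILE`` is read at class-definition time:
import os
os.environ['PYQTGRAPHPROFILE'] = 'myapp.render'

def render():  # imagined to live in a module named ``myapp``
    # prints nothing at all if the profiler's total lifetime is < 5ms
    profiler = Profiler(ms_threshold=5.0)
    # ... build the scene ...
    profiler('built scene')
    # ... draw it ...
    profiler.finish('drew frame')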

View File

@ -33,17 +33,14 @@ import asks
from fuzzywuzzy import process as fuzzy
import numpy as np
import tractor
from pydantic.dataclasses import dataclass
from pydantic import BaseModel
import wsproto
from .._cacheables import open_cached_client
from ._util import (
resproc,
SymbolNotFound,
DataUnavailable,
)
from ._util import resproc, SymbolNotFound
from ..log import get_logger, get_console_log
from ..data import ShmArray
from ..data.types import Struct
from ..data._web_bs import open_autorecon_ws, NoBsWs
log = get_logger(__name__)
@ -82,14 +79,12 @@ _show_wap_in_history = False
# https://binance-docs.github.io/apidocs/spot/en/#exchange-information
class Pair(Struct, frozen=True):
class Pair(BaseModel):
symbol: str
status: str
baseAsset: str
baseAssetPrecision: int
cancelReplaceAllowed: bool
allowTrailingStop: bool
quoteAsset: str
quotePrecision: int
quoteAssetPrecision: int
@ -109,14 +104,14 @@ class Pair(Struct, frozen=True):
permissions: list[str]
class OHLC(Struct):
'''
Description of the flattened OHLC quote format.
@dataclass
class OHLC:
"""Description of the flattened OHLC quote format.
For schema details see:
https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams
'''
"""
time: int
open: float
@ -265,7 +260,6 @@ class Client:
for i, bar in enumerate(bars):
bar = OHLC(*bar)
bar.typecast()
row = []
for j, (name, ftype) in enumerate(_ohlc_dtype[1:]):
@ -293,7 +287,7 @@ async def get_client() -> Client:
# validation type
class AggTrade(Struct):
class AggTrade(BaseModel):
e: str # Event type
E: int # Event time
s: str # Symbol
@ -347,9 +341,7 @@ async def stream_messages(ws: NoBsWs) -> AsyncGenerator[NoBsWs, dict]:
elif msg.get('e') == 'aggTrade':
# NOTE: this is purely for a definition, ``msgspec.Struct``
# does not runtime-validate until you decode/encode.
# see: https://jcristharif.com/msgspec/structs.html#type-validation
# validate
msg = AggTrade(**msg)
# TODO: type out and require this quote format
@ -360,8 +352,8 @@ async def stream_messages(ws: NoBsWs) -> AsyncGenerator[NoBsWs, dict]:
'brokerd_ts': time.time(),
'ticks': [{
'type': 'trade',
'price': float(msg.p),
'size': float(msg.q),
'price': msg.p,
'size': msg.q,
'broker_ts': msg.T,
}],
}
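# NB: a minimal sketch of the validation behavior noted above, using
# a simplified stand-in struct (the real ``AggTrade`` has more
# fields): construction does not type-check, decoding does.
import msgspec

class MiniTrade(msgspec.Struct):
    p: float  # price
    q: float  # size

# constructing a ``Struct`` does NOT runtime-validate:
trade = MiniTrade(p='not-a-float', q=1.0)  # accepted silently

# decoding against the type DOES validate:
try:
    msgspec.json.decode(b'{"p": "oops", "q": 1.0}', type=MiniTrade)
except msgspec.ValidationError as err:
    print(err)  # Expected `float`, got `str` - at `$.p`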
@ -392,7 +384,6 @@ async def open_history_client(
async with open_cached_client('binance') as client:
async def get_ohlc(
timeframe: float,
end_dt: Optional[datetime] = None,
start_dt: Optional[datetime] = None,
@ -401,8 +392,6 @@ async def open_history_client(
datetime, # start
datetime, # end
]:
if timeframe != 60:
raise DataUnavailable('Only 1m bars are supported')
array = await client.bars(
symbol,
@ -459,7 +448,7 @@ async def stream_quotes(
d = cache[sym.upper()]
syminfo = Pair(**d) # validation
si = sym_infos[sym] = syminfo.to_dict()
si = sym_infos[sym] = syminfo.dict()
# XXX: after manually inspecting the response format we
# just directly pick out the info we need

View File

@ -39,148 +39,6 @@ _config_dir = click.get_app_dir('piker')
_watchlists_data_path = os.path.join(_config_dir, 'watchlists.json')
OK = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_ok(s: str, **kwargs):
print(OK + s + ENDC, **kwargs)
def print_error(s: str, **kwargs):
print(FAIL + s + ENDC, **kwargs)
def get_method(client, meth_name: str):
print(f'checking client for method \'{meth_name}\'...', end='', flush=True)
method = getattr(client, meth_name, None)
assert method
print_ok('found!')
return method
async def run_method(client, meth_name: str, **kwargs):
method = get_method(client, meth_name)
print('running...', end='', flush=True)
result = await method(**kwargs)
print_ok(f'done! result: {type(result)}')
return result
async def run_test(broker_name: str):
brokermod = get_brokermod(broker_name)
total = 0
passed = 0
failed = 0
print(f'getting client...', end='', flush=True)
if not hasattr(brokermod, 'get_client'):
print_error('fail! no \'get_client\' context manager found.')
return
async with brokermod.get_client(is_brokercheck=True) as client:
print_ok(f'done! inside client context.')
# check for methods present on brokermod
method_list = [
'backfill_bars',
'get_client',
'trades_dialogue',
'open_history_client',
'open_symbol_search',
'stream_quotes',
]
for method in method_list:
print(
f'checking brokermod for method \'{method}\'...',
end='', flush=True)
if not hasattr(brokermod, method):
print_error(f'fail! method \'{method}\' not found.')
failed += 1
else:
print_ok('done!')
passed += 1
total += 1
# check for methods present on brokermod.Client and their
# results
# for private methods only check is present
method_list = [
'get_balances',
'get_assets',
'get_trades',
'get_xfers',
'submit_limit',
'submit_cancel',
'search_symbols',
]
for method_name in method_list:
try:
get_method(client, method_name)
passed += 1
except AssertionError:
print_error(f'fail! method \'{method_name}\' not found.')
failed += 1
total += 1
# check for methods present on brokermod.Client and their
# results
syms = await run_method(client, 'symbol_info')
total += 1
if len(syms) == 0:
raise BaseException('Empty Symbol list?')
passed += 1
first_sym = tuple(syms.keys())[0]
method_list = [
('cache_symbols', {}),
('search_symbols', {'pattern': first_sym[:-1]}),
('bars', {'symbol': first_sym})
]
for method_name, method_kwargs in method_list:
try:
await run_method(client, method_name, **method_kwargs)
passed += 1
except AssertionError:
print_error(f'fail! method \'{method_name}\' not found.')
failed += 1
total += 1
print(f'total: {total}, passed: {passed}, failed: {failed}')
@cli.command()
@click.argument('broker', nargs=1, required=True)
@click.pass_obj
def brokercheck(config, broker):
'''
Test broker apis for completeness.
'''
async def bcheck_main():
async with maybe_spawn_brokerd(broker) as portal:
await portal.run(run_test, broker)
await portal.cancel_actor()
trio.run(run_test, broker)
@cli.command()
@click.option('--keys', '-k', multiple=True,
help='Return results only for these keys')
@ -335,8 +193,6 @@ def contracts(ctx, loglevel, broker, symbol, ids):
brokermod = get_brokermod(broker)
get_console_log(loglevel)
contracts = trio.run(partial(core.contracts, brokermod, symbol))
if not ids:
# just print out expiry dates which can be used with

View File

@ -1,70 +0,0 @@
``deribit`` backend
------------------
pretty good liquidity crypto derivatives venue; uses custom json-rpc
over websocket for client methods, then `cryptofeed` for data streams.
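a request frame for that json-rpc layer looks roughly like the
following (method and params per deribit's public api docs; the
transport wrapper actually used in this repo is
``open_jsonrpc_session`` from ``piker.data._web_bs``):

.. code:: python

    import json

    # one json-rpc request frame sent over the websocket; replies
    # are matched back to requests by ``id``
    request = {
        'jsonrpc': '2.0',
        'id': 1,
        'method': 'public/get_instruments',
        'params': {
            'currency': 'BTC',
            'kind': 'option',
            'expired': 'false',
        },
    }
    payload = json.dumps(request)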
status
******
- supports option charts
- no order support yet
config
******
In order to get order mode support your ``brokers.toml``
needs to have something like the following:
.. code:: toml
[deribit]
key_id = 'XXXXXXXX'
key_secret = 'Xx_XxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXx'
To obtain an api id and secret you need to create an account, which can be a
real market account over at:
- deribit.com (requires KYC for deposit address)
Or a testnet account over at:
- test.deribit.com
For testnet, once the account is created, here is how you deposit fake crypto to
try it out:
1) Go to Wallet:
.. figure:: assets/0_wallet.png
:align: center
:target: assets/0_wallet.png
:alt: wallet page
2) Then click on the ellipsis menu and select deposit
.. figure:: assets/1_wallet_select_deposit.png
:align: center
:target: assets/1_wallet_select_deposit.png
:alt: wallet deposit page
3) This will take you to the deposit address page
.. figure:: assets/2_gen_deposit_addr.png
:align: center
:target: assets/2_gen_deposit_addr.png
:alt: generate deposit address page
4) After clicking generate you should see the address; copy it, then go to the
`coin faucet <https://test.deribit.com/dericoin/BTC/deposit>`_ and send fake
coins to that address.
.. figure:: assets/3_deposit_address.png
:align: center
:target: assets/3_deposit_address.png
:alt: generated address
5) Back in the deposit address page you should see the deposit in your history
.. figure:: assets/4_wallet_deposit_history.png
:align: center
:target: assets/4_wallet_deposit_history.png
:alt: wallet deposit history

View File

@ -1,65 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Deribit backend.
'''
from piker.log import get_logger
log = get_logger(__name__)
from .api import (
get_client,
)
from .feed import (
open_history_client,
open_symbol_search,
stream_quotes,
backfill_bars
)
# from .broker import (
# trades_dialogue,
# norm_trade_records,
# )
__all__ = [
'get_client',
# 'trades_dialogue',
'open_history_client',
'open_symbol_search',
'stream_quotes',
# 'norm_trade_records',
]
# tractor RPC enable arg
__enable_modules__: list[str] = [
'api',
'feed',
# 'broker',
]
# passed to ``tractor.ActorNursery.start_actor()``
_spawn_kwargs = {
'infect_asyncio': True,
}
# annotation to let backend agnostic code
# know if ``brokerd`` should be spawned with
# ``tractor``'s aio mode.
_infect_asyncio: bool = True

View File

@ -1,667 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Deribit backend.
'''
import json
import time
import asyncio
from contextlib import asynccontextmanager as acm, AsyncExitStack
from functools import partial
from datetime import datetime
from typing import Any, Optional, Iterable, Callable
import pendulum
import asks
import trio
from trio_typing import Nursery, TaskStatus
from fuzzywuzzy import process as fuzzy
import numpy as np
from piker.data.types import Struct
from piker.data._web_bs import (
NoBsWs,
open_autorecon_ws,
open_jsonrpc_session
)
from .._util import resproc
from piker import config
from piker.log import get_logger
from tractor.trionics import (
broadcast_receiver,
BroadcastReceiver,
maybe_open_context
)
from tractor import to_asyncio
from cryptofeed import FeedHandler
from cryptofeed.defines import (
DERIBIT,
L1_BOOK, TRADES,
OPTION, CALL, PUT,
FILLS, ORDER_INFO  # needed by the order feed relay further below
)
from cryptofeed.symbols import Symbol
log = get_logger(__name__)
_spawn_kwargs = {
'infect_asyncio': True,
}
_url = 'https://www.deribit.com'
_ws_url = 'wss://www.deribit.com/ws/api/v2'
_testnet_ws_url = 'wss://test.deribit.com/ws/api/v2'
# Broker specific ohlc schema (rest)
_ohlc_dtype = [
('index', int),
('time', int),
('open', float),
('high', float),
('low', float),
('close', float),
('volume', float),
('bar_wap', float), # will be zeroed by sampler if not filled
]
class JSONRPCResult(Struct):
# NOTE: in a msgspec ``Struct`` required fields must precede defaulted ones
id: int
usIn: int
usOut: int
usDiff: int
testnet: bool
jsonrpc: str = '2.0'
result: Optional[dict] = None
error: Optional[dict] = None
class KLinesResult(Struct):
close: list[float]
cost: list[float]
high: list[float]
low: list[float]
open: list[float]
status: str
ticks: list[int]
volume: list[float]
class Trade(Struct):
trade_seq: int
trade_id: str
timestamp: int
tick_direction: int
price: float
mark_price: float
iv: float
instrument_name: str
index_price: float
direction: str
amount: float
combo_trade_id: Optional[int] = 0
combo_id: Optional[str] = ''
class LastTradesResult(Struct):
trades: list[Trade]
has_more: bool
# convert datetime obj timestamp to unixtime in milliseconds
def deribit_timestamp(when):
return int((when.timestamp() * 1000) + (when.microsecond / 1000))
def str_to_cb_sym(name: str) -> Symbol:
base, strike_price, expiry_date, option_type = name.split('-')
quote = base
if option_type == 'put':
option_type = PUT
elif option_type == 'call':
option_type = CALL
else:
raise Exception("Couldn\'t parse option type")
return Symbol(
base, quote,
type=OPTION,
strike_price=strike_price,
option_type=option_type,
expiry_date=expiry_date,
expiry_normalize=False)
def piker_sym_to_cb_sym(name: str) -> Symbol:
base, expiry_date, strike_price, option_type = tuple(
name.upper().split('-'))
quote = base
if option_type == 'P':
option_type = PUT
elif option_type == 'C':
option_type = CALL
else:
raise Exception("Couldn\'t parse option type")
return Symbol(
base, quote,
type=OPTION,
strike_price=strike_price,
option_type=option_type,
expiry_date=expiry_date.upper())
def cb_sym_to_deribit_inst(sym: Symbol):
# cryptofeed normalized
cb_norm = ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z']
# deribit specific
months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
exp = sym.expiry_date
# YYMDD
# 01234
year, month, day = (
exp[:2], months[cb_norm.index(exp[2:3])], exp[3:])
otype = 'C' if sym.option_type == CALL else 'P'
return f'{sym.base}-{day}{month}{year}-{sym.strike_price}-{otype}'
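# NB: a worked example of the ``YYMDD`` slicing above; pure string
# logic, no ``cryptofeed`` import needed, and the expiry value is
# made up:
cb_norm = ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z']
months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
          'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']

exp = '22Z30'  # hypothetical cryptofeed expiry: YY + month-code + DD
year, month, day = exp[:2], months[cb_norm.index(exp[2:3])], exp[3:]
assert (year, month, day) == ('22', 'DEC', '30')
# so a BTC 20000 call maps to 'BTC-30DEC22-20000-C'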
def get_config() -> dict[str, Any]:
conf, path = config.load()
section = conf.get('deribit')
# TODO: document why we send this; basically it sets the logging params for cryptofeed
conf['log'] = {}
conf['log']['disabled'] = True
if section is None:
log.warning(f'No config section found for deribit in {path}')
return conf
class Client:
def __init__(self, json_rpc: Callable) -> None:
self._pairs: dict[str, Any] = None
config = get_config().get('deribit', {})
if ('key_id' in config) and ('key_secret' in config):
self._key_id = config['key_id']
self._key_secret = config['key_secret']
else:
self._key_id = None
self._key_secret = None
self.json_rpc = json_rpc
@property
def currencies(self):
return ['btc', 'eth', 'sol', 'usd']
async def get_balances(self, kind: str = 'option') -> dict[str, float]:
"""Return the set of positions for this account
by symbol.
"""
balances = {}
for currency in self.currencies:
resp = await self.json_rpc(
'private/get_positions', params={
'currency': currency.upper(),
'kind': kind})
balances[currency] = resp.result
return balances
async def get_assets(self) -> dict[str, float]:
"""Return the set of asset balances for this account
by symbol.
"""
balances = {}
for currency in self.currencies:
resp = await self.json_rpc(
'private/get_account_summary', params={
'currency': currency.upper()})
balances[currency] = resp.result['balance']
return balances
async def submit_limit(
self,
symbol: str,
price: float,
action: str,
size: float
) -> dict:
"""Place an order
"""
params = {
'instrument_name': symbol.upper(),
'amount': size,
'type': 'limit',
'price': price,
}
resp = await self.json_rpc(
f'private/{action}', params)
return resp.result
async def submit_cancel(self, oid: str):
"""Send cancel request for order id
"""
resp = await self.json_rpc(
'private/cancel', {'order_id': oid})
return resp.result
async def symbol_info(
self,
instrument: Optional[str] = None,
currency: str = 'btc', # BTC, ETH, SOL, USDC
kind: str = 'option',
expired: bool = False
) -> dict[str, Any]:
"""Get symbol info for the exchange.
"""
if self._pairs:
return self._pairs
# will retrieve all symbols by default
params = {
'currency': currency.upper(),
'kind': kind,
'expired': str(expired).lower()
}
resp = await self.json_rpc('public/get_instruments', params)
results = resp.result
instruments = {
item['instrument_name'].lower(): item
for item in results
}
if instrument is not None:
return instruments[instrument]
else:
return instruments
async def cache_symbols(
self,
) -> dict:
if not self._pairs:
self._pairs = await self.symbol_info()
return self._pairs
async def search_symbols(
self,
pattern: str,
limit: int = 30,
) -> dict[str, Any]:
data = await self.symbol_info()
matches = fuzzy.extractBests(
pattern,
data,
score_cutoff=35,
limit=limit
)
# repack in dict form
return {item[0]['instrument_name'].lower(): item[0]
for item in matches}
async def bars(
self,
symbol: str,
start_dt: Optional[datetime] = None,
end_dt: Optional[datetime] = None,
limit: int = 1000,
as_np: bool = True,
) -> dict:
instrument = symbol
if end_dt is None:
end_dt = pendulum.now('UTC')
if start_dt is None:
start_dt = end_dt.start_of(
'minute').subtract(minutes=limit)
start_time = deribit_timestamp(start_dt)
end_time = deribit_timestamp(end_dt)
# https://docs.deribit.com/#public-get_tradingview_chart_data
resp = await self.json_rpc(
'public/get_tradingview_chart_data',
params={
'instrument_name': instrument.upper(),
'start_timestamp': start_time,
'end_timestamp': end_time,
'resolution': '1'
})
result = KLinesResult(**resp.result)
new_bars = []
for i in range(len(result.close)):
_open = result.open[i]
high = result.high[i]
low = result.low[i]
close = result.close[i]
volume = result.volume[i]
row = [
(start_time + (i * (60 * 1000))) / 1000.0, # time
result.open[i],
result.high[i],
result.low[i],
result.close[i],
result.volume[i],
0
]
new_bars.append((i,) + tuple(row))
array = np.array(new_bars, dtype=_ohlc_dtype) if as_np else new_bars
return array
async def last_trades(
self,
instrument: str,
count: int = 10
):
resp = await self.json_rpc(
'public/get_last_trades_by_instrument',
params={
'instrument_name': instrument,
'count': count
})
return LastTradesResult(**resp.result)
@acm
async def get_client(
is_brokercheck: bool = False
) -> Client:
async with (
trio.open_nursery() as n,
open_jsonrpc_session(
_testnet_ws_url, dtype=JSONRPCResult) as json_rpc
):
client = Client(json_rpc)
_refresh_token: Optional[str] = None
_access_token: Optional[str] = None
async def _auth_loop(
task_status: TaskStatus = trio.TASK_STATUS_IGNORED
):
"""Background task that adquires a first access token and then will
refresh the access token while the nursery isn't cancelled.
https://docs.deribit.com/?python#authentication-2
"""
renew_time = 10
access_scope = 'trade:read_write'
_expiry_time = time.time()
got_access = False
nonlocal _refresh_token
nonlocal _access_token
while True:
if time.time() - _expiry_time < renew_time:
# if we are close to token expiry time
if _refresh_token is not None:
# if we already have a refresh token we don't need to send the
# secret
params = {
'grant_type': 'refresh_token',
'refresh_token': _refresh_token,
'scope': access_scope
}
else:
# we don't have refresh token, send secret to initialize
params = {
'grant_type': 'client_credentials',
'client_id': client._key_id,
'client_secret': client._key_secret,
'scope': access_scope
}
resp = await json_rpc('public/auth', params)
result = resp.result
_expiry_time = time.time() + result['expires_in']
_refresh_token = result['refresh_token']
if 'access_token' in result:
_access_token = result['access_token']
if not got_access:
# first time this loop runs we must indicate task is
# started, we have auth
got_access = True
task_status.started()
else:
await trio.sleep(renew_time / 2)
# if we have client creds launch auth loop
if client._key_id is not None:
await n.start(_auth_loop)
await client.cache_symbols()
yield client
n.cancel_scope.cancel()
@acm
async def open_feed_handler():
fh = FeedHandler(config=get_config())
yield fh
await to_asyncio.run_task(fh.stop_async)
@acm
async def maybe_open_feed_handler() -> trio.abc.ReceiveStream:
async with maybe_open_context(
acm_func=open_feed_handler,
key='feedhandler',
) as (cache_hit, fh):
yield fh
async def aio_price_feed_relay(
fh: FeedHandler,
instrument: Symbol,
from_trio: asyncio.Queue,
to_trio: trio.abc.SendChannel,
) -> None:
async def _trade(data: dict, receipt_timestamp):
to_trio.send_nowait(('trade', {
'symbol': cb_sym_to_deribit_inst(
str_to_cb_sym(data.symbol)).lower(),
'last': data,
'broker_ts': time.time(),
'data': data.to_dict(),
'receipt': receipt_timestamp
}))
async def _l1(data: dict, receipt_timestamp):
to_trio.send_nowait(('l1', {
'symbol': cb_sym_to_deribit_inst(
str_to_cb_sym(data.symbol)).lower(),
'ticks': [
{'type': 'bid',
'price': float(data.bid_price), 'size': float(data.bid_size)},
{'type': 'bsize',
'price': float(data.bid_price), 'size': float(data.bid_size)},
{'type': 'ask',
'price': float(data.ask_price), 'size': float(data.ask_size)},
{'type': 'asize',
'price': float(data.ask_price), 'size': float(data.ask_size)}
]
}))
fh.add_feed(
DERIBIT,
channels=[TRADES, L1_BOOK],
symbols=[piker_sym_to_cb_sym(instrument)],
callbacks={
TRADES: _trade,
L1_BOOK: _l1
})
if not fh.running:
fh.run(
start_loop=False,
install_signal_handlers=False)
# sync with trio
to_trio.send_nowait(None)
await asyncio.sleep(float('inf'))
@acm
async def open_price_feed(
instrument: str
) -> trio.abc.ReceiveStream:
async with maybe_open_feed_handler() as fh:
async with to_asyncio.open_channel_from(
partial(
aio_price_feed_relay,
fh,
instrument
)
) as (first, chan):
yield chan
@acm
async def maybe_open_price_feed(
instrument: str
) -> trio.abc.ReceiveStream:
# TODO: add a predicate to maybe_open_context
async with maybe_open_context(
acm_func=open_price_feed,
kwargs={
'instrument': instrument
},
key=f'{instrument}-price',
) as (cache_hit, feed):
if cache_hit:
yield broadcast_receiver(feed, 10)
else:
yield feed
async def aio_order_feed_relay(
fh: FeedHandler,
instrument: Symbol,
from_trio: asyncio.Queue,
to_trio: trio.abc.SendChannel,
) -> None:
async def _fill(data: dict, receipt_timestamp):
breakpoint()
async def _order_info(data: dict, receipt_timestamp):
breakpoint()
fh.add_feed(
DERIBIT,
channels=[FILLS, ORDER_INFO],
symbols=[instrument.upper()],
callbacks={
FILLS: _fill,
ORDER_INFO: _order_info,
})
if not fh.running:
fh.run(
start_loop=False,
install_signal_handlers=False)
# sync with trio
to_trio.send_nowait(None)
await asyncio.sleep(float('inf'))
@acm
async def open_order_feed(
instrument: list[str]
) -> trio.abc.ReceiveStream:
async with maybe_open_feed_handler() as fh:
async with to_asyncio.open_channel_from(
partial(
aio_order_feed_relay,
fh,
instrument
)
) as (first, chan):
yield chan
@acm
async def maybe_open_order_feed(
instrument: str
) -> trio.abc.ReceiveStream:
# TODO: add a predicate to maybe_open_context
async with maybe_open_context(
acm_func=open_order_feed,
kwargs={
'instrument': instrument
},
key=f'{instrument}-order',
) as (cache_hit, feed):
if cache_hit:
yield broadcast_receiver(feed, 10)
else:
yield feed

Binary files not shown: five images removed (169 KiB, 106 KiB, 59 KiB, 70 KiB and 132 KiB); by position these appear to be the wallet/deposit screenshots referenced from the deribit README above.

View File

@ -1,200 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Deribit backend.
'''
from contextlib import asynccontextmanager as acm
from datetime import datetime
from typing import Any, Optional, Callable
import time
import trio
from trio_typing import TaskStatus
import pendulum
from fuzzywuzzy import process as fuzzy
import numpy as np
import tractor
from piker._cacheables import open_cached_client
from piker.log import get_logger, get_console_log
from piker.data import ShmArray
from piker.brokers._util import (
BrokerError,
DataUnavailable,
)
from cryptofeed import FeedHandler
from cryptofeed.defines import (
DERIBIT, L1_BOOK, TRADES, OPTION, CALL, PUT
)
from cryptofeed.symbols import Symbol
from .api import (
Client, Trade,
get_config,
str_to_cb_sym, piker_sym_to_cb_sym, cb_sym_to_deribit_inst,
maybe_open_price_feed
)
_spawn_kwargs = {
'infect_asyncio': True,
}
log = get_logger(__name__)
@acm
async def open_history_client(
instrument: str,
) -> tuple[Callable, int]:
# TODO implement history getter for the new storage layer.
async with open_cached_client('deribit') as client:
async def get_ohlc(
end_dt: Optional[datetime] = None,
start_dt: Optional[datetime] = None,
) -> tuple[
np.ndarray,
datetime, # start
datetime, # end
]:
array = await client.bars(
instrument,
start_dt=start_dt,
end_dt=end_dt,
)
if len(array) == 0:
raise DataUnavailable
start_dt = pendulum.from_timestamp(array[0]['time'])
end_dt = pendulum.from_timestamp(array[-1]['time'])
return array, start_dt, end_dt
yield get_ohlc, {'erlangs': 3, 'rate': 3}
async def backfill_bars(
symbol: str,
shm: ShmArray, # type: ignore # noqa
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,
) -> None:
"""Fill historical bars into shared mem / storage afap.
"""
instrument = symbol
with trio.CancelScope() as cs:
async with open_cached_client('deribit') as client:
bars = await client.bars(instrument)
shm.push(bars)
task_status.started(cs)
async def stream_quotes(
send_chan: trio.abc.SendChannel,
symbols: list[str],
feed_is_live: trio.Event,
loglevel: str = None,
# startup sync
task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,
) -> None:
# XXX: required to propagate ``tractor`` loglevel to piker logging
get_console_log(loglevel or tractor.current_actor().loglevel)
sym = symbols[0]
async with (
open_cached_client('deribit') as client,
send_chan as send_chan
):
init_msgs = {
# pass back token, and bool, signalling if we're the writer
# and that history has been written
sym: {
'symbol_info': {
'asset_type': 'option',
'price_tick_size': 0.0005
},
'shm_write_opts': {'sum_tick_vml': False},
'fqsn': sym,
},
}
nsym = piker_sym_to_cb_sym(sym)
async with maybe_open_price_feed(sym) as stream:
cache = await client.cache_symbols()
last_trades = (await client.last_trades(
cb_sym_to_deribit_inst(nsym), count=1)).trades
if len(last_trades) == 0:
last_trade = None
async for typ, quote in stream:
if typ == 'trade':
last_trade = Trade(**(quote['data']))
break
else:
last_trade = Trade(**(last_trades[0]))
first_quote = {
'symbol': sym,
'last': last_trade.price,
'brokerd_ts': last_trade.timestamp,
'ticks': [{
'type': 'trade',
'price': last_trade.price,
'size': last_trade.amount,
'broker_ts': last_trade.timestamp
}]
}
task_status.started((init_msgs, first_quote))
feed_is_live.set()
async for typ, quote in stream:
topic = quote['symbol']
await send_chan.send({topic: quote})
@tractor.context
async def open_symbol_search(
ctx: tractor.Context,
) -> Client:
async with open_cached_client('deribit') as client:
# load all symbols locally for fast search
cache = await client.cache_symbols()
await ctx.started()
async with ctx.open_stream() as stream:
async for pattern in stream:
# repack in dict form
await stream.send(
await client.search_symbols(pattern))

View File

@ -1,134 +0,0 @@
``ib`` backend
--------------
more or less the "everything broker" for traditional and international
markets. they are the "go to" provider for automatic retail trading
and we interface to their APIs using the `ib_insync` project.
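a minimal ``ib_insync`` connection sketch (4002 is the paper gateway
port listed in the config below; the ``clientId`` is arbitrary and a
gateway must already be running):

.. code:: python

    from ib_insync import IB

    ib = IB()
    ib.connect('127.0.0.1', 4002, clientId=1)
    print(ib.isConnected())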
status
******
current support is *production grade* and both real-time data and order
management should be correct and fast. this backend is used by core devs
for live trading.
currently there is not yet full support for:
- options charting and trading
- paxos based crypto rt feeds and trading
config
******
In order to get order mode support your ``brokers.toml``
needs to have something like the following:
.. code:: toml
[ib]
hosts = [
"127.0.0.1",
]
# TODO: when we eventually spawn gateways in our
# container, we can just dynamically allocate these
# using IBC.
ports = [
4002,
4003,
4006,
4001,
7497,
]
# XXX: for a paper account the flex web query service
# is not supported so you have to manually download
# an XML report and put it in a location that can be
# accessed by the ``brokerd.ib`` backend code for parsing.
flex_token = '1111111111111111'
flex_trades_query_id = '6969696' # live accounts only?
# 3rd party web-api token
# (XXX: not sure if this works yet)
trade_log_token = '111111111111111'
# when clients are being scanned this determines
# which clients are preferred to be used for data feeds
# based on account names which are detected as active
# on each client.
prefer_data_account = [
# this has to be first in order to make data work with dual paper + live
'main',
'algopaper',
]
[ib.accounts]
main = 'U69696969'
algopaper = 'DU9696969'
If everything works correctly you should see any current positions
loaded in the pps pane on chart load and you should also be able to
check your trade records in the file::
<pikerk_conf_dir>/ledgers/trades_ib_algopaper.toml
An example ledger file will have entries written verbatim from the
trade events schema:
.. code:: toml
["0000e1a7.630f5e5a.01.01"]
secType = "FUT"
conId = 515416577
symbol = "MNQ"
lastTradeDateOrContractMonth = "20221216"
strike = 0.0
right = ""
multiplier = "2"
exchange = "GLOBEX"
primaryExchange = ""
currency = "USD"
localSymbol = "MNQZ2"
tradingClass = "MNQ"
includeExpired = false
secIdType = ""
secId = ""
comboLegsDescrip = ""
comboLegs = []
execId = "0000e1a7.630f5e5a.01.01"
time = 1661972086.0
acctNumber = "DU69696969"
side = "BOT"
shares = 1.0
price = 12372.75
permId = 441472655
clientId = 6116
orderId = 985
liquidation = 0
cumQty = 1.0
avgPrice = 12372.75
orderRef = ""
evRule = ""
evMultiplier = 0.0
modelCode = ""
lastLiquidity = 1
broker_time = 1661972086.0
name = "ib"
commission = 0.57
realizedPNL = 243.41
yield_ = 0.0
yieldRedemptionDate = 0
listingExchange = "GLOBEX"
date = "2022-08-31T18:54:46+00:00"
your ``pps.toml`` file will have position entries like,
.. code:: toml
[ib.algopaper."mnq.globex.20221216"]
size = -1.0
ppu = 12423.630576923071
bsuid = 515416577
expiry = "2022-12-16T00:00:00+00:00"
clears = [
{ dt = "2022-08-31T18:54:46+00:00", ppu = 12423.630576923071, accum_size = -19.0, price = 12372.75, size = 1.0, cost = 0.57, tid = "0000e1a7.630f5e5a.01.01" },
]

View File

@ -20,10 +20,15 @@ Interactive Brokers API backend.
Sub-modules within break into the core functionalities:
- ``broker.py`` part for orders / trading endpoints
- ``feed.py`` for real-time data feed endpoints
- ``api.py`` for the core API machinery which is ``trio``-ized
- ``data.py`` for real-time data feed endpoints
- ``client.py`` for the core API machinery which is ``trio``-ized
wrapping around ``ib_insync``.
- ``report.py`` for the hackery to build manual pp calcs
to avoid ib's absolute bullshit FIFO style position
tracking..
"""
from .api import (
get_client,
@ -33,10 +38,7 @@ from .feed import (
open_symbol_search,
stream_quotes,
)
from .broker import (
trades_dialogue,
norm_trade_records,
)
from .broker import trades_dialogue
__all__ = [
'get_client',

View File

@ -29,7 +29,6 @@ import itertools
from math import isnan
from typing import (
Any,
Optional,
Union,
)
import asyncio
@ -39,30 +38,16 @@ import time
from types import SimpleNamespace
from bidict import bidict
import trio
import tractor
from tractor import to_asyncio
import pendulum
import ib_insync as ibis
from ib_insync.contract import (
Contract,
ContractDetails,
Option,
)
from ib_insync.wrapper import RequestError
from ib_insync.contract import Contract, ContractDetails
from ib_insync.order import Order
from ib_insync.ticker import Ticker
from ib_insync.objects import (
BarDataList,
Position,
Fill,
Execution,
CommissionReport,
)
from ib_insync.wrapper import (
Wrapper,
RequestError,
)
from ib_insync.objects import Position
import ib_insync as ibis
from ib_insync.wrapper import Wrapper
from ib_insync.client import Client as ib_Client
import numpy as np
@ -80,11 +65,26 @@ _time_units = {
'h': ' hours',
}
_bar_sizes = {
1: '1 Sec',
60: '1 min',
60*60: '1 hour',
24*60*60: '1 day',
_time_frames = {
'1s': '1 Sec',
'5s': '5 Sec',
'30s': '30 Sec',
'1m': 'OneMinute',
'2m': 'TwoMinutes',
'3m': 'ThreeMinutes',
'4m': 'FourMinutes',
'5m': 'FiveMinutes',
'10m': 'TenMinutes',
'15m': 'FifteenMinutes',
'20m': 'TwentyMinutes',
'30m': 'HalfHour',
'1h': 'OneHour',
'2h': 'TwoHours',
'4h': 'FourHours',
'D': 'OneDay',
'W': 'OneWeek',
'M': 'OneMonth',
'Y': 'OneYear',
}
_show_wap_in_history: bool = False
@ -155,102 +155,70 @@ class NonShittyIB(ibis.IB):
self.client.apiEnd += self.disconnectedEvent
_futes_venues = (
'GLOBEX',
'NYMEX',
'CME',
'CMECRYPTO',
'COMEX',
'CMDTY', # special name case..
)
_adhoc_futes_set = {
# equities
'nq.globex',
'mnq.globex', # micro
'es.globex',
'mes.globex', # micro
# crypto$
'brr.cmecrypto',
'ethusdrr.cmecrypto',
# agriculture
'he.nymex', # lean hogs
'le.nymex', # live cattle (geezers)
'gf.nymex', # feeder cattle (younguns)
# raw
'lb.nymex', # random len lumber
# metals
# https://misc.interactivebrokers.com/cstools/contract_info/v3.10/index.php?action=Conid%20Info&wlId=IB&conid=69067924
'xauusd.cmdty', # london gold spot ^
'gc.nymex',
'mgc.nymex', # micro
# oil & gas
'cl.nymex',
'xagusd.cmdty', # silver spot
'ni.nymex', # silver futes
'qi.comex', # mini-silver futes
}
# taken from list here:
# https://www.interactivebrokers.com/en/trading/products-spot-currencies.php
_adhoc_fiat_set = set((
'USD,AED,AUD,CAD,'
'CHF,CNH,CZK,DKK,'
'EUR,GBP,HKD,HUF,'
'ILS,JPY,MXN,NOK,'
'NZD,PLN,RUB,SAR,'
'SEK,SGD,TRY,ZAR'
).split(','))
# map of symbols to contract ids
_adhoc_symbol_map = {
_adhoc_cmdty_data_map = {
# https://misc.interactivebrokers.com/cstools/contract_info/v3.10/index.php?action=Conid%20Info&wlId=IB&conid=69067924
# NOTE: some cmdtys/metals don't have trade data like gold/usd:
# https://groups.io/g/twsapi/message/44174
'XAUUSD': ({'conId': 69067924}, {'whatToShow': 'MIDPOINT'}),
}
for qsn in _adhoc_futes_set:
sym, venue = qsn.split('.')
assert venue.upper() in _futes_venues, f'{venue}'
_adhoc_symbol_map[sym.upper()] = (
{'exchange': venue},
{},
_futes_venues = (
'GLOBEX',
'NYMEX',
'CME',
'CMECRYPTO',
)
_adhoc_futes_set = {
# equities
'nq.globex',
'mnq.globex',
'es.globex',
'mes.globex',
# crypto$
'brr.cmecrypto',
'ethusdrr.cmecrypto',
# agriculture
'he.globex', # lean hogs
'le.globex', # live cattle (geezers)
'gf.globex', # feeder cattle (younguns)
# raw
'lb.globex', # random len lumber
# metals
'xauusd.cmdty', # gold spot
'gc.nymex',
'mgc.nymex',
'xagusd.cmdty', # silver spot
'ni.nymex', # silver futes
'qi.comex', # mini-silver futes
}
# exchanges we don't support at the moment due to not knowing
# how to do symbol-contract lookup correctly, likely due
# to not having the data feeds subscribed.
_exch_skip_list = {
'ASX', # aussie stocks
'MEXI', # mexican stocks
# no idea
'VALUE',
'FUNDSERV',
'SWB2',
'PSE',
'VALUE', # no idea
}
# https://misc.interactivebrokers.com/cstools/contract_info/v3.10/index.php?action=Conid%20Info&wlId=IB&conid=69067924
_enters = 0
def bars_to_np(bars: list) -> np.ndarray:
'''
Convert a "bars list thing" (``BarDataList`` type from ibis)
Convert a "bars list thing" (``BarsList`` type from ibis)
into a numpy struct array.
'''
@ -270,27 +238,6 @@ def bars_to_np(bars: list) -> np.ndarray:
return nparr
# NOTE: pacing violations exist for higher sample rates:
# https://interactivebrokers.github.io/tws-api/historical_limitations.html#pacing_violations
# Also see note on duration limits being lifted on 1m+ periods,
# but they say "use with discretion":
# https://interactivebrokers.github.io/tws-api/historical_limitations.html#non-available_hd
_samplings: dict[int, tuple[str, str]] = {
1: (
'1 secs',
f'{int(2e3)} S',
pendulum.duration(seconds=2e3),
),
# TODO: benchmark >1 D duration on query to see if
# throughput can be made faster during backfilling.
60: (
'1 min',
'1 D',
pendulum.duration(days=1),
),
}
class Client:
'''
IB wrapped for our broker backend API.
@ -314,29 +261,27 @@ class Client:
# NOTE: the ib.client here is "throttled" to 45 rps by default
async def trades(self) -> dict[str, Any]:
'''
Return list of trade-fills from current session in ``dict``.
async def trades(
self,
# api_only: bool = False,
'''
fills: list[Fill] = self.ib.fills()
norm_fills: list[dict] = []
) -> dict[str, Any]:
# orders = await self.ib.reqCompletedOrdersAsync(
# apiOnly=api_only
# )
fills = await self.ib.reqExecutionsAsync()
norm_fills = []
for fill in fills:
fill = fill._asdict() # namedtuple
for key, val in fill.items():
match val:
case Contract() | Execution() | CommissionReport():
for key, val in fill.copy().items():
if isinstance(val, Contract):
fill[key] = asdict(val)
norm_fills.append(fill)
return norm_fills
async def orders(self) -> list[Order]:
return await self.ib.reqAllOpenOrdersAsync(
apiOnly=False,
)
async def bars(
self,
fqsn: str,
@ -345,55 +290,52 @@ class Client:
start_dt: Union[datetime, str] = "1970-01-01T00:00:00.000000-05:00",
end_dt: Union[datetime, str] = "",
# ohlc sample period in seconds
sample_period_s: int = 1,
sample_period_s: str = 1, # ohlc sample period
period_count: int = int(2e3), # <- max per 1s sample query
# optional "duration of time" equal to the
# length of the returned history frame.
duration: Optional[str] = None,
**kwargs,
) -> tuple[BarDataList, np.ndarray, pendulum.Duration]:
) -> list[dict[str, Any]]:
'''
Retrieve OHLCV bars for an fqsn over a range up to the present.
'''
# See API docs here:
# https://interactivebrokers.github.io/tws-api/historical_data.html
bars_kwargs = {'whatToShow': 'TRADES'}
bars_kwargs.update(kwargs)
bar_size, duration, dt_duration = _samplings[sample_period_s]
global _enters
# log.info(f'REQUESTING BARS {_enters} @ end={end_dt}')
print(
f"REQUESTING {duration}'s worth {bar_size} BARS\n"
f'{_enters} @ end={end_dt}"'
)
print(f'REQUESTING BARS {_enters} @ end={end_dt}')
if not end_dt:
end_dt = ''
_enters += 1
contract = (await self.find_contracts(fqsn))[0]
contract = await self.find_contract(fqsn)
bars_kwargs.update(getattr(contract, 'bars_kwargs', {}))
# _min = min(2000*100, count)
bars = await self.ib.reqHistoricalDataAsync(
contract,
endDateTime=end_dt,
formatDate=2,
# time history length values format:
# ``durationStr=integer{SPACE}unit (S|D|W|M|Y)``
# OHLC sampling values:
# 1 secs, 5 secs, 10 secs, 15 secs, 30 secs, 1 min, 2 mins,
# 3 mins, 5 mins, 10 mins, 15 mins, 20 mins, 30 mins,
# 1 hour, 2 hours, 3 hours, 4 hours, 8 hours, 1 day, 1W, 1M
barSizeSetting=bar_size,
# barSizeSetting='1 secs',
# time history length values format:
# ``durationStr=integer{SPACE}unit (S|D|W|M|Y)``
durationStr=duration,
# durationStr='{count} S'.format(count=15000 * 5),
# durationStr='{count} D'.format(count=1),
# barSizeSetting='5 secs',
durationStr='{count} S'.format(count=period_count),
# barSizeSetting='5 secs',
barSizeSetting='1 secs',
# barSizeSetting='1 min',
# always use extended hours
useRTH=False,
@ -404,21 +346,11 @@ class Client:
# whatToShow='TRADES',
)
if not bars:
# NOTE: there's 2 cases here to handle (and this should be
# read alongside the implementation of
# ``.reqHistoricalDataAsync()``):
# - no data is returned for the period likely due to
# a weekend, holiday or other non-trading period prior to
# ``end_dt`` which exceeds the ``duration``,
# - a timeout occurred in which case insync internals return
# an empty list thing with bars.clear()...
return [], np.empty(0), dt_duration
# TODO: we could maybe raise ``NoData`` instead if we
# rewrite the method in the first case? right now there's no
# way to detect a timeout.
# TODO: raise underlying error here
raise ValueError(f"No bars retreived for {fqsn}?")
nparr = bars_to_np(bars)
return bars, nparr, dt_duration
return bars, nparr
async def con_deats(
self,
@ -432,15 +364,7 @@ class Client:
futs.append(self.ib.reqContractDetailsAsync(con))
# batch request all details
try:
results = await asyncio.gather(*futs)
except RequestError as err:
msg = err.message
if (
'No security definition' in msg
):
log.warning(f'{msg}: {contracts}')
return {}
# one set per future result
details = {}
@ -449,11 +373,20 @@ class Client:
# XXX: if there is more then one entry in the details list
# then the contract is so called "ambiguous".
for d in details_set:
con = d.contract
# nested dataclass we probably don't need and that won't
# IPC serialize..
key = '.'.join([
con.symbol,
con.primaryExchange or con.exchange,
])
expiry = con.lastTradeDateOrContractMonth
if expiry:
key += f'.{expiry}'
# nested dataclass we probably don't need and that
# won't IPC serialize..
d.secIdList = ''
key, calc_price = con2fqsn(d.contract)
details[key] = d
return details
@ -483,20 +416,17 @@ class Client:
self,
pattern: str,
# how many contracts to search "up to"
upto: int = 16,
upto: int = 3,
asdicts: bool = True,
) -> dict[str, ContractDetails]:
# TODO add search though our adhoc-locally defined symbol set
# for futes/cmdtys/
try:
results = await self.search_stocks(
pattern,
upto=upto,
)
except ConnectionError:
return {}
for key, deats in results.copy().items():
@ -507,54 +437,21 @@ class Client:
if sectype == 'IND':
results[f'{sym}.IND'] = tract
results.pop(key)
# exch = tract.exchange
# XXX: add back one of these to get the weird deadlock
# on the debugger from root without the latest
# maybe_wait_for_debugger() fix in the `open_context()`
# exit.
# assert 0
# if con.exchange not in _exch_skip_list:
exch = tract.exchange
if exch not in _exch_skip_list:
# try to lookup any contracts from our adhoc set
# since often the exchange/venue is named slightly
# different (eg. BRR.CMECRYPTO` instead of just
# `.CME`).
info = _adhoc_symbol_map.get(sym)
if info:
con_kwargs, bars_kwargs = info
exch = con_kwargs['exchange']
if exch in _futes_venues:
# try get all possible contracts for symbol as per,
# https://interactivebrokers.github.io/tws-api/basic_contracts.html#fut
con = ibis.Future(
symbol=sym,
exchange=exch,
)
# TODO: make this work, think it's something to do
# with the qualify flag.
# cons = await self.find_contracts(
# contract=con,
# err_on_qualify=False,
# )
# if cons:
try:
all_deats = await self.con_deats([con])
results |= all_deats
# forex pairs
elif sectype == 'CASH':
dst, src = tract.localSymbol.split('.')
pair_key = "/".join([dst, src])
exch = tract.exchange.lower()
results[f'{pair_key}.{exch}'] = tract
results.pop(key)
# XXX: again seems to trigger the weird tractor
# bug with the debugger..
# assert 0
except RequestError as err:
log.warning(err.message)
return results
@ -586,19 +483,13 @@ class Client:
return con
async def get_con(
self,
conid: int,
) -> Contract:
return await self.ib.qualifyContractsAsync(
ibis.Contract(conId=conid)
)
def parse_patt2fqsn(
async def find_contract(
self,
pattern: str,
currency: str = 'USD',
**kwargs,
) -> tuple[str, str, str, str]:
) -> Contract:
# TODO: we can't use this currently because
# ``wrapper.startTicker()`` currently caches ticker instances
@ -611,30 +502,12 @@ class Client:
# XXX UPDATE: we can probably do the tick/trades scraping
# inside our eventkit handler instead to bypass this entirely?
currency = ''
# fqsn parsing stage
# ------------------
if '.ib' in pattern:
from ..data._source import unpack_fqsn
_, symbol, expiry = unpack_fqsn(pattern)
broker, symbol, expiry = unpack_fqsn(pattern)
else:
symbol = pattern
expiry = ''
# another hack for forex pairs lul.
if (
'.idealpro' in symbol
# or '/' in symbol
):
exch = 'IDEALPRO'
symbol = symbol.removesuffix('.idealpro')
if '/' in symbol:
symbol, currency = symbol.split('/')
else:
# TODO: yes, a cache..
# try:
# # give the cache a go
# return self._contracts[symbol]
@ -645,80 +518,45 @@ class Client:
symbol, _, expiry = symbol.rpartition('.')
# use heuristics to figure out contract "type"
symbol, exch = symbol.upper().rsplit('.', maxsplit=1)
sym, exch = symbol.upper().rsplit('.', maxsplit=1)
return symbol, currency, exch, expiry
async def find_contracts(
self,
pattern: Optional[str] = None,
contract: Optional[Contract] = None,
qualify: bool = True,
err_on_qualify: bool = True,
) -> Contract:
if pattern is not None:
symbol, currency, exch, expiry = self.parse_patt2fqsn(
pattern,
)
sectype = ''
else:
assert contract
symbol = contract.symbol
sectype = contract.secType
exch = contract.exchange or contract.primaryExchange
expiry = contract.lastTradeDateOrContractMonth
currency = contract.currency
# contract searching stage
# ------------------------
qualify: bool = True
# futes
if exch in _futes_venues:
if expiry:
# get the "front" contract
con = await self.get_fute(
symbol=symbol,
contract = await self.get_fute(
symbol=sym,
exchange=exch,
expiry=expiry,
)
else:
# get the "front" contract
con = await self.get_fute(
symbol=symbol,
contract = await self.get_fute(
symbol=sym,
exchange=exch,
front=True,
)
elif (
exch in ('IDEALPRO')
or sectype == 'CASH'
):
# if '/' in symbol:
# currency = ''
# symbol, currency = symbol.split('/')
qualify = False
elif exch in ('FOREX'):
currency = ''
symbol, currency = sym.split('/')
con = ibis.Forex(
pair=''.join((symbol, currency)),
symbol=symbol,
currency=currency,
)
con.bars_kwargs = {'whatToShow': 'MIDPOINT'}
# commodities
elif exch == 'CMDTY': # eg. XAUUSD.CMDTY
con_kwargs, bars_kwargs = _adhoc_symbol_map[symbol]
con_kwargs, bars_kwargs = _adhoc_cmdty_data_map[sym]
con = ibis.Commodity(**con_kwargs)
con.bars_kwargs = bars_kwargs
# crypto$
elif exch == 'PAXOS': # btc.paxos
con = ibis.Crypto(
symbol=symbol,
currency=currency,
)
# stonks
else:
# TODO: metadata system for all these exchange rules..
@ -731,61 +569,41 @@ class Client:
exch = 'SMART'
else:
# XXX: order is super important here since
# a primary == 'SMART' won't ever work.
primaryExchange = exch
exch = 'SMART'
primaryExchange = exch
con = ibis.Stock(
symbol=symbol,
symbol=sym,
exchange=exch,
primaryExchange=primaryExchange,
currency=currency,
)
exch = 'SMART' if not exch else exch
contracts = [con]
if qualify:
try:
contracts = await self.ib.qualifyContractsAsync(con)
except RequestError as err:
msg = err.message
if (
'No security definition' in msg
and not err_on_qualify
):
log.warning(
f'Could not find def for {con}')
return None
exch = 'SMART' if not exch else exch
if qualify:
contract = (await self.ib.qualifyContractsAsync(con))[0]
else:
raise
if not contracts:
assert contract
except IndexError:
raise ValueError(f"No contract could be found {con}")
# pack all contracts into cache
for tract in contracts:
exch: str = tract.primaryExchange or tract.exchange or exch
pattern = f'{symbol}.{exch}'
expiry = tract.lastTradeDateOrContractMonth
# add an entry with expiry suffix if available
if expiry:
pattern += f'.{expiry}'
self._contracts[pattern] = contract
self._contracts[pattern.lower()] = tract
# add an additional entry with expiry suffix if available
conexp = contract.lastTradeDateOrContractMonth
if conexp:
self._contracts[pattern + f'.{conexp}'] = contract
return contracts
return contract
async def get_head_time(
self,
fqsn: str,
contract: Contract,
) -> datetime:
'''
Return the first datetime stamp for ``contract``.
"""Return the first datetime stamp for ``contract``.
'''
contract = (await self.find_contracts(fqsn))[0]
"""
return await self.ib.reqHeadTimeStampAsync(
contract,
whatToShow='TRADES',
@ -796,10 +614,9 @@ class Client:
async def get_sym_details(
self,
symbol: str,
) -> tuple[Contract, Ticker, ContractDetails]:
contract = (await self.find_contracts(symbol))[0]
contract = await self.find_contract(symbol)
ticker: Ticker = self.ib.reqMktData(
contract,
snapshot=True,
@ -855,7 +672,9 @@ class Client:
# async to be consistent for the client proxy, and cuz why not.
def submit_limit(
self,
oid: str, # ignored since ib doesn't support defining your own
# ignored since ib doesn't support defining your
# own order id
oid: str,
symbol: str,
price: float,
action: str,
@ -871,9 +690,6 @@ class Client:
'''
Place an order and return integer request id provided by client.
Relevant docs:
- https://interactivebrokers.github.io/tws-api/order_limitations.html
'''
try:
contract = self._contracts[symbol]
@ -899,9 +715,6 @@ class Client:
optOutSmartRouting=True,
routeMarketableToBbo=True,
designatedLocation='SMART',
# TODO: make all orders GTC?
# https://interactivebrokers.github.io/tws-api/classIBApi_1_1Order.html#a95539081751afb9980f4c6bd1655a6ba
# goodTillDate=f"yyyyMMdd-HH:mm:ss",
),
)
except AssertionError: # errrg insync..
@ -991,73 +804,6 @@ class Client:
return self.ib.positions(account=account)
def con2fqsn(
con: Contract,
_cache: dict[int, (str, bool)] = {}
) -> tuple[str, bool]:
'''
Convert contracts to fqsn-style strings to be used both in symbol-search
matching and as feed tokens passed to the front end data feed layer.
Previously seen contracts are cached by id.
'''
# should be real volume for this contract by default
calc_price = False
if con.conId:
try:
return _cache[con.conId]
except KeyError:
pass
suffix = con.primaryExchange or con.exchange
symbol = con.symbol
expiry = con.lastTradeDateOrContractMonth or ''
match con:
case Option():
# TODO: option symbol parsing and sane display:
symbol = con.localSymbol.replace(' ', '')
case ibis.Commodity():
# commodities and forex don't have an exchange name and
# no real volume so we have to calculate the price
suffix = con.secType
# no real volume on this tract
calc_price = True
case ibis.Forex() | ibis.Contract(secType='CASH'):
dst, src = con.localSymbol.split('.')
symbol = ''.join([dst, src])
suffix = con.exchange
# no real volume on forex feeds..
calc_price = True
if not suffix:
entry = _adhoc_symbol_map.get(
con.symbol or con.localSymbol
)
if entry:
meta, kwargs = entry
cid = meta.get('conId')
if cid:
assert con.conId == meta['conId']
suffix = meta['exchange']
# append a `.<suffix>` to the returned symbol
# key for derivatives that normally is the expiry
# date key.
if expiry:
suffix += f'.{expiry}'
fqsn_key = '.'.join((symbol, suffix)).lower()
_cache[con.conId] = fqsn_key, calc_price
return fqsn_key, calc_price
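As a quick illustration of the key scheme above, here's a minimal sketch using made-up contract fields (not real IB data):

.. code:: python

    # hypothetical contract fields mirroring ``con2fqsn()``'s inputs
    symbol = 'MES'
    suffix = 'GLOBEX'    # primaryExchange or exchange fallback
    expiry = '20230317'  # lastTradeDateOrContractMonth

    # derivatives get the expiry appended as a final suffix
    if expiry:
        suffix += f'.{expiry}'

    fqsn_key = '.'.join((symbol, suffix)).lower()
    assert fqsn_key == 'mes.globex.20230317'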
# per-actor API ep caching
_client_cache: dict[tuple[str, int], Client] = {}
_scan_ignore: set[tuple[str, int]] = set()
@ -1065,23 +811,10 @@ _scan_ignore: set[tuple[str, int]] = set()
def get_config() -> dict[str, Any]:
conf, path = config.load('brokers')
conf, path = config.load()
section = conf.get('ib')
accounts = section.get('accounts')
if not accounts:
raise ValueError(
'brokers.toml -> `ib.accounts` must be defined\n'
f'location: {path}'
)
names = list(accounts.keys())
accts = section['accounts'] = bidict(accounts)
log.info(
f'brokers.toml defines {len(accts)} accounts: '
f'{pformat(names)}'
)
if section is None:
log.warning(f'No config section found for ib in {path}')
return {}
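Given the stricter validation above, the ``brokers.toml`` section it expects looks roughly like the following sketch; the account names and numbers are purely illustrative:

.. code:: toml

    [ib.accounts]
    # human-friendly name -> IB account number; loaded into
    # a ``bidict`` so lookups work in both directions
    paper = "DU0000000"
    margin = "U0000000"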
@ -1103,7 +836,6 @@ async def load_aio_clients(
# retry a few times to get the client going..
connect_retries: int = 3,
connect_timeout: float = 0.5,
disconnect_on_exit: bool = True,
) -> dict[str, Client]:
'''
@ -1176,12 +908,6 @@ async def load_aio_clients(
# careful.
timeout=connect_timeout,
)
# create and cache client
client = Client(ib)
# update all actor-global caches
log.info(f"Caching client for {sockaddr}")
_client_cache[sockaddr] = client
break
except (
@ -1205,9 +931,21 @@ async def load_aio_clients(
log.warning(
f'Failed to connect on {port} for {i} time, retrying...')
# create and cache client
client = Client(ib)
# Pre-collect all accounts available for this
# connection and map account names to this client
# instance.
pps = ib.positions()
if pps:
for pp in pps:
accounts_found[
accounts_def.inverse[pp.account]
] = client
# if there are accounts without positions we should still
# register them for this client
for value in ib.accountValues():
acct_number = value.account
@ -1228,6 +966,10 @@ async def load_aio_clients(
f'{pformat(accounts_found)}'
)
# update all actor-global caches
log.info(f"Caching client for {sockaddr}")
_client_cache[sockaddr] = client
# XXX: why aren't we just updating this directly above
# instead of using the intermediary `accounts_found`?
_accounts2clients.update(accounts_found)
@ -1245,11 +987,10 @@ async def load_aio_clients(
finally:
# TODO: for re-scans we'll want to not teardown clients which
# are up and stable right?
if disconnect_on_exit:
for acct, client in _accounts2clients.items():
log.info(f'Disconnecting {acct}@{client}')
client.ib.disconnect()
_client_cache.pop((host, port), None)
_client_cache.pop((host, port))
async def load_clients_for_trio(
@ -1278,6 +1019,9 @@ async def load_clients_for_trio(
await asyncio.sleep(float('inf'))
_proxies: dict[str, MethodProxy] = {}
@acm
async def open_client_proxies() -> tuple[
dict[str, MethodProxy],
@ -1285,6 +1029,7 @@ async def open_client_proxies() -> tuple[
]:
async with (
tractor.trionics.maybe_open_context(
# acm_func=open_client_proxies,
acm_func=tractor.to_asyncio.open_channel_from,
kwargs={'target': load_clients_for_trio},
@ -1299,14 +1044,13 @@ async def open_client_proxies() -> tuple[
if cache_hit:
log.info(f'Re-using cached clients: {clients}')
proxies = {}
for acct_name, client in clients.items():
proxy = await stack.enter_async_context(
open_client_proxy(client),
)
proxies[acct_name] = proxy
_proxies[acct_name] = proxy
yield proxies, clients
yield _proxies, clients
def get_preferred_data_client(
@ -1455,13 +1199,11 @@ async def open_client_proxy(
event_table = {}
async with (
to_asyncio.open_channel_from(
open_aio_client_method_relay,
client=client,
event_consumers=event_table,
) as (first, chan),
trio.open_nursery() as relay_n,
):

File diff suppressed because it is too large

View File

@ -22,7 +22,6 @@ import asyncio
from contextlib import asynccontextmanager as acm
from dataclasses import asdict
from datetime import datetime
from functools import partial
from math import isnan
import time
from typing import (
@ -39,14 +38,10 @@ import tractor
import trio
from trio_typing import TaskStatus
from .._util import (
NoData,
DataUnavailable,
SymbolNotFound,
)
from piker.data._sharedmem import ShmArray
from .._util import SymbolNotFound, NoData
from .api import (
# _adhoc_futes_set,
con2fqsn,
_adhoc_futes_set,
log,
load_aio_clients,
ibis,
@ -107,7 +102,7 @@ async def open_data_client() -> MethodProxy:
@acm
async def open_history_client(
fqsn: str,
symbol: str,
) -> tuple[Callable, int]:
'''
@ -115,65 +110,26 @@ async def open_history_client(
that takes in ``pendulum.datetime`` and returns ``numpy`` arrays.
'''
# TODO:
# - add logic to handle tradable hours and only grab
# valid bars in the range?
# - we want to avoid overrunning the underlying shm array buffer and
# we should probably calc the number of calls to make depending on
# that until we have the `marketstore` daemon in place in which case
# the shm size will be driven by user config and available sys
# memory.
async with open_data_client() as proxy:
max_timeout: float = 2.
mean: float = 0
count: int = 0
head_dt = await proxy.get_head_time(fqsn=fqsn)
async def get_hist(
timeframe: float,
end_dt: Optional[datetime] = None,
start_dt: Optional[datetime] = None,
) -> tuple[np.ndarray, str]:
nonlocal max_timeout, mean, count
query_start = time.time()
out, timedout = await get_bars(
proxy,
fqsn,
timeframe,
end_dt=end_dt,
)
latency = time.time() - query_start
if (
not timedout
# and latency <= max_timeout
):
count += 1
mean += latency / count
print(
f'HISTORY FRAME QUERY LATENCY: {latency}\n'
f'mean: {mean}'
)
out, fails = await get_bars(proxy, symbol, end_dt=end_dt)
if (
out is None
):
# TODO: add logic here to handle tradable hours and only grab
# valid bars in the range
if out is None:
# could be trying to retrieve bars over the weekend
log.error(f"Can't grab bars starting at {end_dt}!?!?")
raise NoData(
f'{end_dt}',
# frame_size=2000,
frame_size=2000,
)
if (
end_dt and end_dt <= head_dt
):
raise DataUnavailable(f'First timestamp is {head_dt}')
bars, bars_array, first_dt, last_dt = out
# volume cleaning since there's -ve entries,
@ -188,7 +144,7 @@ async def open_history_client(
# quite sure why.. needs some tinkering and probably
# a look through the ``ib_insync`` machinery, eg. maybe
# we have to do the batch queries on the `asyncio` side?
yield get_hist, {'erlangs': 1, 'rate': 3}
yield get_hist, {'erlangs': 1, 'rate': 6}
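A rough sketch of how a consumer might drive the yielded pair; the frame-walking logic actually lives in the feed layer, so the caller name and the return shape of ``get_hist()`` here are assumptions:

.. code:: python

    import trio

    async def fetch_one_frame(fqsn: str):
        # hypothetical caller honoring the pacing hints yielded above
        async with open_history_client(fqsn) as (get_hist, pacing):
            rate: float = pacing.get('rate', 3)  # max queries/sec
            out = await get_hist(timeframe=60)   # most recent frame
            await trio.sleep(1 / rate)           # crude client-side pacing
            return out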
_pacing: str = (
@ -197,19 +153,96 @@ _pacing: str = (
)
async def wait_on_data_reset(
async def get_bars(
proxy: MethodProxy,
reset_type: str = 'data',
timeout: float = 16,
fqsn: str,
task_status: TaskStatus[
tuple[
trio.CancelScope,
trio.Event,
]
] = trio.TASK_STATUS_IGNORED,
) -> bool:
# blank to start which tells ib to look up the latest datum
end_dt: str = '',
) -> (dict, np.ndarray):
'''
Retrieve historical data from a ``trio``-side task using
a ``MethodProxy``.
'''
fails = 0
bars: Optional[list] = None
first_dt: datetime = None
last_dt: datetime = None
if end_dt:
last_dt = pendulum.from_timestamp(end_dt.timestamp())
for _ in range(10):
try:
out = await proxy.bars(
fqsn=fqsn,
end_dt=end_dt,
)
if out:
bars, bars_array = out
else:
await tractor.breakpoint()
if bars_array is None:
raise SymbolNotFound(fqsn)
first_dt = pendulum.from_timestamp(
bars[0].date.timestamp())
last_dt = pendulum.from_timestamp(
bars[-1].date.timestamp())
time = bars_array['time']
assert time[-1] == last_dt.timestamp()
assert time[0] == first_dt.timestamp()
log.info(
f'{len(bars)} bars retrieved for {first_dt} -> {last_dt}'
)
return (bars, bars_array, first_dt, last_dt), fails
except RequestError as err:
msg = err.message
# why do we always need to rebind this?
# _err = err
if 'No market data permissions for' in msg:
# TODO: signalling for no permissions searches
raise NoData(
f'Symbol: {fqsn}',
)
elif (
err.code == 162
and 'HMDS query returned no data' in err.message
):
# XXX: this is now done in the storage mgmt layer
# and we shouldn't implicitly decrement the frame dt
# index since the upper layer may be doing so
# concurrently and we don't want to be delivering frames
# that weren't asked for.
log.warning(
f'NO DATA found ending @ {end_dt}\n'
)
# try to decrement start point and look further back
# end_dt = last_dt = last_dt.subtract(seconds=2000)
raise NoData(
f'Symbol: {fqsn}',
frame_size=2000,
)
elif _pacing in msg:
log.warning(
'History throttle rate reached!\n'
'Resetting farms with `ctrl-alt-f` hack\n'
)
# TODO: we might have to put a task lock around this
# method..
hist_ev = proxy.status_event(
@ -225,259 +258,144 @@ async def wait_on_data_reset(
# live_ev = proxy.status_event(
# 'Market data farm connection is OK:usfuture'
# )
# try to wait on the reset event(s) to arrive, a timeout
# will trigger a retry up to 6 times (for now).
tries: int = 2
timeout: float = 10
done = trio.Event()
with trio.move_on_after(timeout) as cs:
task_status.started((cs, done))
# try 3 time with a data reset then fail over to
# a connection reset.
for i in range(1, tries):
log.warning('Sending DATA RESET request')
res = await data_reset_hack(reset_type=reset_type)
await data_reset_hack(reset_type='data')
if not res:
log.warning(
'NO VNC DETECTED!\n'
'Manually press ctrl-alt-f on your IB java app'
)
done.set()
return False
# TODO: not sure if waiting on other events
# is all that useful here or not.
# - in theory you could wait on one of the ones above first
# to verify the reset request was sent?
# - we need the same for real-time quote feeds which can
# sometimes flake out and stop delivering..
with trio.move_on_after(timeout) as cs:
for name, ev in [
# TODO: not sure if waiting on other events
# is all that useful here or not. in theory
# you could wait on one of the ones above
# first to verify the reset request was
# sent?
('history', hist_ev),
]:
await ev.wait()
log.info(f"{name} DATA RESET")
done.set()
return True
break
if cs.cancel_called:
if cs.cancelled_caught:
fails += 1
log.warning(
'Data reset task canceled?'
f'Data reset {name} timeout, retrying {i}.'
)
done.set()
return False
_data_resetter_task: trio.Task | None = None
async def get_bars(
proxy: MethodProxy,
fqsn: str,
timeframe: int,
# blank to start which tells ib to look up the latest datum
end_dt: str = '',
# TODO: make this more dynamic based on measured frame rx latency?
# how long before we trigger a feed reset (seconds)
feed_reset_timeout: float = 3,
# how many days to subtract before giving up on further
# history queries for instrument, presuming that most
# don't trade for over a week XD
max_nodatas: int = 6,
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,
) -> (dict, np.ndarray):
'''
Retrieve historical data from a ``trio``-side task using
a ``MethodProxy``.
'''
global _data_resetter_task
nodatas_count: int = 0
data_cs: trio.CancelScope | None = None
result: tuple[
ibis.objects.BarDataList,
np.ndarray,
datetime,
datetime,
] | None = None
result_ready = trio.Event()
async def query():
nonlocal result, data_cs, end_dt, nodatas_count
while True:
try:
out = await proxy.bars(
fqsn=fqsn,
end_dt=end_dt,
sample_period_s=timeframe,
# ideally we cancel the request just before we
# cancel on the ``trio``-side and trigger a data
# reset hack.. the problem is there's no way (with
# current impl) to detect a cancel case.
# timeout=timeout,
)
if out is None:
raise NoData(f'{end_dt}')
bars, bars_array, dt_duration = out
if not bars:
log.warning(
f'History is blank for {dt_duration} from {end_dt}'
)
end_dt -= dt_duration
continue
else:
if bars_array is None:
raise SymbolNotFound(fqsn)
log.warning('Sending CONNECTION RESET')
await data_reset_hack(reset_type='connection')
first_dt = pendulum.from_timestamp(
bars[0].date.timestamp())
with trio.move_on_after(timeout) as cs:
for name, ev in [
# TODO: not sure if waiting on other events
# is all that useful here or not. in theory
# you could wait on one of the ones above
# first to verify the reset request was
# sent?
('history', hist_ev),
]:
await ev.wait()
log.info(f"{name} DATA RESET")
last_dt = pendulum.from_timestamp(
bars[-1].date.timestamp())
time = bars_array['time']
assert time[-1] == last_dt.timestamp()
assert time[0] == first_dt.timestamp()
log.info(
f'{len(bars)} bars retrieved {first_dt} -> {last_dt}'
)
if data_cs:
data_cs.cancel()
result = (bars, bars_array, first_dt, last_dt)
# signal data reset loop parent task
result_ready.set()
return result
except RequestError as err:
msg = err.message
if 'No market data permissions for' in msg:
# TODO: signalling for no permissions searches
raise NoData(
f'Symbol: {fqsn}',
)
elif err.code == 162:
if (
'HMDS query returned no data' in msg
):
# XXX: this is now done in the storage mgmt
# layer and we shouldn't implicitly decrement
# the frame dt index since the upper layer may
# be doing so concurrently and we don't want to
# be delivering frames that weren't asked for.
# try to decrement start point and look further back
# end_dt = end_dt.subtract(seconds=2000)
logmsg = "SUBTRACTING DAY from DT index"
if end_dt is not None:
end_dt = end_dt.subtract(days=1)
elif end_dt is None:
end_dt = pendulum.now().subtract(days=1)
log.warning(
f'NO DATA found ending @ {end_dt}\n'
+ logmsg
)
if nodatas_count >= max_nodatas:
raise DataUnavailable(
f'Presuming {fqsn} has no further history '
f'after {max_nodatas} tries..'
)
nodatas_count += 1
continue
elif 'API historical data query cancelled' in err.message:
log.warning(
'Query cancelled by IB (:eyeroll:):\n'
f'{err.message}'
)
continue
elif (
'Trading TWS session is connected from a different IP'
in err.message
):
log.warning("ignoring ip address warning")
continue
# XXX: more or less same as above timeout case
elif _pacing in msg:
log.warning(
'History throttle rate reached!\n'
'Resetting farms with `ctrl-alt-f` hack\n'
)
# cancel any existing reset task
if data_cs:
data_cs.cancel()
# spawn new data reset task
data_cs, reset_done = await nurse.start(
partial(
wait_on_data_reset,
proxy,
timeout=float('inf'),
reset_type='connection'
)
)
continue
if cs.cancelled_caught:
fails += 1
log.warning('Data CONNECTION RESET timeout!?')
else:
raise
# TODO: make this global across all history task/requests
# such that simultaneous symbol queries don't try data resetting
# too fast..
unset_resetter: bool = False
async with trio.open_nursery() as nurse:
return None, None
# else: # throttle wasn't fixed so error out immediately
# raise _err
# start history request that we allow
# to run indefinitely until a result is acquired
nurse.start_soon(query)
# start history reset loop which waits up to the timeout
# for a result before triggering a data feed reset.
while not result_ready.is_set():
async def backfill_bars(
with trio.move_on_after(feed_reset_timeout):
await result_ready.wait()
break
fqsn: str,
shm: ShmArray, # type: ignore # noqa
if _data_resetter_task:
# don't double invoke the reset hack if another
# requester task already has it covered.
# TODO: we want to avoid overrunning the underlying shm array buffer
# and we should probably calc the number of calls to make depending
# on that until we have the `marketstore` daemon in place in which
# case the shm size will be driven by user config and available sys
# memory.
count: int = 16,
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,
) -> None:
'''
Fill historical bars into shared mem / storage afap.
TODO: avoid pacing constraints:
https://github.com/pikers/piker/issues/128
'''
# last_dt1 = None
last_dt = None
with trio.CancelScope() as cs:
async with open_data_client() as proxy:
out, fails = await get_bars(proxy, fqsn)
if out is None:
raise RuntimeError("Could not pull current history?!")
(first_bars, bars_array, first_dt, last_dt) = out
vlm = bars_array['volume']
vlm[vlm < 0] = 0
last_dt = first_dt
# write historical data to buffer
shm.push(bars_array)
task_status.started(cs)
i = 0
while i < count:
out, fails = await get_bars(proxy, fqsn, end_dt=first_dt)
if out is None:
# could be trying to retrieve bars over the weekend
# TODO: add logic here to handle tradable hours and
# only grab valid bars in the range
log.error(f"Can't grab bars starting at {first_dt}!?!?")
# XXX: get_bars() should internally decrement dt by
# 2k seconds and try again.
continue
else:
_data_resetter_task = trio.lowlevel.current_task()
unset_resetter = True
# spawn new data reset task
data_cs, reset_done = await nurse.start(
partial(
wait_on_data_reset,
proxy,
timeout=float('inf'),
)
)
# sync wait on reset to complete
await reset_done.wait()
(first_bars, bars_array, first_dt, last_dt) = out
# last_dt1 = last_dt
# last_dt = first_dt
_data_resetter_task = None if unset_resetter else _data_resetter_task
return result, data_cs is not None
# volume cleaning since there's -ve entries,
# wood luv to know what crookery that is..
vlm = bars_array['volume']
vlm[vlm < 0] = 0
# TODO we should probably dig into forums to see what peeps
# think this data "means" and then use it as an indicator of
# sorts? dinkus has mentioned that $vlms for the day don't
# match other platforms nor the summary stat tws shows in
# the monitor - it's probably worth investigating.
shm.push(bars_array, prepend=True)
i += 1
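Stripped of the broker-specific error handling, the concurrency pattern threaded through ``get_bars()`` above (an unbounded query task raced against a reset watchdog) reduces to roughly this sketch; all names are generic stand-ins:

.. code:: python

    from typing import Awaitable, Callable

    import trio

    async def query_with_watchdog(
        query: Callable[[], Awaitable],
        trigger_reset: Callable[[], Awaitable],
        timeout: float = 3,
    ):
        result = None
        ready = trio.Event()

        async def _query() -> None:
            nonlocal result
            result = await query()  # runs until a frame arrives
            ready.set()

        async with trio.open_nursery() as n:
            n.start_soon(_query)
            while not ready.is_set():
                with trio.move_on_after(timeout):
                    await ready.wait()
                    break
                # no frame within the timeout: poke the data farm
                await trigger_reset()

        return result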
asset_type_map = {
@ -495,7 +413,6 @@ asset_type_map = {
'WAR': 'warrant',
'IOPT': 'warrant',
'BAG': 'bag',
'CRYPTO': 'crypto', # bc it's diff than fiat?
# 'NEWS': 'news',
}
@ -535,9 +452,7 @@ async def _setup_quote_stream(
to_trio.send_nowait(None)
async with load_aio_clients(
disconnect_on_exit=False,
) as accts2clients:
async with load_aio_clients() as accts2clients:
caccount_name, client = get_preferred_data_client(accts2clients)
contract = contract or (await client.find_contract(symbol))
ticker: Ticker = client.ib.reqMktData(contract, ','.join(opts))
@ -583,11 +498,10 @@ async def _setup_quote_stream(
# Manually do the dereg ourselves.
teardown()
except trio.WouldBlock:
# log.warning(
# f'channel is blocking symbol feed for {symbol}?'
# f'\n{to_trio.statistics}'
# )
pass
log.warning(
f'channel is blocking symbol feed for {symbol}?'
f'\n{to_trio.statistics}'
)
# except trio.WouldBlock:
# # for slow debugging purposes to avoid clobbering prompt
@ -617,8 +531,7 @@ async def open_aio_quote_stream(
from_aio = _quote_streams.get(symbol)
if from_aio:
# if we already have a cached feed deliver a rx side clone
# to consumer
# if we already have a cached feed deliver a rx side clone to consumer
async with broadcast_receiver(
from_aio,
2**6,
@ -640,17 +553,38 @@ async def open_aio_quote_stream(
# TODO: cython/mypyc/numba this!
# or we can at least cache a majority of the values
# except for the ones we expect to change?..
def normalize(
ticker: Ticker,
calc_price: bool = False
) -> dict:
# should be real volume for this contract by default
calc_price = False
# check for special contract types
con = ticker.contract
fqsn, calc_price = con2fqsn(con)
if type(con) in (
ibis.Commodity,
ibis.Forex,
):
# commodities and forex don't have an exchange name and
# no real volume so we have to calculate the price
suffix = con.secType
# no real volume on this tract
calc_price = True
else:
suffix = con.primaryExchange
if not suffix:
suffix = con.exchange
# append a `.<suffix>` to the returned symbol
# key for derivatives that normally is the expiry
# date key.
expiry = con.lastTradeDateOrContractMonth
if expiry:
suffix += f'.{expiry}'
# convert named tuples to dicts so we send usable keys
new_ticks = []
@ -682,7 +616,9 @@ def normalize(
# generate fqsn with possible specialized suffix
# for derivatives, note the lowercase.
data['symbol'] = data['fqsn'] = fqsn
data['symbol'] = data['fqsn'] = '.'.join(
(con.symbol, suffix)
).lower()
# convert named tuples to dicts for transport
tbts = data.get('tickByTicks')
@ -747,13 +683,6 @@ async def stream_quotes(
# TODO: more consistent field translation
atype = syminfo['asset_type'] = asset_type_map[syminfo['secType']]
if atype in {
'forex',
'index',
'commodity',
}:
syminfo['no_vlm'] = True
# for stocks it seems TWS reports too small a tick size
# such that you can't submit orders with that granularity?
min_tick = 0.01 if atype == 'stock' else 0
@ -780,9 +709,9 @@ async def stream_quotes(
},
}
return init_msgs, syminfo
return init_msgs
init_msgs, syminfo = mk_init_msgs()
init_msgs = mk_init_msgs()
# TODO: we should instead spawn a task that waits on a feed to start
# and let it wait indefinitely..instead of this hard coded stuff.
@ -791,14 +720,7 @@ async def stream_quotes(
# it might be outside regular trading hours so see if we can at
# least grab history.
if (
isnan(first_ticker.last)
and type(first_ticker.contract) not in (
ibis.Commodity,
ibis.Forex,
ibis.Crypto,
)
):
if isnan(first_ticker.last):
task_status.started((init_msgs, first_quote))
# it's not really live but this will unblock
@ -809,77 +731,41 @@ async def stream_quotes(
await trio.sleep_forever()
return # we never expect feed to come up?
cs: Optional[trio.CancelScope] = None
startup: bool = True
while (
startup
or cs.cancel_called
):
with trio.CancelScope() as cs:
async with (
trio.open_nursery() as nurse,
open_aio_quote_stream(
async with open_aio_quote_stream(
symbol=sym,
contract=con,
) as stream,
):
) as stream:
# ugh, clear ticks since we've consumed them
# (ahem, ib_insync is stateful trash)
first_ticker.ticks = []
# only on first entry at feed boot up
if startup:
startup = False
task_status.started((init_msgs, first_quote))
# start a stream restarter task which monitors the
# data feed event.
async def reset_on_feed():
# TODO: this seems to be suppressed from the
# traceback in ``tractor``?
# assert 0
rt_ev = proxy.status_event(
'Market data farm connection is OK:usfarm'
)
await rt_ev.wait()
cs.cancel() # cancel called should now be set
nurse.start_soon(reset_on_feed)
async with aclosing(stream):
if syminfo.get('no_vlm', False):
# generally speaking these feeds don't
# include vlm data.
atype = syminfo['asset_type']
log.info(
f'No-vlm {sym}@{atype}, skipping quote poll'
)
else:
# wait for real volume on feed (trading might be
# closed)
if type(first_ticker.contract) not in (
ibis.Commodity,
ibis.Forex
):
# wait for real volume on feed (trading might be closed)
while True:
ticker = await stream.receive()
# for a real volume contract we wait for
# the first "real" trade to take place
# for a real volume contract we wait for the first
# "real" trade to take place
if (
# not calc_price
# and not ticker.rtTime
not ticker.rtTime
):
# spin consuming tickers until we
# get a real market datum
# spin consuming tickers until we get a real
# market datum
log.debug(f"New unsent ticker: {ticker}")
continue
else:
log.debug("Received first volume tick")
# ugh, clear ticks since we've
# consumed them (ahem, ib_insync is
# truly stateful trash)
log.debug("Received first real volume tick")
# ugh, clear ticks since we've consumed them
# (ahem, ib_insync is truly stateful trash)
ticker.ticks = []
# XXX: this works because we don't use
@ -919,9 +805,6 @@ async def data_reset_hack(
successful.
- other OS support?
- integration with ``ib-gw`` run in docker + Xorg?
- is it possible to offer a local server that can be accessed by
a client? Would sure be handy for running native java blobs
that need to be wrangled.
'''
@ -952,10 +835,7 @@ async def data_reset_hack(
client.mouse.click()
client.keyboard.press('Ctrl', 'Alt', key) # keys are stacked
try:
await tractor.to_asyncio.run_task(vnc_click_hack)
except OSError:
return False
# we don't really need the ``xdotool`` approach any more B)
return True
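For reference, the vnc side of the hack amounts to something like the sketch below; the ``asyncvnc.connect()`` call signature and the host/port/password values are assumptions and should be matched to your gateway container's VNC settings:

.. code:: python

    import asyncvnc

    async def vnc_click_hack(key: str = 'f') -> None:
        # placeholder addr/credentials, NOT this repo's config
        async with asyncvnc.connect(
            '127.0.0.1',
            port=5900,
            password='changeme',
        ) as client:
            client.mouse.click()
            client.keyboard.press('Ctrl', 'Alt', key)  # keys are stacked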
@ -970,30 +850,14 @@ async def open_symbol_search(
# TODO: load user defined symbol set locally for fast search?
await ctx.started({})
async with (
open_client_proxies() as (proxies, clients),
open_data_client() as data_proxy,
):
async with open_data_client() as proxy:
async with ctx.open_stream() as stream:
# select a non-history client for symbol search to lighten
# the load in the main data node.
proxy = data_proxy
for name, proxy in proxies.items():
if proxy is data_proxy:
continue
break
ib_client = proxy._aio_ns.ib
log.info(f'Using {ib_client} for symbol search')
last = time.time()
async for pattern in stream:
log.info(f'received {pattern}')
now = time.time()
# this causes tractor hang...
# assert 0
async for pattern in stream:
log.debug(f'received {pattern}')
now = time.time()
assert pattern, 'IB can not accept blank search pattern'
@ -1007,14 +871,7 @@ async def open_symbol_search(
except trio.WouldBlock:
pass
if (
not pattern
or pattern.isspace()
# XXX: not sure if this is a bad assumption but it
# seems to make search snappier?
or len(pattern) < 1
):
if not pattern or pattern.isspace():
log.warning('empty pattern received, skipping..')
# TODO: *BUG* if nothing is returned here the client
@ -1029,7 +886,7 @@ async def open_symbol_search(
continue
log.info(f'searching for {pattern}')
log.debug(f'searching for {pattern}')
last = time.time()
@ -1040,8 +897,6 @@ async def open_symbol_search(
async def stash_results(target: Awaitable[list]):
stock_results.extend(await target)
for i in range(10):
with trio.move_on_after(3) as cs:
async with trio.open_nursery() as sn:
sn.start_soon(
stash_results,
@ -1054,26 +909,17 @@ async def open_symbol_search(
# trigger async request
await trio.sleep(0)
if cs.cancelled_caught:
log.warning(
f'Search timeout? {proxy._aio_ns.ib.client}'
# match against our ad-hoc set immediately
adhoc_matches = fuzzy.extractBests(
pattern,
list(_adhoc_futes_set),
score_cutoff=90,
)
continue
else:
break
# # match against our ad-hoc set immediately
# adhoc_matches = fuzzy.extractBests(
# pattern,
# list(_adhoc_futes_set),
# score_cutoff=90,
# )
# log.info(f'fuzzy matched adhocs: {adhoc_matches}')
# adhoc_match_results = {}
# if adhoc_matches:
# # TODO: do we need to pull contract details?
# adhoc_match_results = {i[0]: {} for i in
# adhoc_matches}
log.info(f'fuzzy matched adhocs: {adhoc_matches}')
adhoc_match_results = {}
if adhoc_matches:
# TODO: do we need to pull contract details?
adhoc_match_results = {i[0]: {} for i in adhoc_matches}
log.debug(f'fuzzy matching stocks {stock_results}')
stock_matches = fuzzy.extractBests(
@ -1082,8 +928,7 @@ async def open_symbol_search(
score_cutoff=50,
)
# matches = adhoc_match_results | {
matches = {
matches = adhoc_match_results | {
item[0]: {} for item in stock_matches
}
# TODO: we used to deliver contract details

File diff suppressed because it is too large

View File

@ -1,64 +0,0 @@
``kraken`` backend
------------------
though they don't have the most liquidity of all the cexes they sure are
accommodating to those of us who appreciate a little ``xmr``.
status
******
current support is *production grade* and both real-time data and order
management should be correct and fast. this backend is used by core devs
for live trading.
config
******
In order to get order mode support your ``brokers.toml``
needs to have something like the following:
.. code:: toml
[kraken]
accounts.spot = 'spot'
key_descr = "spot"
api_key = "69696969696969696696969696969696969696969696969696969696"
secret = "BOOBSBOOBSBOOBSBOOBSBOOBSSMBZ69696969696969669969696969696"
If everything works correctly you should see any current positions
loaded in the pps pane on chart load and you should also be able to
check your trade records in the file::
<pikerk_conf_dir>/ledgers/trades_kraken_spot.toml
An example ledger file will have entries written verbatim from the
trade events schema:
.. code:: toml
[TFJBKK-SMBZS-VJ4UWS]
ordertxid = "SMBZSA-7CNQU-3HWLNJ"
postxid = "SMBZSE-M7IF5-CFI7LT"
pair = "XXMRZEUR"
time = 1655691993.4133966
type = "buy"
ordertype = "limit"
price = "103.97000000"
cost = "499.99999977"
fee = "0.80000000"
vol = "4.80907954"
margin = "0.00000000"
misc = ""
your ``pps.toml`` file will have position entries like,
.. code:: toml
[kraken.spot."xmreur.kraken"]
size = 4.80907954
ppu = 103.97000000
bsuid = "XXMRZEUR"
clears = [
{ tid = "TFJBKK-SMBZS-VJ4UWS", cost = 0.8, price = 103.97, size = 4.80907954, dt = "2022-05-20T02:26:33.413397+00:00" },
]

View File

@ -1,61 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Kraken backend.
Sub-modules within break into the core functionalities:
- ``broker.py`` part for orders / trading endpoints
- ``feed.py`` for real-time data feed endpoints
- ``api.py`` for the core API machinery which is ``trio``-ized
wrapping the kraken REST and websocket endpoints.
'''
from piker.log import get_logger
log = get_logger(__name__)
from .api import (
get_client,
)
from .feed import (
open_history_client,
open_symbol_search,
stream_quotes,
)
from .broker import (
trades_dialogue,
norm_trade_records,
)
__all__ = [
'get_client',
'trades_dialogue',
'open_history_client',
'open_symbol_search',
'stream_quotes',
'norm_trade_records',
]
# tractor RPC enable arg
__enable_modules__: list[str] = [
'api',
'feed',
'broker',
]

View File

@ -1,540 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Kraken web API wrapping.
'''
from contextlib import asynccontextmanager as acm
from datetime import datetime
import itertools
from typing import (
Any,
Optional,
Union,
)
import time
from bidict import bidict
import pendulum
import asks
from fuzzywuzzy import process as fuzzy
import numpy as np
import urllib.parse
import hashlib
import hmac
import base64
import trio
from piker import config
from piker.brokers._util import (
resproc,
SymbolNotFound,
BrokerError,
DataThrottle,
)
from piker.pp import Transaction
from . import log
# <uri>/<version>/
_url = 'https://api.kraken.com/0'
# Broker specific ohlc schema which includes a vwap field
_ohlc_dtype = [
('index', int),
('time', int),
('open', float),
('high', float),
('low', float),
('close', float),
('volume', float),
('count', int),
('bar_wap', float),
]
# UI components allow this to be declared such that additional
# (historical) fields can be exposed.
ohlc_dtype = np.dtype(_ohlc_dtype)
_show_wap_in_history = True
_symbol_info_translation: dict[str, str] = {
'tick_decimals': 'pair_decimals',
}
def get_config() -> dict[str, Any]:
conf, path = config.load()
section = conf.get('kraken')
if section is None:
log.warning(f'No config section found for kraken in {path}')
return {}
return section
def get_kraken_signature(
urlpath: str,
data: dict[str, Any],
secret: str
) -> str:
postdata = urllib.parse.urlencode(data)
encoded = (str(data['nonce']) + postdata).encode()
message = urlpath.encode() + hashlib.sha256(encoded).digest()
mac = hmac.new(base64.b64decode(secret), message, hashlib.sha512)
sigdigest = base64.b64encode(mac.digest())
return sigdigest.decode()
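A quick self-contained smoke test of the signer (dummy, non-functional credentials):

.. code:: python

    import base64
    import os
    import time

    # a random base64 "secret" - NOT a real API key
    fake_secret = base64.b64encode(os.urandom(64)).decode()
    data = {'nonce': str(int(1000 * time.time())), 'asset': 'XXBT'}

    sig = get_kraken_signature('/0/private/Balance', data, fake_secret)
    assert isinstance(sig, str) and len(sig) > 0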
class InvalidKey(ValueError):
'''
EAPI:Invalid key
This error is returned when the API key used for the call is
either expired or disabled, please review the API key in your
Settings -> API tab of account management or generate a new one
and update your application.
'''
class Client:
# global symbol normalization table
_ntable: dict[str, str] = {}
_atable: bidict[str, str] = bidict()
def __init__(
self,
config: dict[str, str],
name: str = '',
api_key: str = '',
secret: str = ''
) -> None:
self._sesh = asks.Session(connections=4)
self._sesh.base_location = _url
self._sesh.headers.update({
'User-Agent':
'krakenex/2.1.0 (+https://github.com/veox/python3-krakenex)'
})
self.conf: dict[str, str] = config
self._pairs: list[str] = []
self._name = name
self._api_key = api_key
self._secret = secret
@property
def pairs(self) -> dict[str, Any]:
if self._pairs is None:
raise RuntimeError(
"Make sure to run `cache_symbols()` on startup!"
)
# retrieve and cache all symbols
return self._pairs
async def _public(
self,
method: str,
data: dict,
) -> dict[str, Any]:
resp = await self._sesh.post(
path=f'/public/{method}',
json=data,
timeout=float('inf')
)
return resproc(resp, log)
async def _private(
self,
method: str,
data: dict,
uri_path: str
) -> dict[str, Any]:
headers = {
'Content-Type':
'application/x-www-form-urlencoded',
'API-Key':
self._api_key,
'API-Sign':
get_kraken_signature(uri_path, data, self._secret)
}
resp = await self._sesh.post(
path=f'/private/{method}',
data=data,
headers=headers,
timeout=float('inf')
)
return resproc(resp, log)
async def endpoint(
self,
method: str,
data: dict[str, Any]
) -> dict[str, Any]:
uri_path = f'/0/private/{method}'
data['nonce'] = str(int(1000*time.time()))
return await self._private(method, data, uri_path)
async def get_balances(
self,
) -> dict[str, float]:
'''
Return the set of asset balances for this account
by symbol.
'''
resp = await self.endpoint(
'Balance',
{},
)
by_bsuid = resp['result']
return {
self._atable[sym].lower(): float(bal)
for sym, bal in by_bsuid.items()
}
async def get_assets(self) -> dict[str, dict]:
resp = await self._public('Assets', {})
return resp['result']
async def cache_assets(self) -> None:
assets = self.assets = await self.get_assets()
for bsuid, info in assets.items():
self._atable[bsuid] = info['altname']
async def get_trades(
self,
fetch_limit: int = 10,
) -> dict[str, Any]:
'''
Get the trades (aka cleared orders) history from the rest endpoint:
https://docs.kraken.com/rest/#operation/getTradeHistory
'''
ofs = 0
trades_by_id: dict[str, Any] = {}
for i in itertools.count():
if i >= fetch_limit:
break
# increment 'ofs' pagination offset
ofs = i*50
resp = await self.endpoint(
'TradesHistory',
{'ofs': ofs},
)
by_id = resp['result']['trades']
trades_by_id.update(by_id)
# we can get up to 50 results per query
if (
len(by_id) < 50
):
err = resp.get('error')
if err:
raise BrokerError(err)
# we know we received the max amount of
# trade results so there may be more history.
# catch the end of the trades
count = resp['result']['count']
break
# sanity check on update
assert count == len(trades_by_id.values())
return trades_by_id
async def get_xfers(
self,
asset: str,
src_asset: str = '',
) -> dict[str, Transaction]:
'''
Get asset balance transfer transactions.
Currently only withdrawals are supported.
'''
xfers: list[dict] = (await self.endpoint(
'WithdrawStatus',
{'asset': asset},
))['result']
# eg. resp schema:
# 'result': [{'method': 'Bitcoin', 'aclass': 'currency', 'asset':
# 'XXBT', 'refid': 'AGBJRMB-JHD2M4-NDI3NR', 'txid':
# 'b95d66d3bb6fd76cbccb93f7639f99a505cb20752c62ea0acc093a0e46547c44',
# 'info': 'bc1qc8enqjekwppmw3g80p56z5ns7ze3wraqk5rl9z',
# 'amount': '0.00300726', 'fee': '0.00001000', 'time':
# 1658347714, 'status': 'Success'}]}
trans: dict[str, Transaction] = {}
for entry in xfers:
# look up the normalized name
asset = self._atable[entry['asset']].lower()
# XXX: this is in the asset units (likely) so it isn't
# quite the same as a commissions cost necessarily..)
cost = float(entry['fee'])
tran = Transaction(
fqsn=asset + '.kraken',
tid=entry['txid'],
dt=pendulum.from_timestamp(entry['time']),
bsuid=f'{asset}{src_asset}',
size=-1*(
float(entry['amount'])
+
cost
),
# since this will be treated as a "sell" it
# shouldn't be needed to compute the be price.
price='NaN',
# XXX: see note above
cost=0,
)
trans[tran.tid] = tran
return trans
async def submit_limit(
self,
symbol: str,
price: float,
action: str,
size: float,
reqid: str = None,
validate: bool = False # set True test call without a real submission
) -> dict:
'''
Place an order and return integer request id provided by client.
'''
# Build common data dict for common keys from both endpoints
data = {
"pair": symbol,
"price": str(price),
"validate": validate
}
if reqid is None:
# Build order data for kraken api
data |= {
"ordertype": "limit",
"type": action,
"volume": str(size),
}
return await self.endpoint('AddOrder', data)
else:
# Edit order data for kraken api
data["txid"] = reqid
return await self.endpoint('EditOrder', data)
async def submit_cancel(
self,
reqid: str,
) -> dict:
'''
Send cancel request for order id ``reqid``.
'''
# txid is a transaction id given by kraken
return await self.endpoint('CancelOrder', {"txid": reqid})
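A minimal sketch of a dry-run submission using the ``validate`` flag above (symbol and sizing are illustrative):

.. code:: python

    async def dry_run_order(client: Client) -> dict:
        # ``validate=True`` asks kraken to vet the order
        # without actually submitting it
        return await client.submit_limit(
            symbol='XXMRZEUR',
            price=100.0,
            action='buy',
            size=1.0,
            validate=True,
        )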
async def symbol_info(
self,
pair: Optional[str] = None,
) -> dict[str, dict[str, str]]:
if pair is not None:
pairs = {'pair': pair}
else:
pairs = None # get all pairs
resp = await self._public('AssetPairs', pairs)
err = resp['error']
if err:
symbolname = pairs['pair'] if pair else None
raise SymbolNotFound(f'{symbolname}.kraken')
pairs = resp['result']
if pair is not None:
_, data = next(iter(pairs.items()))
return data
else:
return pairs
async def cache_symbols(
self,
) -> dict:
if not self._pairs:
self._pairs = await self.symbol_info()
ntable = {}
for restapikey, info in self._pairs.items():
ntable[restapikey] = ntable[info['wsname']] = info['altname']
self._ntable.update(ntable)
return self._pairs
async def search_symbols(
self,
pattern: str,
limit: int = None,
) -> dict[str, Any]:
if self._pairs is not None:
data = self._pairs
else:
data = await self.symbol_info()
matches = fuzzy.extractBests(
pattern,
data,
score_cutoff=50,
)
# repack in dict form
return {item[0]['altname']: item[0] for item in matches}
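Note that ``extractBests()`` hands back ``(value, score, key)`` triples when given a mapping, hence the ``item[0]`` repacking above; a toy example:

.. code:: python

    from fuzzywuzzy import process as fuzzy

    pairs = {
        'XXBTZUSD': {'altname': 'XBTUSD'},
        'XETHZUSD': {'altname': 'ETHUSD'},
    }
    matches = fuzzy.extractBests('xbt', pairs, score_cutoff=50)

    # each match is (value, score, key); repack keyed by altname
    results = {item[0]['altname']: item[0] for item in matches}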
async def bars(
self,
symbol: str = 'XBTUSD',
# UTC 2017-07-02 12:53:20
since: Optional[Union[int, datetime]] = None,
count: int = 720, # <- max allowed per query
as_np: bool = True,
) -> dict:
if since is None:
since = pendulum.now('UTC').start_of('minute').subtract(
minutes=count).timestamp()
elif isinstance(since, int):
since = pendulum.from_timestamp(since).timestamp()
else: # presumably a pendulum datetime
since = since.timestamp()
# UTC 2017-07-02 12:53:20 is oldest seconds value
since = str(max(1499000000, int(since)))
json = await self._public(
'OHLC',
data={
'pair': symbol,
'since': since,
},
)
try:
res = json['result']
res.pop('last')
bars = next(iter(res.values()))
new_bars = []
first = bars[0]
last_nz_vwap = first[-3]
if last_nz_vwap == 0:
# use close if vwap is zero
last_nz_vwap = first[-4]
# convert all fields to native types
for i, bar in enumerate(bars):
# normalize weird zero-ed vwap values..cmon kraken..
# indicates vwap didn't change since last bar
vwap = float(bar.pop(-3))
if vwap != 0:
last_nz_vwap = vwap
if vwap == 0:
vwap = last_nz_vwap
# re-insert vwap as the last of the fields
bar.append(vwap)
new_bars.append(
(i,) + tuple(
ftype(bar[j]) for j, (name, ftype) in enumerate(
_ohlc_dtype[1:]
)
)
)
array = np.array(new_bars, dtype=_ohlc_dtype) if as_np else bars
return array
except KeyError:
errmsg = json['error'][0]
if 'not found' in errmsg:
raise SymbolNotFound(errmsg + f': {symbol}')
elif 'Too many requests' in errmsg:
raise DataThrottle(f'{symbol}')
else:
raise BrokerError(errmsg)
@classmethod
def normalize_symbol(
cls,
ticker: str
) -> str:
'''
Normalize symbol names to a 3x3 pair from the global
definition map which we build out from the data retrieved from
the 'AssetPairs' endpoint, see methods above.
'''
ticker = cls._ntable[ticker]
symlen = len(ticker)
if symlen != 6:
raise ValueError(f'Unhandled symbol: {ticker}')
return ticker.lower()
@acm
async def get_client() -> Client:
conf = get_config()
if conf:
client = Client(
conf,
name=conf['key_descr'],
api_key=conf['api_key'],
secret=conf['secret']
)
else:
client = Client({})
# at startup, load all symbols, and asset info in
# batch requests.
async with trio.open_nursery() as nurse:
nurse.start_soon(client.cache_assets)
await client.cache_symbols()
yield client
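Minimal usage of the factory above; the public endpoints (symbol and asset caching, bars) work even with an empty config:

.. code:: python

    import trio

    async def main() -> None:
        async with get_client() as client:
            bars = await client.bars('XBTUSD')
            print(bars[-1])

    trio.run(main)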

File diff suppressed because it is too large

View File

@ -1,502 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Real-time and historical data feed endpoints.
'''
from contextlib import asynccontextmanager as acm
from datetime import datetime
from typing import (
Any,
Optional,
Callable,
)
import time
from async_generator import aclosing
from fuzzywuzzy import process as fuzzy
import numpy as np
import pendulum
from trio_typing import TaskStatus
import tractor
import trio
from piker._cacheables import open_cached_client
from piker.brokers._util import (
BrokerError,
DataThrottle,
DataUnavailable,
)
from piker.log import get_console_log
from piker.data import ShmArray
from piker.data.types import Struct
from piker.data._web_bs import open_autorecon_ws, NoBsWs
from . import log
from .api import (
Client,
)
# https://www.kraken.com/features/api#get-tradable-pairs
class Pair(Struct):
altname: str # alternate pair name
wsname: str # WebSocket pair name (if available)
aclass_base: str # asset class of base component
base: str # asset id of base component
aclass_quote: str # asset class of quote component
quote: str # asset id of quote component
lot: str # volume lot size
cost_decimals: int
costmin: float
pair_decimals: int # scaling decimal places for pair
lot_decimals: int # scaling decimal places for volume
# amount to multiply lot volume by to get currency volume
lot_multiplier: float
# array of leverage amounts available when buying
leverage_buy: list[int]
# array of leverage amounts available when selling
leverage_sell: list[int]
# fee schedule array in [volume, percent fee] tuples
fees: list[tuple[int, float]]
# maker fee schedule array in [volume, percent fee] tuples (if on
# maker/taker)
fees_maker: list[tuple[int, float]]
fee_volume_currency: str # volume discount currency
margin_call: str # margin call level
margin_stop: str # stop-out/liquidation margin level
ordermin: float # minimum order volume for pair
class OHLC(Struct):
'''
Description of the flattened OHLC quote format.
For schema details see:
https://docs.kraken.com/websockets/#message-ohlc
'''
chan_id: int # internal kraken id
chan_name: str # eg. ohlc-1 (name-interval)
pair: str # fx pair
time: float # Begin time of interval, in seconds since epoch
etime: float # End time of interval, in seconds since epoch
open: float # Open price of interval
high: float # High price within interval
low: float # Low price within interval
close: float # Close price of interval
vwap: float # Volume weighted average price within interval
volume: float # Accumulated volume **within interval**
count: int # Number of trades within interval
# (sampled) generated tick data
ticks: list[Any] = []
async def stream_messages(
ws: NoBsWs,
):
'''
Message stream parser and heartbeat handler.
Deliver ws subscription messages as well as handle heartbeat logic
through a single async generator.
'''
too_slow_count = last_hb = 0
while True:
with trio.move_on_after(5) as cs:
msg = await ws.recv_msg()
# trigger reconnection if heartbeat is laggy
if cs.cancelled_caught:
too_slow_count += 1
if too_slow_count > 20:
log.warning(
"Heartbeat is too slow, resetting ws connection")
await ws._connect()
too_slow_count = 0
continue
match msg:
case {'event': 'heartbeat'}:
now = time.time()
delay = now - last_hb
last_hb = now
# XXX: why tf is this not printing without --tl flag?
log.debug(f"Heartbeat after {delay}")
# print(f"Heartbeat after {delay}")
continue
case _:
# passthrough sub msgs
yield msg
async def process_data_feed_msgs(
ws: NoBsWs,
):
'''
Parse and pack data feed messages.
'''
async for msg in stream_messages(ws):
match msg:
case {
'errorMessage': errmsg
}:
raise BrokerError(errmsg)
case {
'event': 'subscriptionStatus',
} as sub:
log.info(
'WS subscription is active:\n'
f'{sub}'
)
continue
case [
chan_id,
*payload_array,
chan_name,
pair
]:
if 'ohlc' in chan_name:
ohlc = OHLC(
chan_id,
chan_name,
pair,
*payload_array[0]
)
ohlc.typecast()
yield 'ohlc', ohlc
elif 'spread' in chan_name:
bid, ask, ts, bsize, asize = map(
float, payload_array[0])
# TODO: really makes you think IB has a horrible API...
quote = {
'symbol': pair.replace('/', ''),
'ticks': [
{'type': 'bid', 'price': bid, 'size': bsize},
{'type': 'bsize', 'price': bid, 'size': bsize},
{'type': 'ask', 'price': ask, 'size': asize},
{'type': 'asize', 'price': ask, 'size': asize},
],
}
yield 'l1', quote
# elif 'book' in msg[-2]:
# chan_id, *payload_array, chan_name, pair = msg
# print(msg)
case _:
print(f'UNHANDLED MSG: {msg}')
# yield msg
def normalize(
ohlc: OHLC,
) -> dict:
quote = ohlc.to_dict()
quote['broker_ts'] = quote['time']
quote['brokerd_ts'] = time.time()
quote['symbol'] = quote['pair'] = quote['pair'].replace('/', '')
quote['last'] = quote['close']
quote['bar_wap'] = ohlc.vwap
# seriously eh? what's with this non-symmetry everywhere
# in subscription systems...
# XXX: piker style is always lowercases symbols.
topic = quote['pair'].replace('/', '').lower()
# print(quote)
return topic, quote
@acm
async def open_history_client(
symbol: str,
) -> tuple[Callable, int]:
# TODO implement history getter for the new storage layer.
async with open_cached_client('kraken') as client:
# lol, kraken won't send any more than the "last"
# 720 1m bars.. so we have to just ignore further
# requests of this type..
queries: int = 0
async def get_ohlc(
timeframe: float,
end_dt: Optional[datetime] = None,
start_dt: Optional[datetime] = None,
) -> tuple[
np.ndarray,
datetime, # start
datetime, # end
]:
nonlocal queries
if queries > 0:
raise DataUnavailable
count = 0
while count <= 3:
try:
array = await client.bars(
symbol,
since=end_dt,
)
count += 1
queries += 1
break
except DataThrottle:
log.warning(f'kraken OHLC throttle for {symbol}')
await trio.sleep(1)
start_dt = pendulum.from_timestamp(array[0]['time'])
end_dt = pendulum.from_timestamp(array[-1]['time'])
return array, start_dt, end_dt
yield get_ohlc, {'erlangs': 1, 'rate': 1}
async def backfill_bars(
sym: str,
shm: ShmArray, # type: ignore # noqa
count: int = 10, # NOTE: any more and we'll overrun the underlying buffer
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,
) -> None:
'''
Fill historical bars into shared mem / storage afap.
'''
with trio.CancelScope() as cs:
async with open_cached_client('kraken') as client:
bars = await client.bars(symbol=sym)
shm.push(bars)
task_status.started(cs)
async def stream_quotes(
send_chan: trio.abc.SendChannel,
symbols: list[str],
feed_is_live: trio.Event,
loglevel: str = None,
# backend specific
sub_type: str = 'ohlc',
# startup sync
task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,
) -> None:
'''
Subscribe for ohlc stream of quotes for ``pairs``.
``pairs`` must be formatted <crypto_symbol>/<fiat_symbol>.
'''
# XXX: required to propagate ``tractor`` loglevel to piker logging
get_console_log(loglevel or tractor.current_actor().loglevel)
ws_pairs = {}
sym_infos = {}
async with open_cached_client('kraken') as client, send_chan as send_chan:
# keep client cached for real-time section
for sym in symbols:
# transform to upper since piker style is always lower
sym = sym.upper()
sym_info = await client.symbol_info(sym)
si = Pair(**sym_info) # validation
syminfo = si.to_dict()
syminfo['price_tick_size'] = 1 / 10**si.pair_decimals
syminfo['lot_tick_size'] = 1 / 10**si.lot_decimals
syminfo['asset_type'] = 'crypto'
sym_infos[sym] = syminfo
ws_pairs[sym] = si.wsname
symbol = symbols[0].lower()
init_msgs = {
# pass back token, and bool, signalling if we're the writer
# and that history has been written
symbol: {
'symbol_info': sym_infos[sym],
'shm_write_opts': {'sum_tick_vml': False},
'fqsn': sym,
},
}
@acm
async def subscribe(ws: NoBsWs):
# XXX: setup subs
# https://docs.kraken.com/websockets/#message-subscribe
# specific logic for this in kraken's sync client:
# https://github.com/krakenfx/kraken-wsclient-py/blob/master/kraken_wsclient_py/kraken_wsclient_py.py#L188
ohlc_sub = {
'event': 'subscribe',
'pair': list(ws_pairs.values()),
'subscription': {
'name': 'ohlc',
'interval': 1,
},
}
# TODO: we want to eventually allow unsubs which should
# be completely fine to request from a separate task
# since internally the ws methods appear to be FIFO
# locked.
await ws.send_msg(ohlc_sub)
# trade data (aka L1)
l1_sub = {
'event': 'subscribe',
'pair': list(ws_pairs.values()),
'subscription': {
'name': 'spread',
# 'depth': 10}
},
}
# pull a first quote and deliver
await ws.send_msg(l1_sub)
yield
# unsub from all pairs on teardown
await ws.send_msg({
'pair': list(ws_pairs.values()),
'event': 'unsubscribe',
'subscription': ['ohlc', 'spread'],
})
# XXX: do we need to ack the unsub?
# await ws.recv_msg()
# see the tips on reconnection logic:
# https://support.kraken.com/hc/en-us/articles/360044504011-WebSocket-API-unexpected-disconnections-from-market-data-feeds
ws: NoBsWs
async with (
open_autorecon_ws(
'wss://ws.kraken.com/',
fixture=subscribe,
) as ws,
aclosing(process_data_feed_msgs(ws)) as msg_gen,
):
# pull a first quote and deliver
typ, ohlc_last = await anext(msg_gen)
topic, quote = normalize(ohlc_last)
task_status.started((init_msgs, quote))
# lol, only "closes" when they're margin squeezing clients ;P
feed_is_live.set()
# keep start of last interval for volume tracking
last_interval_start = ohlc_last.etime
# start streaming
async for typ, ohlc in msg_gen:
if typ == 'ohlc':
# TODO: can get rid of all this by using
# ``trades`` subscription...
# generate tick values to match time & sales pane:
# https://trade.kraken.com/charts/KRAKEN:BTC-USD?period=1m
volume = ohlc.volume
# new OHLC sample interval
if ohlc.etime > last_interval_start:
last_interval_start = ohlc.etime
tick_volume = volume
else:
# this is the tick volume *within the interval*
tick_volume = volume - ohlc_last.volume
ohlc_last = ohlc
last = ohlc.close
if tick_volume:
ohlc.ticks.append({
'type': 'trade',
'price': last,
'size': tick_volume,
})
topic, quote = normalize(ohlc)
elif typ == 'l1':
quote = ohlc
topic = quote['symbol'].lower()
await send_chan.send({topic: quote})
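# (editor's sketch, not part of this diff) worked example of the
# intra-interval tick volume calc above: kraken ohlc updates carry the
# *cumulative* volume for the current interval, so per-tick volume is
# the difference vs the previous update within the same interval.
prev_vlm, cur_vlm = 1.25, 1.75
tick_volume = cur_vlm - prev_vlm  # 0.5 traded since the last update
assert tick_volume == 0.5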
@tractor.context
async def open_symbol_search(
ctx: tractor.Context,
) -> Client:
async with open_cached_client('kraken') as client:
# load all symbols locally for fast search
cache = await client.cache_symbols()
await ctx.started(cache)
async with ctx.open_stream() as stream:
async for pattern in stream:
matches = fuzzy.extractBests(
pattern,
cache,
score_cutoff=50,
)
# repack in dict form
await stream.send(
{item[0]['altname']: item[0]
for item in matches}
)
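# (editor's sketch, not part of this diff) minimal stand-alone demo of
# the fuzzy matching used above; string values are used here for
# simplicity whereas the real cache maps names to symbol-info dicts.
from fuzzywuzzy import process as fuzzy

cache = {
    'XXBTZUSD': 'XBT/USD',
    'XETHZUSD': 'ETH/USD',
}
# returns (value, score, key) tuples scoring above the cutoff
matches = fuzzy.extractBests('xbt', cache, score_cutoff=50)
print(matches)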

View File

@ -22,10 +22,54 @@ from enum import Enum
from typing import Optional
from bidict import bidict
from pydantic import BaseModel, validator
from ..data._source import Symbol
from ..data.types import Struct
from ..pp import Position
from ._messages import BrokerdPosition, Status
class Position(BaseModel):
'''
Basic pp (personal position) model with attached fills history.
This type should be IPC wire ready?
'''
symbol: Symbol
# last size and avg entry price
size: float
avg_price: float # TODO: contextual pricing
# ordered record of known constituent trade messages
fills: list[Status] = []
def update_from_msg(
self,
msg: BrokerdPosition,
) -> None:
# XXX: better place to do this?
symbol = self.symbol
lot_size_digits = symbol.lot_size_digits
avg_price, size = (
round(msg['avg_price'], ndigits=symbol.tick_size_digits),
round(msg['size'], ndigits=lot_size_digits),
)
self.avg_price = avg_price
self.size = size
@property
def dsize(self) -> float:
'''
The "dollar" size of the pp, normally in trading (fiat) unit
terms.
'''
return self.avg_price * self.size
_size_units = bidict({
@ -40,30 +84,33 @@ SizeUnit = Enum(
)
class Allocator(Struct):
class Allocator(BaseModel):
class Config:
validate_assignment = True
copy_on_model_validation = False
arbitrary_types_allowed = True
# required to get the account validator lookup working?
extra = 'allow'
underscore_attrs_are_private = False
symbol: Symbol
account: Optional[str] = 'paper'
_size_units: bidict[str, Optional[str]] = _size_units
# TODO: for enums this clearly doesn't fucking work, you can't set
# a default at startup by passing in a `dict` but yet you can set
# that value through assignment..for wtv cucked reason.. honestly, pure
# unintuitive garbage.
_size_unit: str = 'currency'
size_unit: str = 'currency'
_size_units: dict[str, Optional[str]] = _size_units
@property
def size_unit(self) -> str:
return self._size_unit
@size_unit.setter
def size_unit(self, v: str) -> Optional[str]:
@validator('size_unit', pre=True)
def maybe_lookup_key(cls, v):
# apply the corresponding enum key for the text "description" value
if v not in _size_units:
v = _size_units.inverse[v]
return _size_units.inverse[v]
assert v in _size_units
self._size_unit = v
return v
# TODO: if we ever want ot support non-uniform entry-slot-proportion
@ -93,13 +140,10 @@ class Allocator(Struct):
else:
return self.units_limit
def limit_info(self) -> tuple[str, float]:
return self.size_unit, self.limit()
def next_order_info(
self,
# we only need a startup size for exit calcs, we can then
# we only need a startup size for exit calcs, we can the
# determine how large slots should be if the initial pp size was
# larger than the current live one, and the live one is smaller
# than the initial config settings.
@ -129,7 +173,7 @@ class Allocator(Struct):
l_sub_pp = self.units_limit - abs_live_size
elif size_unit == 'currency':
live_cost_basis = abs_live_size * live_pp.ppu
live_cost_basis = abs_live_size * live_pp.avg_price
slot_size = currency_per_slot / price
l_sub_pp = (self.currency_limit - live_cost_basis) / price
@ -140,14 +184,12 @@ class Allocator(Struct):
# an entry (adding-to or starting a pp)
if (
action == 'buy' and live_size > 0 or
action == 'sell' and live_size < 0 or
live_size == 0
or (action == 'buy' and live_size > 0)
or action == 'sell' and live_size < 0
):
order_size = min(
slot_size,
max(l_sub_pp, 0),
)
order_size = min(slot_size, l_sub_pp)
# an exit (removing-from or going to net-zero pp)
else:
@ -163,7 +205,7 @@ class Allocator(Struct):
if size_unit == 'currency':
# compute the "projected" limit's worth of units at the
# current pp (weighted) price:
slot_size = currency_per_slot / live_pp.ppu
slot_size = currency_per_slot / live_pp.avg_price
else:
slot_size = u_per_slot
@ -202,12 +244,7 @@ class Allocator(Struct):
if order_size < slot_size:
# compute a fractional slots size to display
slots_used = self.slots_used(
Position(
symbol=sym,
size=order_size,
ppu=price,
bsuid=sym,
)
Position(symbol=sym, size=order_size, avg_price=price)
)
return {
@ -234,8 +271,8 @@ class Allocator(Struct):
abs_pp_size = abs(pp.size)
if self.size_unit == 'currency':
# live_currency_size = size or (abs_pp_size * pp.ppu)
live_currency_size = abs_pp_size * pp.ppu
# live_currency_size = size or (abs_pp_size * pp.avg_price)
live_currency_size = abs_pp_size * pp.avg_price
prop = live_currency_size / self.currency_limit
else:
@ -247,6 +284,14 @@ class Allocator(Struct):
return round(prop * self.slots)
_derivs = (
'future',
'continuous_future',
'option',
'futures_option',
)
def mk_allocator(
symbol: Symbol,
@ -255,7 +300,7 @@ def mk_allocator(
# default allocation settings
defaults: dict[str, float] = {
'account': None, # select paper by default
# 'size_unit': 'currency',
'size_unit': 'currency',
'units_limit': 400,
'currency_limit': 5e3,
'slots': 4,
@ -273,9 +318,42 @@ def mk_allocator(
'currency_limit': 6e3,
'slots': 6,
}
defaults.update(user_def)
return Allocator(
alloc = Allocator(
symbol=symbol,
**defaults,
)
asset_type = symbol.type_key
# specific configs by asset class / type
if asset_type in _derivs:
# since it's harder to know how currency "applies" in this case
# given leverage properties
alloc.size_unit = '# units'
# set units limit to slots size thus making the next
# entry step 1.0
alloc.units_limit = alloc.slots
# if the current position is already greater than the limit
# settings, increase the limit to the current position
if alloc.size_unit == 'currency':
startup_size = startup_pp.size * startup_pp.avg_price
if startup_size > alloc.currency_limit:
alloc.currency_limit = round(startup_size, ndigits=2)
else:
startup_size = abs(startup_pp.size)
if startup_size > alloc.units_limit:
alloc.units_limit = startup_size
if asset_type in _derivs:
alloc.slots = alloc.units_limit
return alloc
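# (editor's sketch, not part of this diff) the slot sizing arithmetic
# from `next_order_info()` above with the default `mk_allocator()`
# settings; numbers are illustrative.
currency_limit = 5e3  # default account limit
slots = 4             # number of entry steps
price = 250.0         # current clearing price

currency_per_slot = currency_limit / slots  # 1250.0 per entry
slot_size = currency_per_slot / price       # units bought per entry
assert slot_size == 5.0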

View File

@ -19,24 +19,25 @@ Orders and execution client API.
"""
from contextlib import asynccontextmanager as acm
from typing import Dict
from pprint import pformat
from dataclasses import dataclass, field
import trio
import tractor
from tractor.trionics import broadcast_receiver
from ..log import get_logger
from ..data.types import Struct
from ._ems import _emsd_main
from .._daemon import maybe_open_emsd
from ._messages import Order, Cancel
from ..brokers import get_brokermod
log = get_logger(__name__)
class OrderBook(Struct):
@dataclass
class OrderBook:
'''EMS-client-side order book ctl and tracking.
A style similar to "model-view" is used here where this api is
@ -51,18 +52,20 @@ class OrderBook(Struct):
# mem channels used to relay order requests to the EMS daemon
_to_ems: trio.abc.SendChannel
_from_order_book: trio.abc.ReceiveChannel
_sent_orders: dict[str, Order] = {}
_sent_orders: Dict[str, Order] = field(default_factory=dict)
_ready_to_receive: trio.Event = trio.Event()
def send(
self,
msg: Order | dict,
msg: Order,
) -> dict:
self._sent_orders[msg.oid] = msg
self._to_ems.send_nowait(msg)
self._to_ems.send_nowait(msg.dict())
return msg
def send_update(
def update(
self,
uuid: str,
@ -70,8 +73,9 @@ class OrderBook(Struct):
) -> dict:
cmd = self._sent_orders[uuid]
msg = cmd.copy(update=data)
self._sent_orders[uuid] = msg
msg = cmd.dict()
msg.update(data)
self._sent_orders[uuid] = Order(**msg)
self._to_ems.send_nowait(msg)
return cmd
@ -79,18 +83,12 @@ class OrderBook(Struct):
"""Cancel an order (or alert) in the EMS.
"""
cmd = self._sent_orders.get(uuid)
if not cmd:
log.error(
f'Unknown order {uuid}!?\n'
f'Maybe there is a stale entry or line?\n'
f'You should report this as a bug!'
)
cmd = self._sent_orders[uuid]
msg = Cancel(
oid=uuid,
symbol=cmd.symbol,
)
self._to_ems.send_nowait(msg)
self._to_ems.send_nowait(msg.dict())
_orders: OrderBook = None
@ -151,17 +149,10 @@ async def relay_order_cmds_from_sync_code(
book = get_orders()
async with book._from_order_book.subscribe() as orders_stream:
async for cmd in orders_stream:
sym = cmd.symbol
msg = pformat(cmd)
if sym == symbol_key:
log.info(f'Send order cmd:\n{msg}')
if cmd['symbol'] == symbol_key:
log.info(f'Send order cmd:\n{pformat(cmd)}')
# send msg over IPC / wire
await to_ems_stream.send(cmd)
else:
log.warning(
f'Ignoring unmatched order cmd for {sym} != {symbol_key}:'
f'\n{msg}'
)
@acm
@ -213,35 +204,20 @@ async def open_ems(
from ..data._source import unpack_fqsn
broker, symbol, suffix = unpack_fqsn(fqsn)
mode: str = 'live'
async with maybe_open_emsd(broker) as portal:
mod = get_brokermod(broker)
if not getattr(mod, 'trades_dialogue', None):
mode = 'paper'
async with (
# connect to emsd
portal.open_context(
_emsd_main,
fqsn=fqsn,
exec_mode=mode,
) as (
ctx,
(
positions,
accounts,
dialogs,
)
),
) as (ctx, (positions, accounts)),
# open 2-way trade command stream
ctx.open_stream() as trades_stream,
):
# start sync code order msg delivery task
async with trio.open_nursery() as n:
n.start_soon(
relay_order_cmds_from_sync_code,
@ -249,10 +225,4 @@ async def open_ems(
trades_stream
)
yield (
book,
trades_stream,
positions,
accounts,
dialogs,
)
yield book, trades_stream, positions, accounts
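# (editor's sketch, not part of this diff) hypothetical client-side
# use of the 5-tuple `open_ems()` variant above; the module paths,
# fqsn and order values are assumptions for illustration only.
import uuid
import trio
from piker.clearing._client import open_ems
from piker.clearing._messages import Order

async def main():
    async with open_ems('xbtusd.kraken') as (
        book, trades_stream, positions, accounts, dialogs,
    ):
        book.send(Order(
            oid=str(uuid.uuid4()),
            action='buy',
            exec_mode='dark',
            symbol='xbtusd.kraken',
            account=accounts[0],
            price=20e3,
            size=0.01,
        ))

# trio.run(main)  # requires a running `pikerd` and live data feed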

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@ -15,95 +15,22 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Clearing sub-system messages and protocols.
Clearing system messaging types and protocols.
"""
# from collections import (
# ChainMap,
# deque,
# )
from typing import (
Optional,
Literal,
)
from typing import Optional, Union
# TODO: try out just encoding/send direction for now?
# import msgspec
from pydantic import BaseModel
from ..data._source import Symbol
from ..data.types import Struct
# TODO: a composite for tracking msg flow on 2-legged
# dialogs.
# class Dialog(ChainMap):
# '''
# Msg collection abstraction to easily track the state changes of
# a msg flow in one high level, query-able and immutable construct.
# The main use case is to query data from a (long-running)
# msg-transaction-sequence
# '''
# def update(
# self,
# msg,
# ) -> None:
# self.maps.insert(0, msg.to_dict())
# def flatten(self) -> dict:
# return dict(self)
# TODO: ``msgspec`` stuff worth paying attention to:
# - schema evolution:
# https://jcristharif.com/msgspec/usage.html#schema-evolution
# - for eg. ``BrokerdStatus``, instead just have separate messages?
# - use literals for a common msg determined by diff keys?
# - https://jcristharif.com/msgspec/usage.html#literal
# --------------
# Client -> emsd
# --------------
class Order(Struct):
# TODO: ideally we can combine these 2 fields into
# 1 and just use the size polarity to determine a buy/sell.
# i would like to see this become more like
# https://jcristharif.com/msgspec/usage.html#literal
# action: Literal[
# 'live',
# 'dark',
# 'alert',
# ]
action: Literal[
'buy',
'sell',
'alert',
]
# determines whether the create execution
# will be submitted to the ems or directly to
# the backend broker
exec_mode: Literal[
'dark',
'live',
# 'paper', no right?
]
# internal ``emdsd`` unique "order id"
oid: str # uuid4
symbol: str | Symbol
account: str # should we set a default as '' ?
price: float
size: float # -ve is "sell", +ve is "buy"
brokers: Optional[list[str]] = []
class Cancel(Struct):
'''
Cancel msg for removing a dark (ems triggered) or
class Cancel(BaseModel):
'''Cancel msg for removing a dark (ems triggered) or
broker-submitted (live) trigger/order.
'''
@ -112,61 +39,82 @@ class Cancel(Struct):
symbol: str
# --------------
class Order(BaseModel):
action: str # {'buy', 'sell', 'alert'}
# internal ``emdsd`` unique "order id"
oid: str # uuid4
symbol: Union[str, Symbol]
account: str # should we set a default as '' ?
price: float
size: float
brokers: list[str]
# Assigned once initial ack is received
# ack_time_ns: Optional[int] = None
# determines whether the create execution
# will be submitted to the ems or directly to
# the backend broker
exec_mode: str # {'dark', 'live', 'paper'}
class Config:
# just for pre-loading a ``Symbol`` when used
# in the order mode staging process
arbitrary_types_allowed = True
# don't copy this model instance when used in
# a recursive model
copy_on_model_validation = False
# Client <- emsd
# --------------
# update msgs from ems which relay state change info
# from the active clearing engine.
class Status(Struct):
class Status(BaseModel):
name: str = 'status'
oid: str # uuid4
time_ns: int
oid: str # uuid4 ems-order dialog id
resp: Literal[
'pending', # acked by broker but not yet open
'open',
'dark_open', # dark/algo triggered order is open in ems clearing loop
'triggered', # above triggered order sent to brokerd, or an alert closed
'closed', # fully cleared all size/units
'fill', # partial execution
'canceled',
'error',
]
# {
# 'dark_submitted',
# 'dark_cancelled',
# 'dark_triggered',
# 'broker_submitted',
# 'broker_cancelled',
# 'broker_executed',
# 'broker_filled',
# 'broker_errored',
# 'alert_submitted',
# 'alert_triggered',
# }
resp: str # "response", see above
# symbol: str
# trigger info
trigger_price: Optional[float] = None
# price: float
# broker: Optional[str] = None
# this maps normally to the ``BrokerdOrder.reqid`` below, an id
# normally allocated internally by the backend broker routing system
reqid: Optional[int | str] = None
broker_reqid: Optional[Union[int, str]] = None
# the (last) source order/request msg if provided
# (eg. the Order/Cancel which causes this msg) and
# acts as a back-reference to the corresponding
# request message which was the source of this msg.
req: Order | None = None
# XXX: better design/name here?
# flag that can be set to indicate a message for an order
# event that wasn't originated by piker's emsd (eg. some external
# trading system which does it's own order control but that you
# might want to "track" using piker UIs/systems).
src: Optional[str] = None
# set when a cancel request msg was set for this order flow dialog
# but the brokerd dialog isn't yet in a cancelled state.
cancel_called: bool = False
# for relaying a boxed brokerd-dialog-side msg data "through" the
# ems layer to clients.
# for relaying backend msg data "through" the ems layer
brokerd_msg: dict = {}
# ---------------
# emsd -> brokerd
# ---------------
# requests *sent* from ems to respective backend broker daemon
class BrokerdCancel(Struct):
class BrokerdCancel(BaseModel):
action: str = 'cancel'
oid: str # piker emsd order id
@ -179,38 +127,34 @@ class BrokerdCancel(Struct):
# for setting a unique order id then this value will be relayed back
# on the emsd order request stream as the ``BrokerdOrderAck.reqid``
# field
reqid: Optional[int | str] = None
reqid: Optional[Union[int, str]] = None
class BrokerdOrder(Struct):
class BrokerdOrder(BaseModel):
action: str # {buy, sell}
oid: str
account: str
time_ns: int
# TODO: if we instead rely on a +ve/-ve size to determine
# the action we more or less don't need this field right?
action: str = '' # {buy, sell}
# "broker request id": broker specific/internal order id if this is
# None, creates a new order otherwise if the id is valid the backend
# api must modify the existing matching order. If the broker allows
# for setting a unique order id then this value will be relayed back
# on the emsd order request stream as the ``BrokerdOrderAck.reqid``
# field
reqid: Optional[int | str] = None
reqid: Optional[Union[int, str]] = None
symbol: str # fqsn
symbol: str # symbol.<providername> ?
price: float
size: float
# ---------------
# emsd <- brokerd
# ---------------
# requests *received* to ems from broker backend
class BrokerdOrderAck(Struct):
class BrokerdOrderAck(BaseModel):
'''
Immediate response to a brokerd order request providing the broker
specific unique order id so that the EMS can associate this
@ -221,32 +165,39 @@ class BrokerdOrderAck(Struct):
name: str = 'ack'
# defined and provided by backend
reqid: int | str
reqid: Union[int, str]
# emsd id originally sent in matching request msg
oid: str
account: str = ''
class BrokerdStatus(Struct):
class BrokerdStatus(BaseModel):
name: str = 'status'
reqid: int | str
reqid: Union[int, str]
time_ns: int
status: Literal[
'open',
'canceled',
'fill',
'pending',
'error',
]
account: str
# XXX: should be best effort set for every update
account: str = ''
# {
# 'submitted',
# 'cancelled',
# 'filled',
# }
status: str
filled: float = 0.0
reason: str = ''
remaining: float = 0.0
# external: bool = False
# XXX: better design/name here?
# flag that can be set to indicate a message for an order
# event that wasn't originated by piker's emsd (eg. some external
# trading system which does it's own order control but that you
# might want to "track" using piker UIs/systems).
external: bool = False
# XXX: not required schema as of yet
broker_details: dict = {
@ -254,21 +205,21 @@ class BrokerdStatus(Struct):
}
class BrokerdFill(Struct):
class BrokerdFill(BaseModel):
'''
A single message indicating a "fill-details" event from the broker
if available.
'''
name: str = 'fill'
reqid: int | str
reqid: Union[int, str]
time_ns: int
# order execution related
action: str
size: float
price: float
action: Optional[str] = None
broker_details: dict = {} # meta-data (eg. commissions etc.)
# brokerd timestamp required for order mode arrow placement on x-axis
@ -279,7 +230,7 @@ class BrokerdFill(Struct):
broker_time: float
class BrokerdError(Struct):
class BrokerdError(BaseModel):
'''
Optional error type that can be relayed to emsd for error handling.
@ -291,14 +242,14 @@ class BrokerdError(Struct):
# if no brokerd order request was actually submitted (eg. we errored
# at the ``pikerd`` layer) then there will be ``reqid`` allocated.
reqid: Optional[int | str] = None
reqid: Optional[Union[int, str]] = None
symbol: str
reason: str
broker_details: dict = {}
class BrokerdPosition(Struct):
class BrokerdPosition(BaseModel):
'''Position update event from brokerd.
'''
@ -307,6 +258,6 @@ class BrokerdPosition(Struct):
broker: str
account: str
symbol: str
currency: str
size: float
avg_price: float
currency: str = ''
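# (editor's sketch, not part of this diff) the wire round-trip these
# `Struct` msg types are built for, shown with plain `msgspec`; the
# field set is a trimmed stand-in, not the real msg schema.
from msgspec import Struct
from msgspec.msgpack import encode, decode

class DemoAck(Struct):
    name: str = 'ack'
    reqid: int | str = ''
    oid: str = ''
    account: str = ''

ack = DemoAck(reqid=42, oid='deadbeef-uuid4', account='paper')
wire = encode(ack)  # compact msgpack bytes over IPC
assert decode(wire, type=DemoAck) == ack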

View File

@ -18,71 +18,54 @@
Fake trading for forward testing.
"""
from collections import defaultdict
from contextlib import asynccontextmanager
from datetime import datetime
from operator import itemgetter
import itertools
import time
from typing import (
Any,
Optional,
Callable,
)
from typing import Tuple, Optional, Callable
import uuid
from bidict import bidict
import pendulum
import trio
import tractor
from dataclasses import dataclass
from .. import data
from ..data._source import Symbol
from ..data.types import Struct
from ..pp import (
Position,
Transaction,
)
from ..data._normalize import iterticks
from ..data._source import unpack_fqsn
from ..log import get_logger
from ._messages import (
BrokerdCancel,
BrokerdOrder,
BrokerdOrderAck,
BrokerdStatus,
BrokerdFill,
BrokerdPosition,
BrokerdError,
BrokerdCancel, BrokerdOrder, BrokerdOrderAck, BrokerdStatus,
BrokerdFill, BrokerdPosition, BrokerdError
)
log = get_logger(__name__)
class PaperBoi(Struct):
'''
Emulates a broker order client providing approximately the same API
and delivering an order-event response stream but with methods for
@dataclass
class PaperBoi:
"""
Emulates a broker order client providing the same API and
delivering an order-event response stream but with methods for
triggering desired events based on forward testing engine
requirements (eg open, closed, fill msgs).
requirements.
'''
"""
broker: str
ems_trades_stream: tractor.MsgStream
# map of paper "live" orders which be used
# to simulate fills based on paper engine settings
_buys: defaultdict[str, bidict]
_sells: defaultdict[str, bidict]
_buys: bidict
_sells: bidict
_reqids: bidict
_positions: dict[str, Position]
_trade_ledger: dict[str, Any]
_positions: dict[str, BrokerdPosition]
# init edge case L1 spread
last_ask: tuple[float, float] = (float('inf'), 0) # price, size
last_bid: tuple[float, float] = (0, 0)
last_ask: Tuple[float, float] = (float('inf'), 0) # price, size
last_bid: Tuple[float, float] = (0, 0)
async def submit_limit(
self,
@ -92,24 +75,27 @@ class PaperBoi(Struct):
action: str,
size: float,
reqid: Optional[str],
) -> int:
'''
Place an order and return integer request id provided by client.
"""Place an order and return integer request id provided by client.
"""
is_modify: bool = False
if reqid is None:
reqid = str(uuid.uuid4())
else:
# order is already existing, this is a modify
(oid, symbol, action, old_price) = self._reqids[reqid]
assert old_price != price
is_modify = True
# register order internally
self._reqids[reqid] = (oid, symbol, action, price)
'''
if action == 'alert':
# bypass all fill simulation
return reqid
entry = self._reqids.get(reqid)
if entry:
# order is already existing, this is a modify
(oid, symbol, action, old_price) = entry
else:
# register order internally
self._reqids[reqid] = (oid, symbol, action, price)
# TODO: net latency model
# we checkpoint here quickly particularly
# for dark orders since we want the dark_executed
@ -121,18 +107,15 @@ class PaperBoi(Struct):
size = -size
msg = BrokerdStatus(
status='open',
# account=f'paper_{self.broker}',
account='paper',
status='submitted',
reqid=reqid,
broker=self.broker,
time_ns=time.time_ns(),
filled=0.0,
reason='paper_trigger',
remaining=size,
broker_details={'name': 'paperboi'},
)
await self.ems_trades_stream.send(msg)
await self.ems_trades_stream.send(msg.dict())
# if we're already a clearing price simulate an immediate fill
if (
@ -140,28 +123,28 @@ class PaperBoi(Struct):
) or (
action == 'sell' and (clear_price := self.last_bid[0]) >= price
):
await self.fake_fill(
symbol,
clear_price,
size,
action,
reqid,
oid,
)
await self.fake_fill(symbol, clear_price, size, action, reqid, oid)
# register this submissions as a paper live order
else:
# set the simulated order in the respective table for lookup
# and trigger by the simulated clearing task normally
# running ``simulate_fills()``.
# register this submissions as a paper live order
# submit order to book simulation fill loop
if action == 'buy':
orders = self._buys
elif action == 'sell':
orders = self._sells
# {symbol -> bidict[oid, (<price data>)]}
orders[symbol][oid] = (price, size, reqid, action)
# set the simulated order in the respective table for lookup
# and trigger by the simulated clearing task normally
# running ``simulate_fills()``.
if is_modify:
# remove any existing order for the old price
orders[symbol].pop((oid, old_price))
# buys/sells: (symbol -> (price -> order))
orders.setdefault(symbol, {})[(oid, price)] = (size, reqid, action)
return reqid
@ -174,26 +157,26 @@ class PaperBoi(Struct):
oid, symbol, action, price = self._reqids[reqid]
if action == 'buy':
self._buys[symbol].pop(oid, None)
self._buys[symbol].pop((oid, price))
elif action == 'sell':
self._sells[symbol].pop(oid, None)
self._sells[symbol].pop((oid, price))
# TODO: net latency model
await trio.sleep(0.05)
msg = BrokerdStatus(
status='canceled',
account='paper',
status='cancelled',
oid=oid,
reqid=reqid,
broker=self.broker,
time_ns=time.time_ns(),
broker_details={'name': 'paperboi'},
)
await self.ems_trades_stream.send(msg)
await self.ems_trades_stream.send(msg.dict())
async def fake_fill(
self,
fqsn: str,
symbol: str,
price: float,
size: float,
action: str, # one of {'buy', 'sell'}
@ -207,21 +190,21 @@ class PaperBoi(Struct):
remaining: float = 0,
) -> None:
'''
Pretend to fill a broker order @ price and size.
"""Pretend to fill a broker order @ price and size.
'''
"""
# TODO: net latency model
await trio.sleep(0.05)
fill_time_ns = time.time_ns()
fill_time_s = time.time()
fill_msg = BrokerdFill(
msg = BrokerdFill(
reqid=reqid,
time_ns=fill_time_ns,
time_ns=time.time_ns(),
action=action,
size=size,
price=price,
broker_time=datetime.now().timestamp(),
broker_details={
'paper_info': {
@ -231,67 +214,79 @@ class PaperBoi(Struct):
'name': self.broker + '_paper',
},
)
log.info(f'Fake filling order:\n{fill_msg}')
await self.ems_trades_stream.send(fill_msg)
self._trade_ledger.update(fill_msg.to_dict())
await self.ems_trades_stream.send(msg.dict())
if order_complete:
msg = BrokerdStatus(
reqid=reqid,
time_ns=time.time_ns(),
# account=f'paper_{self.broker}',
account='paper',
status='closed',
status='filled',
filled=size,
remaining=0 if order_complete else remaining,
)
await self.ems_trades_stream.send(msg)
# lookup any existing position
key = fqsn.rstrip(f'.{self.broker}')
pp = self._positions.setdefault(
fqsn,
Position(
Symbol(
key=key,
broker_info={self.broker: {}},
),
size=size,
ppu=price,
bsuid=key,
)
)
t = Transaction(
fqsn=fqsn,
tid=oid,
action=action,
size=size,
price=price,
cost=0, # TODO: cost model
dt=pendulum.from_timestamp(fill_time_s),
bsuid=key,
)
pp.add_clear(t)
pp_msg = BrokerdPosition(
broker_details={
'paper_info': {
'oid': oid,
},
'name': self.broker,
},
)
await self.ems_trades_stream.send(msg.dict())
# lookup any existing position
token = f'{symbol}.{self.broker}'
pp_msg = self._positions.setdefault(
token,
BrokerdPosition(
broker=self.broker,
account='paper',
symbol=fqsn,
symbol=symbol,
# TODO: we need to look up the asset currency from
# broker info. i guess for crypto this can be
# inferred from the pair?
currency='',
size=pp.size,
avg_price=pp.ppu,
size=0.0,
avg_price=0,
)
)
await self.ems_trades_stream.send(pp_msg)
# "avg position price" calcs
# TODO: eventually it'd be nice to have a small set of routines
# to do this stuff from a sequence of cleared orders to enable
# so called "contextual positions".
new_size = size + pp_msg.size
# old size minus the new size gives us size differential with
# +ve -> increase in pp size
# -ve -> decrease in pp size
size_diff = abs(new_size) - abs(pp_msg.size)
if new_size == 0:
pp_msg.avg_price = 0
elif size_diff > 0:
# only update the "average position price" when the position
# size increases not when it decreases (i.e. the position is
# being made smaller)
pp_msg.avg_price = (
abs(size) * price + pp_msg.avg_price * abs(pp_msg.size)
) / abs(new_size)
pp_msg.size = new_size
await self.ems_trades_stream.send(pp_msg.dict())
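# (editor's sketch, not part of this diff) worked example of the
# weighted "avg position price" update above; numbers illustrative.
size, price = 2.0, 110.0      # incoming fill
pp_size, pp_avg = 3.0, 100.0  # existing position
new_size = size + pp_size     # 5.0 -> position grew

# size increased, so re-weight the entry price by fill size:
avg = (abs(size) * price + pp_avg * abs(pp_size)) / abs(new_size)
assert avg == 104.0  # (220 + 300) / 5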
async def simulate_fills(
quote_stream: tractor.MsgStream, # noqa
quote_stream: 'tractor.ReceiveStream', # noqa
client: PaperBoi,
) -> None:
# TODO: more machinery to better simulate real-world market things:
@ -311,116 +306,61 @@ async def simulate_fills(
# this stream may eventually contain multiple symbols
async for quotes in quote_stream:
for sym, quote in quotes.items():
for tick in iterticks(
quote,
# dark order price filter(s)
types=('ask', 'bid', 'trade', 'last')
):
tick_price = tick['price']
# print(tick)
tick_price = tick.get('price')
ttype = tick['type']
buys: bidict[str, tuple] = client._buys[sym]
iter_buys = reversed(sorted(
buys.values(),
key=itemgetter(0),
))
if ttype in ('ask',):
def buy_on_ask(our_price):
return tick_price <= our_price
sells: bidict[str, tuple] = client._sells[sym]
iter_sells = sorted(
sells.values(),
key=itemgetter(0)
)
def sell_on_bid(our_price):
return tick_price >= our_price
match tick:
# on an ask queue tick, only clear buy entries
case {
'price': tick_price,
'type': 'ask',
}:
client.last_ask = (
tick_price,
tick.get('size', client.last_ask[1]),
)
iter_entries = zip(
iter_buys,
itertools.repeat(buy_on_ask)
)
orders = client._buys.get(sym, {})
book_sequence = reversed(
sorted(orders.keys(), key=itemgetter(1)))
def pred(our_price):
return tick_price < our_price
elif ttype in ('bid',):
# on a bid queue tick, only clear sell entries
case {
'price': tick_price,
'type': 'bid',
}:
client.last_bid = (
tick_price,
tick.get('size', client.last_bid[1]),
)
iter_entries = zip(
iter_sells,
itertools.repeat(sell_on_bid)
)
orders = client._sells.get(sym, {})
book_sequence = sorted(orders.keys(), key=itemgetter(1))
# TODO: fix this block, though it definitely
# costs a lot more CPU-wise
# - doesn't seem like clears are happening still on
# "resting" limit orders?
case {
'price': tick_price,
'type': ('trade' | 'last'),
}:
# in the clearing price / last price case we
# want to iterate both sides of our book for
# clears since we don't know which direction the
# price is going to move (especially with HFT)
# and thus we simply interleave both sides (buys
# and sells) until one side clears and then
# break until the next tick?
def interleave():
for pair in zip(
iter_buys,
iter_sells,
):
for order_info, pred in zip(
pair,
itertools.cycle([buy_on_ask, sell_on_bid]),
):
yield order_info, pred
def pred(our_price):
return tick_price > our_price
iter_entries = interleave()
# NOTE: all other (non-clearable) tick event types
# - we don't want to spin the simulated clear loop
# below unnecessarily and further don't want to pop
# simulated live orders prematurely.
case _:
elif ttype in ('trade', 'last'):
# TODO: simulate actual book queues and our orders
# place in it, might require full L2 data?
continue
# iterate all potentially clearable book prices
# in FIFO order per side.
for order_info, pred in iter_entries:
(our_price, size, reqid, action) = order_info
# iterate book prices descending
for oid, our_price in book_sequence:
if pred(our_price):
# print(order_info)
clearable = pred(our_price)
if clearable:
# pop and retrieve order info
oid = {
'buy': buys,
'sell': sells
}[action].inverse.pop(order_info)
# retrieve order info
(size, reqid, action) = orders.pop((oid, our_price))
# clearing price would have filled entirely
await client.fake_fill(
fqsn=sym,
symbol=sym,
# todo slippage to determine fill price
price=tick_price,
size=size,
@ -428,6 +368,9 @@ async def simulate_fills(
reqid=reqid,
oid=oid,
)
else:
# prices are iterated in sorted order so we're done
break
async def handle_order_requests(
@ -437,83 +380,68 @@ async def handle_order_requests(
) -> None:
request_msg: dict
# order_request: dict
async for request_msg in ems_order_stream:
match request_msg:
case {'action': ('buy' | 'sell')}:
order = BrokerdOrder(**request_msg)
account = order.account
# error on bad inputs
reason = None
action = request_msg['action']
if action in {'buy', 'sell'}:
account = request_msg['account']
if account != 'paper':
reason = f'No account found:`{account}` (paper only)?'
elif order.size == 0:
reason = 'Invalid size: 0'
if reason:
log.error(reason)
log.error(
'This is a paper account, only a `paper` selection is valid'
)
await ems_order_stream.send(BrokerdError(
oid=order.oid,
symbol=order.symbol,
reason=reason,
))
oid=request_msg['oid'],
symbol=request_msg['symbol'],
reason=f'Paper only. No account found: `{account}` ?',
).dict())
continue
reqid = order.reqid or str(uuid.uuid4())
# deliver ack that order has been submitted to broker routing
await ems_order_stream.send(
BrokerdOrderAck(
oid=order.oid,
reqid=reqid,
)
)
# validate
order = BrokerdOrder(**request_msg)
# call our client api to submit the order
reqid = await client.submit_limit(
oid=order.oid,
symbol=f'{order.symbol}.{client.broker}',
symbol=order.symbol,
price=order.price,
action=order.action,
size=order.size,
# XXX: by default 0 tells ``ib_insync`` methods that
# there is no existing order so ask the client to create
# a new one (which it seems to do by allocating an int
# counter - collision prone..)
reqid=reqid,
reqid=order.reqid,
)
log.info(f'Submitted paper LIMIT {reqid}:\n{order}')
case {'action': 'cancel'}:
# deliver ack that order has been submitted to broker routing
await ems_order_stream.send(
BrokerdOrderAck(
# ems order request id
oid=order.oid,
# broker specific request id
reqid=reqid,
).dict()
)
elif action == 'cancel':
msg = BrokerdCancel(**request_msg)
await client.submit_cancel(
reqid=msg.reqid
)
case _:
else:
log.error(f'Unknown order command: {request_msg}')
_reqids: bidict[str, tuple] = {}
_buys: defaultdict[
str, # symbol
bidict[
str, # oid
tuple[float, float, str, str], # order info
]
] = defaultdict(bidict)
_sells: defaultdict[
str, # symbol
bidict[
str, # oid
tuple[float, float, str, str], # order info
]
] = defaultdict(bidict)
_positions: dict[str, Position] = {}
@tractor.context
async def trades_dialogue(
@ -523,59 +451,39 @@ async def trades_dialogue(
loglevel: str = None,
) -> None:
tractor.log.get_console_log(loglevel)
async with (
data.open_feed(
[fqsn],
loglevel=loglevel,
) as feed,
):
pp_msgs: list[BrokerdPosition] = []
pos: Position
token: str # f'{symbol}.{self.broker}'
for token, pos in _positions.items():
pp_msgs.append(BrokerdPosition(
broker=broker,
account='paper',
symbol=pos.symbol.front_fqsn(),
size=pos.size,
avg_price=pos.ppu,
))
# TODO: load paper positions per broker from .toml config file
# and pass as symbol to position data mapping: ``dict[str, dict]``
await ctx.started((
pp_msgs,
['paper'],
))
# await ctx.started(all_positions)
await ctx.started(({}, {'paper',}))
async with (
ctx.open_stream() as ems_stream,
trio.open_nursery() as n,
):
client = PaperBoi(
broker,
ems_stream,
_buys=_buys,
_sells=_sells,
_buys={},
_sells={},
_reqids=_reqids,
_reqids={},
# TODO: load paper positions from ``positions.toml``
_positions=_positions,
# TODO: load postions from ledger file
_trade_ledger={},
_positions={},
)
n.start_soon(
handle_order_requests,
client,
ems_stream,
)
n.start_soon(handle_order_requests, client, ems_stream)
# paper engine simulator clearing task
await simulate_fills(feed.stream, client)
@ -603,7 +511,6 @@ async def open_paperboi(
# (we likely don't need more then one proc for basic
# simulated order clearing)
if portal is None:
log.info('Starting new paper-engine actor')
portal = await tn.start_actor(
service_name,
enable_modules=[__name__]
@ -616,4 +523,5 @@ async def open_paperboi(
loglevel=loglevel,
) as (ctx, first):
yield ctx, first

View File

@ -27,11 +27,7 @@ import tractor
from ..log import get_console_log, get_logger, colorize_json
from ..brokers import get_brokermod
from .._daemon import (
_tractor_kwargs,
_registry_host,
_registry_port,
)
from .._daemon import _tractor_kwargs
from .. import config
@ -43,21 +39,13 @@ DEFAULT_BROKER = 'questrade'
@click.option('--loglevel', '-l', default='warning', help='Logging level')
@click.option('--tl', is_flag=True, help='Enable tractor logging')
@click.option('--pdb', is_flag=True, help='Enable tractor debug mode')
@click.option('--host', '-h', default=None, help='Host addr to bind')
@click.option('--port', '-p', default=None, help='Port number to bind')
@click.option('--host', '-h', default='127.0.0.1', help='Host address to bind')
@click.option(
'--tsdb',
is_flag=True,
help='Enable local ``marketstore`` instance'
)
def pikerd(
loglevel: str,
host: str,
port: int,
tl: bool,
pdb: bool,
tsdb: bool,
):
def pikerd(loglevel, host, tl, pdb, tsdb):
'''
Spawn the piker broker-daemon.
@ -74,21 +62,12 @@ def pikerd(
"\n"
))
reg_addr: None | tuple[str, int] = None
if host or port:
reg_addr = (
host or _registry_host,
int(port) or _registry_port,
)
async def main():
async with (
open_pikerd(
loglevel=loglevel,
debug_mode=pdb,
registry_addr=reg_addr,
), # normally delivers a ``Services`` handle
trio.open_nursery() as n,
):
@ -104,9 +83,9 @@ def pikerd(
)
log.info(
f'`marketstored` up!\n'
f'pid: {pid}\n'
f'container id: {cid[:12]}\n'
f'`marketstore` up!\n'
f'`marketstored` pid: {pid}\n'
f'docker container id: {cid}\n'
f'config: {pformat(config)}'
)
@ -125,19 +104,8 @@ def pikerd(
@click.option('--loglevel', '-l', default='warning', help='Logging level')
@click.option('--tl', is_flag=True, help='Enable tractor logging')
@click.option('--configdir', '-c', help='Configuration directory')
@click.option('--host', '-h', default=None, help='Host addr to bind')
@click.option('--port', '-p', default=None, help='Port number to bind')
@click.pass_context
def cli(
ctx: click.Context,
brokers: list[str],
loglevel: str,
tl: bool,
configdir: str,
host: str,
port: int,
) -> None:
def cli(ctx, brokers, loglevel, tl, configdir):
if configdir is not None:
assert os.path.isdir(configdir), f"`{configdir}` is not a valid path"
config._override_config_dir(configdir)
@ -149,13 +117,6 @@ def cli(
else:
brokermods = [get_brokermod(broker) for broker in brokers]
reg_addr: None | tuple[str, int] = None
if host or port:
reg_addr = (
host or _registry_host,
int(port) or _registry_port,
)
ctx.obj.update({
'brokers': brokers,
'brokermods': brokermods,
@ -164,7 +125,6 @@ def cli(
'log': get_console_log(loglevel),
'confdir': config._config_dir,
'wl_path': config._watchlists_data_path,
'registry_addr': reg_addr,
})
# allow enabling same loglevel in ``tractor`` machinery
@ -178,26 +138,25 @@ def cli(
@click.pass_obj
def services(config, tl, names):
from .._daemon import open_piker_runtime
async def list_services():
async with (
open_piker_runtime(
name='service_query',
loglevel=config['loglevel'] if tl else None,
),
tractor.get_arbiter(
async with tractor.get_arbiter(
*_tractor_kwargs['arbiter_addr']
) as portal
):
) as portal:
registry = await portal.run_from_ns('self', 'get_registry')
json_d = {}
for key, socket in registry.items():
# name, uuid = uid
host, port = socket
json_d[key] = f'{host}:{port}'
click.echo(f"{colorize_json(json_d)}")
trio.run(list_services)
tractor.run(
list_services,
name='service_query',
loglevel=config['loglevel'] if tl else None,
arbiter_addr=_tractor_kwargs['arbiter_addr'],
)
def _load_clis() -> None:

View File

@ -21,7 +21,6 @@ Broker configuration mgmt.
import platform
import sys
import os
from os import path
from os.path import dirname
import shutil
from typing import Optional
@ -112,7 +111,6 @@ if _parent_user:
_conf_names: set[str] = {
'brokers',
'pps',
'trades',
'watchlists',
}
@ -149,21 +147,19 @@ def get_conf_path(
conf_name: str = 'brokers',
) -> str:
'''
Return the top-level default config path normally under
``~/.config/piker`` on linux for a given ``conf_name``, the config
name.
"""Return the default config path normally under
``~/.config/piker`` on linux.
Contains files such as:
- brokers.toml
- pp.toml
- watchlists.toml
- trades.toml
# maybe coming soon ;)
- signals.toml
- strats.toml
'''
"""
assert conf_name in _conf_names
fn = _conf_fn_w_ext(conf_name)
return os.path.join(
@ -177,7 +173,7 @@ def repodir():
Return the abspath to the repo directory.
'''
dirpath = path.abspath(
dirpath = os.path.abspath(
# we're 3 levels down in **this** module file
dirname(dirname(os.path.realpath(__file__)))
)
@ -186,9 +182,7 @@ def repodir():
def load(
conf_name: str = 'brokers',
path: str = None,
**tomlkws,
path: str = None
) -> (dict, str):
'''
@ -196,7 +190,6 @@ def load(
'''
path = path or get_conf_path(conf_name)
if not os.path.isfile(path):
fn = _conf_fn_w_ext(conf_name)
@ -209,11 +202,8 @@ def load(
# if one exists.
if os.path.isfile(template):
shutil.copyfile(template, path)
else:
with open(path, 'w'):
pass # touch
config = toml.load(path, **tomlkws)
config = toml.load(path)
log.debug(f"Read config file {path}")
return config, path
@ -222,7 +212,6 @@ def write(
config: dict, # toml config as dict
name: str = 'brokers',
path: str = None,
**toml_kwargs,
) -> None:
''''
@ -246,14 +235,11 @@ def write(
f"{path}"
)
with open(path, 'w') as cf:
return toml.dump(
config,
cf,
**toml_kwargs,
)
return toml.dump(config, cf)
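# (editor's sketch, not part of this diff) the toml round-trip that
# `load()`/`write()` above wrap, using the `toml` lib directly; the
# file path and contents are illustrative.
import toml

conf = {'kraken': {'key_descr': 'api_0'}}
with open('/tmp/brokers.toml', 'w') as cf:
    toml.dump(conf, cf)
assert toml.load('/tmp/brokers.toml') == conf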
def load_accounts(
providers: Optional[list[str]] = None
) -> bidict[str, Optional[str]]:

View File

@ -37,13 +37,8 @@ from docker.models.containers import Container as DockerContainer
from docker.errors import (
DockerException,
APIError,
# ContainerError,
)
import requests
from requests.exceptions import (
ConnectionError,
ReadTimeout,
)
from requests.exceptions import ConnectionError, ReadTimeout
from ..log import get_logger, get_console_log
from .. import config
@ -55,8 +50,8 @@ class DockerNotStarted(Exception):
'Prolly you dint start da daemon bruh'
class ApplicationLogError(Exception):
'App in container reported an error in logs'
class ContainerError(RuntimeError):
'Error reported via app-container logging level'
@acm
@ -101,9 +96,9 @@ async def open_docker(
# not perms?
raise
# finally:
# if client:
# client.close()
finally:
if client:
client.close()
class Container:
@ -161,7 +156,7 @@ class Container:
# print(f'level: {level}')
if level in ('error', 'fatal'):
raise ApplicationLogError(msg)
raise ContainerError(msg)
if patt in msg:
return True
@ -190,29 +185,12 @@ class Container:
if 'is not running' in err.explanation:
return False
def hard_kill(self, start: float) -> None:
delay = time.time() - start
# get out the big guns, bc apparently marketstore
# doesn't actually know how to terminate gracefully
# :eyeroll:...
log.error(
f'SIGKILL-ing: {self.cntr.id} after {delay}s\n'
)
self.try_signal('SIGKILL')
self.cntr.wait(
timeout=3,
condition='not-running',
)
async def cancel(
self,
stop_msg: str,
hard_kill: bool = False,
) -> None:
cid = self.cntr.id
# first try a graceful cancel
log.cancel(
f'SIGINT cancelling container: {cid}\n'
@ -221,25 +199,15 @@ class Container:
self.try_signal('SIGINT')
start = time.time()
for _ in range(6):
for _ in range(30):
with trio.move_on_after(0.5) as cs:
log.cancel('polling for CNTR logs...')
try:
cs.shield = True
await self.process_logs_until(stop_msg)
except ApplicationLogError:
hard_kill = True
else:
# if we aren't cancelled on above checkpoint then we
# assume we read the expected stop msg and
# terminated.
break
if cs.cancelled_caught:
# on timeout just try a hard kill after
# a quick container sync-wait.
hard_kill = True
# if we aren't cancelled on above checkpoint then we
# assume we read the expected stop msg and terminated.
break
try:
log.info(f'Polling for container shutdown:\n{cid}')
@ -250,7 +218,6 @@ class Container:
condition='not-running',
)
# graceful exit if we didn't time out
break
except (
@ -262,22 +229,24 @@ class Container:
except (
docker.errors.APIError,
ConnectionError,
requests.exceptions.ConnectionError,
trio.Cancelled,
):
log.exception('Docker connection failure')
self.hard_kill(start)
raise
except trio.Cancelled:
log.exception('trio cancelled...')
self.hard_kill(start)
break
else:
hard_kill = True
delay = time.time() - start
log.error(
f'Failed to kill container {cid} after {delay}s\n'
'sending SIGKILL..'
)
# get out the big guns, bc apparently marketstore
# doesn't actually know how to terminate gracefully
# :eyeroll:...
self.try_signal('SIGKILL')
self.cntr.wait(
timeout=3,
condition='not-running',
)
if hard_kill:
self.hard_kill(start)
else:
log.cancel(f'Container stopped: {cid}')
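# (editor's sketch, not part of this diff) the SIGINT-then-SIGKILL
# escalation used by `Container.cancel()` above, via `docker-py`;
# the container name is an assumption.
import docker

client = docker.from_env()
cntr = client.containers.get('marketstored')
cntr.kill(signal='SIGINT')  # graceful first..
try:
    cntr.wait(timeout=3, condition='not-running')
except Exception:
    cntr.kill(signal='SIGKILL')  # ..then the big guns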
@ -320,12 +289,14 @@ async def open_ahabd(
))
try:
# TODO: we might eventually want a proxy-style msg-prot here
# to allow remote control of containers without needing
# callers to have root perms?
await trio.sleep_forever()
finally:
with trio.CancelScope(shield=True):
await cntr.cancel(stop_msg)

View File

@ -56,7 +56,7 @@ def iterticks(
sig = (
time,
tick['price'],
tick.get('size')
tick['size']
)
if ttype == 'dark_trade':

View File

@ -37,9 +37,6 @@ if TYPE_CHECKING:
log = get_logger(__name__)
_default_delay_s: float = 1.0
class sampler:
'''
Global sampling engine registry.
@ -107,18 +104,14 @@ async def increment_ohlc_buffer(
# TODO: do we want to support dynamically
# adding a "lower" lowest increment period?
await trio.sleep(ad)
total_s += delay_s
total_s += lowest
# increment all subscribed shm arrays
# TODO:
# - this in ``numba``
# - just lookup shms for this step instead of iterating?
for this_delay_s, shms in sampler.ohlcv_shms.items():
# short-circuit on any not-ready because slower sample
# rate consuming shm buffers.
if total_s % this_delay_s != 0:
# print(f'skipping `{this_delay_s}s` sample update')
for delay_s, shms in sampler.ohlcv_shms.items():
if total_s % delay_s != 0:
continue
# TODO: ``numba`` this!
@ -137,7 +130,7 @@ async def increment_ohlc_buffer(
# this copies non-std fields (eg. vwap) from the last datum
last[
['time', 'volume', 'open', 'high', 'low', 'close']
][0] = (t + this_delay_s, 0, close, close, close, close)
][0] = (t + delay_s, 0, close, close, close, close)
# write to the buffer
shm.push(last)
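# (editor's sketch, not part of this diff) the period gating above:
# a buffer keyed by sample period only steps when its period divides
# the total elapsed seconds.
ohlcv_shms = {1: ['1s_buf'], 60: ['1m_buf']}
total_s = 0
for _ in range(120):
    total_s += 1
    for delay_s, shms in ohlcv_shms.items():
        if total_s % delay_s != 0:
            continue  # slower buffers skip most ticks
        if delay_s == 60:
            print(f'minutely step at t={total_s}s -> {shms}')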
@ -159,6 +152,7 @@ async def broadcast(
'''
subs = sampler.subscribers.get(delay_s, ())
first = last = -1
if shm is None:
@ -227,8 +221,7 @@ async def iter_ohlc_periods(
async def sample_and_broadcast(
bus: _FeedsBus, # noqa
rt_shm: ShmArray,
hist_shm: ShmArray,
shm: ShmArray,
quote_stream: trio.abc.ReceiveChannel,
brokername: str,
sum_tick_vlm: bool = True,
@ -264,12 +257,8 @@ async def sample_and_broadcast(
last = tick['price']
# more compact inline-way to do this assignment
# to both buffers?
for shm in [rt_shm, hist_shm]:
# update last entry
# benchmarked in the 4-5 us range
# for shm in [rt_shm, hist_shm]:
o, high, low, v = shm.array[-1][
['open', 'high', 'low', 'volume']
]

View File

@ -1,5 +1,5 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
@ -27,14 +27,13 @@ from multiprocessing.shared_memory import SharedMemory, _USE_POSIX
if _USE_POSIX:
from _posixshmem import shm_unlink
# import msgspec
import numpy as np
from numpy.lib import recfunctions as rfn
import tractor
import numpy as np
from pydantic import BaseModel
from numpy.lib import recfunctions as rfn
from ..log import get_logger
from ._source import base_iohlc_dtype
from .types import Struct
log = get_logger(__name__)
@ -50,11 +49,7 @@ _rt_buffer_start = int((_days_worth - 1) * _secs_in_day)
def cuckoff_mantracker():
'''
Disable all ``multiprocessing`` "resource tracking" machinery since
it's an absolute multi-threaded mess of non-SC madness.
'''
from multiprocessing import resource_tracker as mantracker
# Tell the "resource tracker" thing to fuck off.
@ -112,39 +107,36 @@ class SharedInt:
log.warning(f'Shm for {name} already unlinked?')
class _Token(Struct, frozen=True):
class _Token(BaseModel):
'''
Internal representation of a shared memory "token"
which can be used to key a system wide post shm entry.
'''
class Config:
frozen = True
shm_name: str # this servers as a "key" value
shm_first_index_name: str
shm_last_index_name: str
dtype_descr: tuple
size: int # in struct-array index / row terms
@property
def dtype(self) -> np.dtype:
return np.dtype(list(map(tuple, self.dtype_descr))).descr
def as_msg(self):
return self.to_dict()
return self.dict()
@classmethod
def from_msg(cls, msg: dict) -> _Token:
if isinstance(msg, _Token):
return msg
# TODO: native struct decoding
# return _token_dec.decode(msg)
msg['dtype_descr'] = tuple(map(tuple, msg['dtype_descr']))
return _Token(**msg)
# _token_dec = msgspec.msgpack.Decoder(_Token)
# TODO: this api?
# _known_tokens = tractor.ActorVar('_shm_tokens', {})
# _known_tokens = tractor.ContextStack('_known_tokens', )
@ -163,7 +155,6 @@ def get_shm_token(key: str) -> _Token:
def _make_token(
key: str,
size: int,
dtype: Optional[np.dtype] = None,
) -> _Token:
'''
@ -176,8 +167,7 @@ def _make_token(
shm_name=key,
shm_first_index_name=key + "_first",
shm_last_index_name=key + "_last",
dtype_descr=tuple(np.dtype(dtype).descr),
size=size,
dtype_descr=np.dtype(dtype).descr
)
@ -229,7 +219,6 @@ class ShmArray:
shm_first_index_name=self._first._shm.name,
shm_last_index_name=self._last._shm.name,
dtype_descr=tuple(self._array.dtype.descr),
size=self._len,
)
@property
@ -444,7 +433,7 @@ class ShmArray:
def open_shm_array(
key: Optional[str] = None,
size: int = _default_size, # see above
size: int = _default_size,
dtype: Optional[np.dtype] = None,
readonly: bool = False,
@ -475,8 +464,7 @@ def open_shm_array(
token = _make_token(
key=key,
size=size,
dtype=dtype,
dtype=dtype
)
# create single entry arrays for storing an first and last indices
@ -528,15 +516,15 @@ def open_shm_array(
# "unlink" created shm on process teardown by
# pushing teardown calls onto actor context stack
stack = tractor.current_actor().lifetime_stack
stack.callback(shmarr.close)
stack.callback(shmarr.destroy)
tractor._actor._lifetime_stack.callback(shmarr.close)
tractor._actor._lifetime_stack.callback(shmarr.destroy)
return shmarr
def attach_shm_array(
token: tuple[str, str, tuple[str, str]],
size: int = _default_size,
readonly: bool = True,
) -> ShmArray:
@ -575,7 +563,7 @@ def attach_shm_array(
raise _err
shmarr = np.ndarray(
(token.size,),
(size,),
dtype=token.dtype,
buffer=shm.buf
)
@ -614,8 +602,8 @@ def attach_shm_array(
if key not in _known_tokens:
_known_tokens[key] = token
# "close" attached shm on actor teardown
tractor.current_actor().lifetime_stack.callback(sha.close)
# "close" attached shm on process teardown
tractor._actor._lifetime_stack.callback(sha.close)
return sha
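# (editor's sketch, not part of this diff) the core trick a `_Token`
# keys: rebuild an identical ndarray view over a named shm segment
# from just (name, dtype, size); stdlib-only stand-in.
from multiprocessing.shared_memory import SharedMemory
import numpy as np

dtype = np.dtype([('time', 'f8'), ('close', 'f8')])
size = 100
shm = SharedMemory(name='ohlcv_demo', create=True, size=dtype.itemsize * size)
try:
    arr = np.ndarray((size,), dtype=dtype, buffer=shm.buf)
    arr[0] = (1.0, 4200.0)
    # a second process would `SharedMemory(name='ohlcv_demo')` and
    # build the same view from the token's dtype/size fields.
finally:
    shm.close()
    shm.unlink()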
@ -643,7 +631,6 @@ def maybe_open_shm_array(
use ``attach_shm_array``.
'''
size = kwargs.pop('size', _default_size)
try:
# see if we already know this key
token = _known_tokens[key]
@ -651,11 +638,7 @@ def maybe_open_shm_array(
except KeyError:
log.warning(f"Could not find {key} in shms cache")
if dtype:
token = _make_token(
key,
size=size,
dtype=dtype,
)
token = _make_token(key, dtype)
try:
return attach_shm_array(token=token, **kwargs), False
except FileNotFoundError:

View File

@ -23,7 +23,7 @@ import decimal
from bidict import bidict
import numpy as np
from msgspec import Struct
from pydantic import BaseModel
# from numba import from_dtype
@ -126,7 +126,7 @@ def unpack_fqsn(fqsn: str) -> tuple[str, str, str]:
)
class Symbol(Struct):
class Symbol(BaseModel):
'''
I guess this is some kinda container thing for dealing with
all the different meta-data formats from brokers?
@ -152,7 +152,9 @@ class Symbol(Struct):
info: dict[str, Any],
suffix: str = '',
) -> Symbol:
# XXX: like wtf..
# ) -> 'Symbol':
) -> None:
tick_size = info.get('price_tick_size', 0.01)
lot_tick_size = info.get('lot_tick_size', 0.0)
@ -173,7 +175,9 @@ class Symbol(Struct):
fqsn: str,
info: dict[str, Any],
) -> Symbol:
# XXX: like wtf..
# ) -> 'Symbol':
) -> None:
broker, key, suffix = unpack_fqsn(fqsn)
return cls.from_broker_info(
broker,
@ -236,7 +240,7 @@ class Symbol(Struct):
'''
tokens = self.tokens()
fqsn = '.'.join(map(str.lower, tokens))
fqsn = '.'.join(tokens)
return fqsn
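# (editor's sketch, not part of this diff) illustrative fqsn ("fully
# qualified symbol name") unpack/repack matching `unpack_fqsn()` and
# `front_fqsn()` above.
fqsn = 'eurusd.idealpro.ib'
key, _, broker = fqsn.rpartition('.')
assert (broker, key) == ('ib', 'eurusd.idealpro')
# the ui-facing repack lowercases all tokens:
assert '.'.join(map(str.lower, ('EURUSD', 'IDEALPRO', 'IB'))) == fqsn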
def iterfqsns(self) -> list[str]:

View File

@ -19,9 +19,8 @@ ToOlS fOr CoPInG wITh "tHE wEB" protocols.
"""
from contextlib import asynccontextmanager, AsyncExitStack
from itertools import count
from types import ModuleType
from typing import Any, Optional, Callable, AsyncGenerator
from typing import Any, Callable, AsyncGenerator
import json
import trio
@ -36,8 +35,6 @@ from trio_websocket._impl import (
from ..log import get_logger
from .types import Struct
log = get_logger(__name__)
@ -56,11 +53,13 @@ class NoBsWs:
def __init__(
self,
url: str,
token: str,
stack: AsyncExitStack,
fixture: Optional[Callable] = None,
serializer: ModuleType = json
fixture: Callable,
serializer: ModuleType = json,
):
self.url = url
self.token = token
self.fixture = fixture
self._stack = stack
self._ws: 'WebSocketConnection' = None # noqa
@ -83,12 +82,15 @@ class NoBsWs:
self._ws = await self._stack.enter_async_context(
trio_websocket.open_websocket_url(self.url)
)
if self.fixture is not None:
# rerun user code fixture
if self.token == '':
ret = await self._stack.enter_async_context(
self.fixture(self)
)
else:
ret = await self._stack.enter_async_context(
self.fixture(self, self.token)
)
assert ret is None
@ -126,26 +128,21 @@ class NoBsWs:
except self.recon_errors:
await self._connect()
def __aiter__(self):
return self
async def __anext__(self):
return await self.recv_msg()
@asynccontextmanager
async def open_autorecon_ws(
url: str,
# TODO: proper type annot smh
fixture: Optional[Callable] = None,
fixture: Callable,
# used for authenticated websockets
token: str = '',
) -> AsyncGenerator[tuple[...], NoBsWs]:
"""Apparently we can QoS for all sorts of reasons..so catch em.
"""
async with AsyncExitStack() as stack:
ws = NoBsWs(url, stack, fixture=fixture)
ws = NoBsWs(url, token, stack, fixture=fixture)
await ws._connect()
try:
@ -153,86 +150,3 @@ async def open_autorecon_ws(
finally:
await stack.aclose()
'''
JSON-RPC request-response style machinery for transparent multiplexing of msgs
over a NoBsWs.
'''
class JSONRPCResult(Struct):
jsonrpc: str = '2.0'
id: int
result: Optional[dict] = None
error: Optional[dict] = None
@asynccontextmanager
async def open_jsonrpc_session(
url: str,
start_id: int = 0,
dtype: type = JSONRPCResult
) -> Callable[[str, dict], dict]:
async with (
trio.open_nursery() as n,
open_autorecon_ws(url) as ws
):
rpc_id: Iterable = count(start_id)
rpc_results: dict[int, dict] = {}
async def json_rpc(method: str, params: dict) -> dict:
'''
Perform a JSON-RPC call and wait for the result; raise an exception
if an error field is present in the response.
'''
msg = {
'jsonrpc': '2.0',
'id': next(rpc_id),
'method': method,
'params': params
}
_id = msg['id']
rpc_results[_id] = {
'result': None,
'event': trio.Event()
}
await ws.send_msg(msg)
await rpc_results[_id]['event'].wait()
ret = rpc_results[_id]['result']
del rpc_results[_id]
if ret.error is not None:
raise Exception(json.dumps(ret.error, indent=4))
return ret
async def recv_task():
'''
Receive every ws message and store it in its corresponding result
field, then set the event to wake up the original sender tasks.
'''
async for msg in ws:
msg = dtype(**msg)
if msg.id not in rpc_results:
log.warning(f'Wasn\'t expecting ws msg: {json.dumps(msg, indent=4)}')
res = rpc_results.setdefault(
msg.id,
{'result': None, 'event': trio.Event()}
)
res['result'] = msg
res['event'].set()
n.start_soon(recv_task)
yield json_rpc
n.cancel_scope.cancel()
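# (editor's sketch, not part of this diff) hypothetical driver for
# `open_jsonrpc_session()` above; the deribit test endpoint and
# method name are assumptions for illustration.
import trio

async def main():
    async with open_jsonrpc_session(
        'wss://test.deribit.com/ws/api/v2',
    ) as json_rpc:
        res = await json_rpc('public/get_time', {})
        print(res.result)

# trio.run(main)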

File diff suppressed because it is too large

View File

@ -37,7 +37,7 @@ import time
from math import isnan
from bidict import bidict
from msgspec.msgpack import encode, decode
import msgpack
import pyqtgraph as pg
import numpy as np
import tractor
@ -56,7 +56,6 @@ if TYPE_CHECKING:
from .feed import maybe_open_feed
from ..log import get_logger, get_console_log
from .._profile import Profiler
log = get_logger(__name__)
@ -388,57 +387,50 @@ class Storage:
async def load(
self,
fqsn: str,
timeframe: int,
) -> tuple[
np.ndarray, # timeframe sampled array-series
dict[int, np.ndarray], # timeframe (in secs) to series
Optional[datetime], # first dt
Optional[datetime], # last dt
]:
first_tsdb_dt, last_tsdb_dt = None, None
hist = await self.read_ohlcv(
tsdb_arrays = await self.read_ohlcv(
fqsn,
# on first load we don't need to pull the max
# history per request size worth.
limit=3000,
timeframe=timeframe,
)
log.info(f'Loaded tsdb history {hist}')
log.info(f'Loaded tsdb history {tsdb_arrays}')
if len(hist):
times = hist['Epoch']
if tsdb_arrays:
fastest = list(tsdb_arrays.values())[0]
times = fastest['Epoch']
first, last = times[0], times[-1]
first_tsdb_dt, last_tsdb_dt = map(
pendulum.from_timestamp, [first, last]
)
return (
hist, # array-data
first_tsdb_dt, # start of query-frame
last_tsdb_dt, # most recent
)
return tsdb_arrays, first_tsdb_dt, last_tsdb_dt
async def read_ohlcv(
self,
fqsn: str,
timeframe: int | str,
timeframe: Optional[Union[int, str]] = None,
end: Optional[int] = None,
limit: int = int(800e3),
) -> dict[
int,
Union[dict, np.ndarray],
) -> tuple[
MarketstoreClient,
Union[dict, np.ndarray]
]:
client = self.client
syms = await client.list_symbols()
if fqsn not in syms:
return {}
# use the provided timeframe or 1s by default
tfstr = tf_in_1s.get(timeframe, tf_in_1s[1])
tfstr = tf_in_1s[1]
params = Params(
symbols=fqsn,
@ -452,68 +444,58 @@ class Storage:
limit=limit,
)
if timeframe is None:
log.info(f'starting {fqsn} tsdb granularity scan..')
# loop through and try to find highest granularity
for tfstr in tf_in_1s.values():
try:
log.info(f'querying for {tfstr}@{fqsn}')
params.set('timeframe', tfstr)
result = await client.query(params)
break
except purerpc.grpclib.exceptions.UnknownError:
# indicate there is no history for this timeframe
# XXX: this is already logged by the container and
# thus shows up through `marketstored` logs relay.
# log.warning(f'{tfstr}@{fqsn} not found')
continue
else:
return {}
else:
result = await client.query(params)
# TODO: it turns out column access on recarrays is actually slower:
# https://jakevdp.github.io/PythonDataScienceHandbook/02.09-structured-data-numpy.html#RecordArrays:-Structured-Arrays-with-a-Twist
# it might make sense to make these structured arrays?
data_set = result.by_symbols()[fqsn]
array = data_set.array
# Fill out a `numpy` array-results map
arrays = {}
for fqsn, data_set in result.by_symbols().items():
arrays.setdefault(fqsn, {})[
tf_in_1s.inverse[data_set.timeframe]
] = data_set.array
# XXX: ensure sample rate is as expected
time = data_set.array['Epoch']
if len(time) > 1:
time_step = time[-1] - time[-2]
ts = tf_in_1s.inverse[data_set.timeframe]
if time_step != ts:
log.warning(
f'MKTS BUG: wrong timeframe loaded: {time_step}\n'
'YOUR DATABASE LIKELY CONTAINS BAD DATA FROM AN OLD BUG\n'
f'WIPING HISTORY FOR {ts}s'
)
await self.delete_ts(fqsn, timeframe)
# try reading again..
return await self.read_ohlcv(
fqsn,
timeframe,
end,
limit,
)
return array
return arrays[fqsn][timeframe] if timeframe else arrays[fqsn]
async def delete_ts(
self,
key: str,
timeframe: Optional[Union[int, str]] = None,
fmt: str = 'OHLCV',
) -> bool:
client = self.client
syms = await client.list_symbols()
print(syms)
if key not in syms:
raise KeyError(f'`{key}` table key not found in\n{syms}?')
# if key not in syms:
# raise KeyError(f'`{fqsn}` table key not found?')
tbk = mk_tbk((
key,
tf_in_1s.get(timeframe, tf_in_1s[60]),
fmt,
))
return await client.destroy(tbk=tbk)
return await client.destroy(tbk=key)
async def write_ohlcv(
self,
fqsn: str,
ohlcv: np.ndarray,
timeframe: int,
append_and_duplicate: bool = True,
limit: int = int(800e3),
@ -537,18 +519,17 @@ class Storage:
m, r = divmod(len(mkts_array), limit)
tfkey = tf_in_1s[timeframe]
for i in range(m, 1):
to_push = mkts_array[i-1:i*limit]
# write to db
resp = await self.client.write(
to_push,
tbk=f'{fqsn}/{tfkey}/OHLCV',
tbk=f'{fqsn}/1Sec/OHLCV',
# NOTE: this will append duplicates
# for the same timestamp-index.
# TODO: pre-deduplicate?
# TODO: pre deduplicate?
isvariablelength=append_and_duplicate,
)
@ -567,7 +548,7 @@ class Storage:
# write to db
resp = await self.client.write(
to_push,
tbk=f'{fqsn}/{tfkey}/OHLCV',
tbk=f'{fqsn}/1Sec/OHLCV',
# NOTE: this will append duplicates
# for the same timestamp-index.
@ -596,7 +577,6 @@ class Storage:
# def delete_range(self, start_dt, end_dt) -> None:
# ...
@acm
async def open_storage_client(
fqsn: str,
@ -646,7 +626,7 @@ async def tsdb_history_update(
# * the original data feed arch blurb:
# - https://github.com/pikers/piker/issues/98
#
profiler = Profiler(
profiler = pg.debug.Profiler(
disabled=False, # not pg_profile_enabled(),
delayed=False,
)
@ -662,8 +642,8 @@ async def tsdb_history_update(
):
profiler(f'opened feed for {fqsn}')
# to_append = feed.hist_shm.array
# to_prepend = None
to_append = feed.shm.array
to_prepend = None
if fqsn:
symbol = feed.symbols.get(fqsn)
@ -671,21 +651,21 @@ async def tsdb_history_update(
fqsn = symbol.front_fqsn()
# diff db history with shm and only write the missing portions
# ohlcv = feed.hist_shm.array
ohlcv = feed.shm.array
# TODO: use pg profiler
# for secs in (1, 60):
# tsdb_array = await storage.read_ohlcv(
# fqsn,
# timeframe=timeframe,
# )
# # hist diffing:
# # these aren't currently used but can be referenced from
# # within the embedded ipython shell below.
# to_append = ohlcv[ohlcv['time'] > ts['Epoch'][-1]]
# to_prepend = ohlcv[ohlcv['time'] < ts['Epoch'][0]]
tsdb_arrays = await storage.read_ohlcv(fqsn)
# hist diffing
if tsdb_arrays:
for secs in (1, 60):
ts = tsdb_arrays.get(secs)
if ts is not None and len(ts):
# these aren't currently used but can be referenced from
# within the embedded ipython shell below.
to_append = ohlcv[ohlcv['time'] > ts['Epoch'][-1]]
to_prepend = ohlcv[ohlcv['time'] < ts['Epoch'][0]]
# profiler('Finished db arrays diffs')
profiler('Finished db arrays diffs')
syms = await storage.client.list_symbols()
log.info(f'Existing tsdb symbol set:\n{pformat(syms)}')
@ -794,13 +774,12 @@ async def stream_quotes(
async with open_websocket_url(f'ws://{host}:{port}/ws') as ws:
# send subs topics to server
resp = await ws.send_message(
encode({'streams': list(tbks.values())})
msgpack.dumps({'streams': list(tbks.values())})
)
log.info(resp)
async def recv() -> dict[str, Any]:
return decode((await ws.get_message()), encoding='utf-8')
return msgpack.loads((await ws.get_message()), encoding='utf-8')
streams = (await recv())['streams']
log.info(f"Subscribed to {streams}")

View File

@ -1,87 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Built-in (extension) types.
"""
import sys
from typing import Optional
from pprint import pformat
import msgspec
class Struct(
msgspec.Struct,
# https://jcristharif.com/msgspec/structs.html#tagged-unions
# tag='pikerstruct',
# tag=True,
):
'''
A "human friendlier" (aka repl buddy) struct subtype.
'''
def to_dict(self) -> dict:
return {
f: getattr(self, f)
for f in self.__struct_fields__
}
def __repr__(self):
# only turn on pprint when we detect a python REPL
# at runtime B)
if (
hasattr(sys, 'ps1')
# TODO: check if we're in pdb
):
return self.pformat()
return super().__repr__()
def pformat(self) -> str:
return f'Struct({pformat(self.to_dict())})'
def copy(
self,
update: Optional[dict] = None,
) -> msgspec.Struct:
'''
Validate-typecast all self-defined fields, return a copy of us
with all such fields.
This is kinda like the default behaviour in `pydantic.BaseModel`.
'''
if update:
for k, v in update.items():
setattr(self, k, v)
# roundtrip serialize to validate
return msgspec.msgpack.Decoder(
type=type(self)
).decode(
msgspec.msgpack.Encoder().encode(self)
)
def typecast(
self,
# fields: Optional[list[str]] = None,
) -> None:
for fname, ftype in self.__annotations__.items():
setattr(self, fname, ftype(getattr(self, fname)))
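
A usage sketch of the round-trip validation in `.copy()` above (the struct
and field names here are hypothetical):

    class Point(Struct):
        x: int
        y: int

    p = Point(x=1, y=2)
    # `update` first mutates the instance, then re-validates by encoding
    # and decoding the whole struct through msgpack
    p2 = p.copy(update={'x': 3})
    assert (p2.x, p2.y) == (3, 2)
    # a type-invalid update would raise during the decode/validate step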

View File

@ -78,8 +78,7 @@ class Fsp:
# + the consuming fsp *to* the consumers output
# shm flow.
_flow_registry: dict[
tuple[_Token, str],
tuple[_Token, Optional[ShmArray]],
tuple[_Token, str], _Token,
] = {}
def __init__(
@ -121,6 +120,7 @@ class Fsp:
):
return self.func(*args, **kwargs)
# TODO: lru_cache this? prettty sure it'll work?
def get_shm(
self,
src_shm: ShmArray,
@ -131,27 +131,12 @@ class Fsp:
for this "instance" of a signal processor for
the given ``key``.
The destination shm "token" and array are cached if possible to
minimize multiple stdlib/system calls.
'''
dst_token, maybe_array = self._flow_registry[
dst_token = self._flow_registry[
(src_shm._token, self.name)
]
if maybe_array is None:
self._flow_registry[
(src_shm._token, self.name)
] = (
dst_token,
# "cache" the ``ShmArray`` such that
# we call the underlying "attach" code as few
# times as possible as per:
# - https://github.com/pikers/piker/issues/359
# - https://github.com/pikers/piker/issues/332
maybe_array := attach_shm_array(dst_token)
)
return maybe_array
shm = attach_shm_array(dst_token)
return shm
def fsp(
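
The registry change above swaps a bare destination token for a
`(token, maybe_array)` pair so the `ShmArray` attach only happens once per
flow. A standalone sketch of that attach-once pattern (names hypothetical):

    _cache: dict[str, object] = {}

    def attach_once(token: str, attach) -> object:
        # run the (relatively expensive) attach call only on first lookup,
        # then serve the cached handle on every later request
        shm = _cache.get(token)
        if shm is None:
            shm = _cache[token] = attach(token)
        return shm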

View File

@ -37,14 +37,12 @@ from .. import data
from ..data import attach_shm_array
from ..data.feed import Feed
from ..data._sharedmem import ShmArray
from ..data._sampling import _default_delay_s
from ..data._source import Symbol
from ._api import (
Fsp,
_load_builtins,
_Token,
)
from .._profile import Profiler
log = get_logger(__name__)
@ -92,7 +90,7 @@ async def fsp_compute(
) -> None:
profiler = Profiler(
profiler = pg.debug.Profiler(
delayed=False,
disabled=True
)
@ -107,7 +105,7 @@ async def fsp_compute(
filter_quotes_by_sym(fqsn, quote_stream),
# XXX: currently the ``ohlcv`` arg
feed.rt_shm,
feed.shm,
)
# Conduct a single iteration of fsp with historical bars input
@ -116,7 +114,7 @@ async def fsp_compute(
dict[str, np.ndarray], # multi-output case
np.ndarray, # single output case
]
history_output = await anext(out_stream)
history_output = await out_stream.__anext__()
func_name = func.__name__
profiler(f'{func_name} generated history')
@ -263,7 +261,7 @@ async def cascade(
destination shm array buffer.
'''
profiler = Profiler(
profiler = pg.debug.Profiler(
delayed=False,
disabled=False
)
@ -286,10 +284,9 @@ async def cascade(
# TODO: ugh i hate this wind/unwind to list over the wire
# but not sure how else to do it.
for (token, fsp_name, dst_token) in shm_registry:
Fsp._flow_registry[(
_Token.from_msg(token),
fsp_name,
)] = _Token.from_msg(dst_token), None
Fsp._flow_registry[
(_Token.from_msg(token), fsp_name)
] = _Token.from_msg(dst_token)
fsp: Fsp = reg.get(
NamespacePath(ns_path)
@ -315,7 +312,7 @@ async def cascade(
profiler(f'{func}: feed up')
assert src.token == feed.rt_shm.token
assert src.token == feed.shm.token
# last_len = new_len = len(src.array)
func_name = func.__name__
@ -377,8 +374,7 @@ async def cascade(
'key': dst_shm_token,
'first': dst._first.value,
'last': dst._last.value,
}
})
}})
return tracker, index
def is_synced(
@ -422,11 +418,7 @@ async def cascade(
# detect sample period step for subscription to increment
# signal
times = src.array['time']
if len(times) > 1:
delay_s = times[-1] - times[times != times[-1]][-1]
else:
# our default "HFT" sample rate.
delay_s = _default_delay_s
# Increment the underlying shared memory buffer on every
# "increment" msg received from the underlying data feed.
@ -437,8 +429,7 @@ async def cascade(
profiler(f'{func_name}: sample stream up')
profiler.finish()
async for i in istream:
# log.runtime(f'FSP incrementing {i}')
async for _ in istream:
# respawn the compute task if the source
# array has been updated such that we compute
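
The removed sample-period detection deserves a worked example: masking out
entries equal to the last timestamp makes the step calculation robust to a
duplicated final tick, where a naive `times[-1] - times[-2]` would yield 0:

    import numpy as np

    times = np.array([1.0, 2.0, 3.0, 3.0])  # final timestamp duplicated
    step = times[-1] - times[times != times[-1]][-1]
    assert step == 1.0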

View File

@ -1,975 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Personal/Private position parsing, calculating, summarizing in a way
that doesn't try to cuk most humans who prefer to not lose their moneys..
(looking at you `ib` and dirt-bird friends)
'''
from contextlib import contextmanager as cm
from pprint import pformat
import os
from os import path
from math import copysign
import re
import time
from typing import (
Any,
Optional,
Union,
)
import pendulum
from pendulum import datetime, now
import tomli
import toml
from . import config
from .brokers import get_brokermod
from .clearing._messages import BrokerdPosition, Status
from .data._source import Symbol
from .log import get_logger
from .data.types import Struct
log = get_logger(__name__)
@cm
def open_trade_ledger(
broker: str,
account: str,
) -> str:
'''
Idempotently create and read in a trade log file from the
``<configuration_dir>/ledgers/`` directory.
Files are named per broker account of the form
``trades_<brokername>_<accountname>.toml``. The ``accountname`` here is the
name as defined in the user's ``brokers.toml`` config.
'''
ldir = path.join(config._config_dir, 'ledgers')
if not path.isdir(ldir):
os.makedirs(ldir)
fname = f'trades_{broker}_{account}.toml'
tradesfile = path.join(ldir, fname)
if not path.isfile(tradesfile):
log.info(
f'Creating new local trades ledger: {tradesfile}'
)
with open(tradesfile, 'w') as cf:
pass # touch
with open(tradesfile, 'rb') as cf:
start = time.time()
ledger = tomli.load(cf)
print(f'Ledger load took {time.time() - start}s')
cpy = ledger.copy()
try:
yield cpy
finally:
if cpy != ledger:
# TODO: show diff output?
# https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
print(f'Updating ledger for {tradesfile}:\n')
ledger.update(cpy)
# we write on close the mutated ledger data
with open(tradesfile, 'w') as cf:
toml.dump(ledger, cf)
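# Usage sketch (broker/account names here are hypothetical): mutations to
# the yielded copy are diffed against the on-disk contents and written
# back out on context exit:
#
#     with open_trade_ledger('ib', 'algopaper') as ledger:
#         ledger['some-trade-id'] = {'price': 10.0, 'size': 1.0}
#     # the toml file now contains the new record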
class Transaction(Struct, frozen=True):
# TODO: should this be ``.to`` (see below)?
fqsn: str
tid: Union[str, int] # unique transaction id
size: float
price: float
cost: float # commissions or other additional costs
dt: datetime
expiry: Optional[datetime] = None
# optional key normally derived from the broker
# backend which ensures the instrument-symbol this record
# is for is truly unique.
bsuid: Optional[Union[str, int]] = None
# optional fqsn for the source "asset"/money symbol?
# from: Optional[str] = None
class Position(Struct):
'''
Basic pp (personal/piker position) model with attached clearing
transaction history.
'''
symbol: Symbol
# can be +ve or -ve for long/short
size: float
# "breakeven price" above or below which pnl moves above and below
# zero for the entirety of the current "trade state".
ppu: float
# unique backend symbol id
bsuid: str
split_ratio: Optional[int] = None
# ordered record of known constituent trade messages
clears: dict[
Union[str, int, Status], # trade id
dict[str, Any], # transaction history summaries
] = {}
first_clear_dt: Optional[datetime] = None
expiry: Optional[datetime] = None
def to_dict(self) -> dict:
return {
f: getattr(self, f)
for f in self.__struct_fields__
}
def to_pretoml(self) -> tuple[str, dict]:
'''
Prep this position's data contents for export to toml including
re-structuring of the ``.clears`` table to an array of
inline-subtables for better ``pps.toml`` compactness.
'''
d = self.to_dict()
clears = d.pop('clears')
expiry = d.pop('expiry')
if self.split_ratio is None:
d.pop('split_ratio')
# should be obvious from clears/event table
d.pop('first_clear_dt')
# TODO: we need to figure out how to have one top level
# listing venue here even when the backend isn't providing
# it via the trades ledger..
# drop symbol obj in serialized form
s = d.pop('symbol')
fqsn = s.front_fqsn()
if self.expiry is None:
d.pop('expiry', None)
elif expiry:
d['expiry'] = str(expiry)
toml_clears_list = []
# reverse sort so latest clears are at top of section?
for tid, data in sorted(
list(clears.items()),
# sort by datetime
key=lambda item: item[1]['dt'],
):
inline_table = toml.TomlDecoder().get_empty_inline_table()
# serialize datetime to parsable `str`
inline_table['dt'] = str(data['dt'])
# insert optional clear fields in column order
for k in ['ppu', 'accum_size']:
val = data.get(k)
if val:
inline_table[k] = val
# insert required fields
for k in ['price', 'size', 'cost']:
inline_table[k] = data[k]
inline_table['tid'] = tid
toml_clears_list.append(inline_table)
d['clears'] = toml_clears_list
return fqsn, d
def ensure_state(self) -> None:
'''
Audit the `.size` and `.ppu` local instance vars against
the clears table calculations, update them to the calc-ed values if
they differ, and log warnings to console.
'''
clears = list(self.clears.values())
self.first_clear_dt = min(list(entry['dt'] for entry in clears))
last_clear = clears[-1]
csize = self.calc_size()
accum = last_clear['accum_size']
if not self.expired():
if (
csize != accum
and csize != round(accum * (self.split_ratio or 1))
):
raise ValueError(f'Size mismatch: {csize}')
else:
assert csize == 0, 'Contract is expired but non-zero size?'
if self.size != csize:
log.warning(
'Position state mismatch:\n'
f'{self.size} => {csize}'
)
self.size = csize
cppu = self.calc_ppu()
ppu = last_clear['ppu']
if (
cppu != ppu
and self.split_ratio is not None
# handle any split info entered (for now) manually by user
and cppu != (ppu / self.split_ratio)
):
raise ValueError(f'PPU mismatch: {cppu}')
if self.ppu != cppu:
log.warning(
'Position state mismatch:\n'
f'{self.ppu} => {cppu}'
)
self.ppu = cppu
def update_from_msg(
self,
msg: BrokerdPosition,
) -> None:
# XXX: better place to do this?
symbol = self.symbol
lot_size_digits = symbol.lot_size_digits
ppu, size = (
round(
msg['avg_price'],
ndigits=symbol.tick_size_digits
),
round(
msg['size'],
ndigits=lot_size_digits
),
)
self.ppu = ppu
self.size = size
@property
def dsize(self) -> float:
'''
The "dollar" size of the pp, normally in trading (fiat) unit
terms.
'''
return self.ppu * self.size
# TODO: idea: "real LIFO" dynamic positioning.
# - when a trade takes place where the pnl for
# the (set of) trade(s) is below the breakeven price
# it may be that the trader took a +ve pnl on a short(er)
# term trade in the same account.
# - in this case we could recalc the be price to
# be reverted back to it's prior value before the nearest term
# trade was opened.?
# def lifo_price() -> float:
# ...
def calc_ppu(
self,
# include transaction cost in breakeven price
# and presume the worst case of the same cost
# to exit this transaction (even though in reality
# it will be dynamic based on exit strategy).
cost_scalar: float = 2,
) -> float:
'''
Compute the "price-per-unit" price for the given non-zero sized
rolling position.
The recurrence relation which computes this (exponential) mean
per new clear which **increases** the accumulative position size
is:
ppu[-1] = (
ppu[-2] * accum_size[-2]
+
ppu[-1] * size
) / accum_size[-1]
where `cost_basis` for the current step is simply the price
* size of the most recent clearing transaction.
'''
asize_h: list[float] = [] # historical accumulative size
ppu_h: list[float] = [] # historical price-per-unit
clears = list(self.clears.items())
for i, (tid, entry) in enumerate(clears):
clear_size = entry['size']
clear_price = entry['price']
last_accum_size = asize_h[-1] if asize_h else 0
accum_size = last_accum_size + clear_size
accum_sign = copysign(1, accum_size)
sign_change: bool = False
if accum_size == 0:
ppu_h.append(0)
asize_h.append(0)
continue
# test if the pp somehow went "past" a net zero size state
# resulting in a change of the "sign" of the size (+ve for
# long, -ve for short).
sign_change = (
copysign(1, last_accum_size) + accum_sign == 0
and last_accum_size != 0
)
# since we passed the net-zero-size state the new size
# after sum should be the remaining size the new
# "direction" (aka, long vs. short) for this clear.
if sign_change:
clear_size = accum_size
abs_diff = abs(accum_size)
asize_h.append(0)
ppu_h.append(0)
else:
# old size minus the new size gives us size diff with
# +ve -> increase in pp size
# -ve -> decrease in pp size
abs_diff = abs(accum_size) - abs(last_accum_size)
# XXX: LIFO breakeven price update. only an increase in size
# of the position contributes the breakeven price,
# a decrease does not (i.e. the position is being made
# smaller).
# abs_clear_size = abs(clear_size)
abs_new_size = abs(accum_size)
if abs_diff > 0:
cost_basis = (
# cost basis for this clear
clear_price * abs(clear_size)
+
# transaction cost
accum_sign * cost_scalar * entry['cost']
)
if asize_h:
size_last = abs(asize_h[-1])
cb_last = ppu_h[-1] * size_last
ppu = (cost_basis + cb_last) / abs_new_size
else:
ppu = cost_basis / abs_new_size
ppu_h.append(ppu)
asize_h.append(accum_size)
else:
# on "exit" clears from a given direction,
# only the size changes not the price-per-unit
# need to be updated since the ppu remains constant
# and gets weighted by the new size.
asize_h.append(accum_size)
ppu_h.append(ppu_h[-1])
final_ppu = ppu_h[-1] if ppu_h else 0
# handle any split info entered (for now) manually by user
if self.split_ratio is not None:
final_ppu /= self.split_ratio
return final_ppu
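# A worked example of the recurrence above (illustrative numbers with
# zero transaction costs): buy 2 @ 10 then buy 2 @ 20 gives
#   ppu = (10*2 + 20*2) / 4 = 15
# a subsequent sell of 1 unit is an "exit" clear: size drops to 3 but
# the ppu stays 15, since only size-increasing clears contribute to
# the breakeven price.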
def expired(self) -> bool:
'''
Predicate which checks if the contract/instrument is past its expiry.
'''
return bool(self.expiry) and self.expiry < now()
def calc_size(self) -> float:
'''
Calculate the unit size of this position in the destination
asset using the clears/trade event table; zero if expired.
'''
size: float = 0
# time-expired pps (normally derivatives) are "closed"
# and have a zero size.
if self.expired():
return 0
for tid, entry in self.clears.items():
size += entry['size']
if self.split_ratio is not None:
size = round(size * self.split_ratio)
return size
def minimize_clears(
self,
) -> dict[str, dict]:
'''
Minimize the position's clears entries by removing
all transactions before the last net zero size to avoid
unnecessary history irrelevant to the current pp state.
'''
size: float = 0
clears_since_zero: list[tuple[str, dict]] = []
# TODO: we might just want to always do this when iterating
# a ledger? keep a state of the last net-zero and only do the
# full iterate when no state was stashed?
# scan for the last "net zero" position by iterating
# transactions until the next net-zero size, rinse, repeat.
for tid, clear in self.clears.items():
size += clear['size']
clears_since_zero.append((tid, clear))
if size == 0:
clears_since_zero.clear()
self.clears = dict(clears_since_zero)
return self.clears
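# e.g. clears with sizes [+1, -1, +2] net to zero after the second
# entry, so only the trailing [+2] clear is retained; everything
# before the last net-zero state is irrelevant to the current pp.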
def add_clear(
self,
t: Transaction,
) -> dict:
'''
Update clearing table and populate rolling ppu and accumulative
size in both the clears entry and local attrs state.
'''
clear = self.clears[t.tid] = {
'cost': t.cost,
'price': t.price,
'size': t.size,
'dt': t.dt,
}
# TODO: compute these incrementally instead
# of re-looping through each time resulting in O(n**2)
# behaviour..?
# NOTE: we compute these **after** adding the entry in order to
# make the recurrence relation math work inside
# ``.calc_size()``.
self.size = clear['accum_size'] = self.calc_size()
self.ppu = clear['ppu'] = self.calc_ppu()
return clear
def suggest_split(self) -> float:
...
class PpTable(Struct):
brokername: str
acctid: str
pps: dict[str, Position]
conf: Optional[dict] = {}
def update_from_trans(
self,
trans: dict[str, Transaction],
cost_scalar: float = 2,
) -> dict[str, Position]:
pps = self.pps
updated: dict[str, Position] = {}
# lifo update all pps from records
for tid, t in trans.items():
pp = pps.setdefault(
t.bsuid,
# if no existing pp, allocate fresh one.
Position(
Symbol.from_fqsn(
t.fqsn,
info={},
),
size=0.0,
ppu=0.0,
bsuid=t.bsuid,
expiry=t.expiry,
)
)
clears = pp.clears
if clears:
first_clear_dt = pp.first_clear_dt
# don't do updates for ledger records we already have
# included in the current pps state.
if (
t.tid in clears
or first_clear_dt and t.dt < first_clear_dt
):
# NOTE: likely you'll see repeats of the same
# ``Transaction`` passed in here if/when you are restarting
# a ``brokerd.ib`` where the API will re-report trades from
# the current session, so we need to make sure we don't
# "double count" these in pp calculations.
continue
# update clearing table
pp.add_clear(t)
updated[t.bsuid] = pp
# minimize clears tables and update sizing.
for bsuid, pp in updated.items():
pp.ensure_state()
return updated
def dump_active(
self,
) -> tuple[
dict[str, Position],
dict[str, Position]
]:
'''
Iterate all tabulated positions, render active positions to
a ``dict`` format amenable to serialization (via TOML) and drop
from state (``.pps``) as well as return in a ``dict`` all
``Position``s which have recently closed.
'''
# NOTE: newly closed positions are also important to report/return
# since a consumer, like an order mode UI ;), might want to react
# based on the closure (for example removing the breakeven line
# and clearing the entry from any lists/monitors).
closed_pp_objs: dict[str, Position] = {}
open_pp_objs: dict[str, Position] = {}
pp_objs = self.pps
for bsuid in list(pp_objs):
pp = pp_objs[bsuid]
# XXX: debug hook for size mismatches
# qqqbsuid = 320227571
# if bsuid == qqqbsuid:
# breakpoint()
pp.ensure_state()
if (
# "net-zero" is a "closed" position
pp.size == 0
# time-expired pps (normally derivatives) are "closed"
or (pp.expiry and pp.expiry < now())
):
# for expired cases
pp.size = 0
# NOTE: we DO NOT pop the pp here since it can still be
# used to check for duplicate clears that may come in as
# new transaction from some backend API and need to be
# ignored; the closed positions won't be written to the
# ``pps.toml`` since ``pp_active_entries`` above is what's
# written.
closed_pp_objs[bsuid] = pp
else:
open_pp_objs[bsuid] = pp
return open_pp_objs, closed_pp_objs
def to_toml(
self,
) -> dict[str, Any]:
active, closed = self.dump_active()
# ONLY dict-serialize all active positions; those that are closed
# we don't store in the ``pps.toml``.
to_toml_dict = {}
for bsuid, pos in active.items():
# keep the minimal amount of clears that make up this
# position since the last net-zero state.
pos.minimize_clears()
pos.ensure_state()
# serialize to pre-toml form
fqsn, asdict = pos.to_pretoml()
log.info(f'Updating active pp: {fqsn}')
# XXX: ugh, it's cuz we push the section under
# the broker name.. maybe we need to rethink this?
brokerless_key = fqsn.removeprefix(f'{self.brokername}.')
to_toml_dict[brokerless_key] = asdict
return to_toml_dict
def write_config(self) -> None:
'''
Write the current position table to the user's ``pps.toml``.
'''
# TODO: show diff output?
# https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
print(f'Updating ``pps.toml`` for {path}:\n')
# active, closed_pp_objs = table.dump_active()
pp_entries = self.to_toml()
self.conf[self.brokername][self.acctid] = pp_entries
# TODO: why tf haven't they already done this for inline
# tables smh..
enc = PpsEncoder(preserve=True)
# table_bs_type = type(toml.TomlDecoder().get_empty_inline_table())
enc.dump_funcs[
toml.decoder.InlineTableDict
] = enc.dump_inline_table
config.write(
self.conf,
'pps',
encoder=enc,
)
def load_pps_from_ledger(
brokername: str,
acctname: str,
# post normalization filter on ledger entries to be processed
filter_by: Optional[list[dict]] = None,
) -> tuple[
dict[str, Transaction],
dict[str, Position],
]:
'''
Open a ledger file by broker name and account and read in and
process any trade records into our normalized ``Transaction`` form
and then update the equivalent ``PpTable`` and deliver the two
bsuid-mapped dict-sets of the transactions and pps.
'''
with (
open_trade_ledger(brokername, acctname) as ledger,
open_pps(brokername, acctname) as table,
):
if not ledger:
# null case, no ledger file with content
return {}, {}
mod = get_brokermod(brokername)
src_records: dict[str, Transaction] = mod.norm_trade_records(ledger)
if filter_by:
records = {}
bsuids = set(filter_by)
for tid, r in src_records.items():
if r.bsuid in bsuids:
records[tid] = r
else:
records = src_records
updated = table.update_from_trans(records)
return records, updated
# TODO: instead see if we can hack tomli and tomli-w to do the same:
# - https://github.com/hukkin/tomli
# - https://github.com/hukkin/tomli-w
class PpsEncoder(toml.TomlEncoder):
'''
Special "styled" encoder that makes a ``pps.toml`` redable and
compact by putting `.clears` tables inline and everything else
flat-ish.
'''
separator = ','
def dump_list(self, v):
'''
Dump an inline list with a newline after every element and
with consideration for denoted inline table types.
'''
retval = "[\n"
for u in v:
if isinstance(u, toml.decoder.InlineTableDict):
out = self.dump_inline_table(u)
else:
out = str(self.dump_value(u))
retval += " " + out + "," + "\n"
retval += "]"
return retval
def dump_inline_table(self, section):
"""Preserve inline table in its compact syntax instead of expanding
into a subsection.
https://github.com/toml-lang/toml#user-content-inline-table
"""
val_list = []
for k, v in section.items():
# if isinstance(v, toml.decoder.InlineTableDict):
if isinstance(v, dict):
val = self.dump_inline_table(v)
else:
val = str(self.dump_value(v))
val_list.append(k + " = " + val)
retval = "{ " + ", ".join(val_list) + " }"
return retval
def dump_sections(self, o, sup):
retstr = ""
if sup != "" and sup[-1] != ".":
sup += '.'
retdict = self._dict()
arraystr = ""
for section in o:
qsection = str(section)
value = o[section]
if not re.match(r'^[A-Za-z0-9_-]+$', section):
qsection = toml.encoder._dump_str(section)
# arrayoftables = False
if (
self.preserve
and isinstance(value, toml.decoder.InlineTableDict)
):
retstr += (
qsection
+
" = "
+
self.dump_inline_table(o[section])
+
'\n' # only on the final terminating left brace
)
# XXX: this code i'm pretty sure is just blatantly bad
# and/or wrong..
# if isinstance(o[section], list):
# for a in o[section]:
# if isinstance(a, dict):
# arrayoftables = True
# if arrayoftables:
# for a in o[section]:
# arraytabstr = "\n"
# arraystr += "[[" + sup + qsection + "]]\n"
# s, d = self.dump_sections(a, sup + qsection)
# if s:
# if s[0] == "[":
# arraytabstr += s
# else:
# arraystr += s
# while d:
# newd = self._dict()
# for dsec in d:
# s1, d1 = self.dump_sections(d[dsec], sup +
# qsection + "." +
# dsec)
# if s1:
# arraytabstr += ("[" + sup + qsection +
# "." + dsec + "]\n")
# arraytabstr += s1
# for s1 in d1:
# newd[dsec + "." + s1] = d1[s1]
# d = newd
# arraystr += arraytabstr
elif isinstance(value, dict):
retdict[qsection] = o[section]
elif o[section] is not None:
retstr += (
qsection
+
" = "
+
str(self.dump_value(o[section]))
)
# if not isinstance(value, dict):
if not isinstance(value, toml.decoder.InlineTableDict):
# inline tables should not contain newlines:
# https://toml.io/en/v1.0.0#inline-table
retstr += '\n'
else:
raise ValueError(value)
retstr += arraystr
return (retstr, retdict)
@cm
def open_pps(
brokername: str,
acctid: str,
write_on_exit: bool = True,
) -> PpTable:
'''
Read out broker-specific position entries from
incremental update file: ``pps.toml``.
'''
conf, path = config.load('pps')
brokersection = conf.setdefault(brokername, {})
pps = brokersection.setdefault(acctid, {})
# TODO: ideally we can pass in an existing
# pps state to this right? such that we
# don't have to do a ledger reload all the
# time.. a couple ideas I can think of,
# - mirror this in some client side actor which
# does the actual ledger updates (say the paper
# engine proc if we decide to always spawn it?),
# - do diffs against updates from the ledger writer
# actor and the in-mem state here?
pp_objs = {}
table = PpTable(
brokername,
acctid,
pp_objs,
conf=conf,
)
# unmarshal/load ``pps.toml`` config entries into object form
# and update `PpTable` obj entries.
for fqsn, entry in pps.items():
bsuid = entry['bsuid']
# convert clears sub-tables (only in this form
# for toml re-presentation) back into a master table.
clears_list = entry['clears']
# index clears entries in "object" form by tid in a top
# level dict instead of a list (as is presented in our
# ``pps.toml``).
clears = pp_objs.setdefault(bsuid, {})
# TODO: should we make a ``Struct`` for clear/event entries?
# convert "clear events table" from the toml config (list of
# a dicts) and load it into object form for use in position
# processing of new clear events.
trans: list[Transaction] = []
for clears_table in clears_list:
tid = clears_table.pop('tid')
dtstr = clears_table['dt']
dt = pendulum.parse(dtstr)
clears_table['dt'] = dt
trans.append(Transaction(
fqsn=bsuid,
bsuid=bsuid,
tid=tid,
size=clears_table['size'],
price=clears_table['price'],
cost=clears_table['cost'],
dt=dt,
))
clears[tid] = clears_table
size = entry['size']
# TODO: remove, but handle old field name for now
ppu = entry.get('ppu', entry.get('be_price', 0))
split_ratio = entry.get('split_ratio')
expiry = entry.get('expiry')
if expiry:
expiry = pendulum.parse(expiry)
pp = pp_objs[bsuid] = Position(
Symbol.from_fqsn(fqsn, info={}),
size=size,
ppu=ppu,
split_ratio=split_ratio,
expiry=expiry,
bsuid=entry['bsuid'],
)
# XXX: super critical, we need to be sure to include
# all pps.toml clears to avoid reusing clears that were
# already included in the current incremental update
# state, since today's records may have already been
# processed!
for t in trans:
pp.add_clear(t)
# audit entries loaded from toml
pp.ensure_state()
try:
yield table
finally:
if write_on_exit:
table.write_config()
if __name__ == '__main__':
import sys
args = sys.argv
assert len(args) > 1, 'Specify account(s) from `brokers.toml`'
args = args[1:]
for acctid in args:
broker, name = acctid.split('.')
trans, updated_pps = load_pps_from_ledger(broker, name)
print(
f'Processing transactions into pps for {broker}:{acctid}\n'
f'{pformat(trans)}\n\n'
f'{pformat(updated_pps)}'
)
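
For orientation, the inline-table layout the encoder above produces in
``pps.toml`` looks roughly like the following (an illustrative sketch,
all keys and values hypothetical):

    [ib.algopaper."mnq.globex.20221216"]
    size = 1.0
    ppu = 12423.63
    bsuid = 515416577
    expiry = "2022-12-16T00:00:00+00:00"
    clears = [
     { dt = "2022-08-31T18:54:46+00:00", ppu = 12423.63, accum_size = 1.0, price = 12423.25, size = 1.0, cost = 0.57, tid = "0000e1a7.1" },
    ]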

View File

@ -32,22 +32,16 @@ def mk_marker_path(
style: str,
) -> QGraphicsPathItem:
'''
Add a marker to be displayed on the line wrapped in
a ``QGraphicsPathItem`` ready to be placed using scene coordinates
(not view).
"""Add a marker to be displayed on the line wrapped in a ``QGraphicsPathItem``
ready to be placed using scene coordinates (not view).
**Arguments**
style String indicating the style of marker to add:
``'<|'``, ``'|>'``, ``'>|'``, ``'|<'``, ``'<|>'``,
``'>|<'``, ``'^'``, ``'v'``, ``'o'``
size Size of the marker in pixels.
This code is taken nearly verbatim from the
`InfiniteLine.addMarker()` method but does not attempt to be aware
of low(er) level graphics controls and expects the output
polygon to be applied to a ``QGraphicsPathItem``.
'''
"""
path = QtGui.QPainterPath()
if style == 'o':
@ -93,8 +87,7 @@ def mk_marker_path(
class LevelMarker(QGraphicsPathItem):
'''
An arrow marker path graphic which redraws itself
'''An arrow marker path graphic which redraws itself
to the specified view coordinate level on each paint cycle.
'''
@ -111,8 +104,7 @@ class LevelMarker(QGraphicsPathItem):
# get polygon and scale
super().__init__()
# self.setScale(size, size)
self.setScale(size)
self.scale(size, size)
# internally generates path
self._style = None
@ -122,7 +114,6 @@ class LevelMarker(QGraphicsPathItem):
self.get_level = get_level
self._on_paint = on_paint
self.scene_x = lambda: chart.marker_right_points()[1]
self.level: float = 0
self.keep_in_view = keep_in_view
@ -158,9 +149,12 @@ class LevelMarker(QGraphicsPathItem):
def w(self) -> float:
return self.path_br().width()
def position_in_view(self) -> None:
'''
Show a pp off-screen indicator for a level label.
def position_in_view(
self,
# level: float,
) -> None:
'''Show a pp off-screen indicator for a level label.
This is like in fps games where you have a gps "nav" indicator
but your teammate is outside the range of view, except in 2D, on
@ -168,6 +162,7 @@ class LevelMarker(QGraphicsPathItem):
'''
level = self.get_level()
view = self.chart.getViewBox()
vr = view.state['viewRange']
ymn, ymx = vr[1]
@ -191,6 +186,7 @@ class LevelMarker(QGraphicsPathItem):
)
elif level < ymn: # pin to bottom of view
self.setPos(
QPointF(
x,
@ -215,8 +211,7 @@ class LevelMarker(QGraphicsPathItem):
w: QtWidgets.QWidget
) -> None:
'''
Core paint which we override to always update
'''Core paint which we override to always update
our marker position in scene coordinates from a
view coordinate "level".
@ -240,12 +235,11 @@ def qgo_draw_markers(
right_offset: float,
) -> float:
'''
Paint markers in ``pg.GraphicsItem`` style by first
"""Paint markers in ``pg.GraphicsItem`` style by first
removing the view transform for the painter, drawing the markers
in scene coords, then restoring the view coords.
'''
"""
# paint markers in native coordinate system
orig_tr = p.transform()
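
The off-screen pinning logic above reduces to clamping a y-level into the
visible range; a minimal sketch:

    def pin_to_view(level: float, ymn: float, ymx: float) -> float:
        # levels above the view pin to the top edge, below to the
        # bottom edge, otherwise the marker tracks the level directly
        return min(max(level, ymn), ymx)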

View File

@ -78,8 +78,6 @@ async def _async_main(
"""
from . import _display
from ._pg_overrides import _do_overrides
_do_overrides()
godwidget = main_widget
@ -109,8 +107,9 @@ async def _async_main(
# setup search widget and focus main chart view at startup
# search widget is a singleton alongside the godwidget
search = _search.SearchWidget(godwidget=godwidget)
# search.bar.unfocus()
# godwidget.hbox.addWidget(search)
search.bar.unfocus()
godwidget.hbox.addWidget(search)
godwidget.search = search
symbol, _, provider = sym.rpartition('.')
@ -179,6 +178,6 @@ def _main(
run_qtractor(
func=_async_main,
args=(sym, brokernames, piker_loglevel),
main_widget_type=GodWidget,
main_widget=GodWidget,
tractor_kwargs=tractor_kwargs,
)

View File

@ -39,17 +39,12 @@ class Axis(pg.AxisItem):
'''
A better axis that sizes tick contents considering font size.
Also includes tick values lru caching originally proposed upstream
but never accepted:
https://github.com/pyqtgraph/pyqtgraph/pull/2160
'''
def __init__(
self,
linkedsplits,
typical_max_str: str = '100 000.000',
text_color: str = 'bracket',
lru_cache_tick_strings: bool = True,
**kwargs
) -> None:
@ -96,34 +91,6 @@ class Axis(pg.AxisItem):
# size the pertinent axis dimension to a "typical value"
self.size_to_values()
# NOTE: requires override ``.tickValues()`` method seen below.
if lru_cache_tick_strings:
self.tickStrings = lru_cache(
maxsize=2**20
)(self.tickStrings)
# NOTE: only overriden to cast tick values entries into tuples
# for use with the lru caching.
def tickValues(
self,
minVal: float,
maxVal: float,
size: int,
) -> list[tuple[float, tuple[str]]]:
'''
Repack tick values into tuples for lru caching.
'''
ticks = []
for scalar, values in super().tickValues(minVal, maxVal, size):
ticks.append((
scalar,
tuple(values), # this
))
return ticks
@property
def text_color(self) -> str:
return self._text_color
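
The tuple repacking removed above exists because `functools.lru_cache`
hashes its arguments and lists aren't hashable; a minimal sketch (the
signature here is simplified from the real `tickStrings()`):

    from functools import lru_cache

    @lru_cache(maxsize=2**20)
    def tick_strings(values: tuple[float, ...], scale: float) -> list[str]:
        return [f'{v * scale:g}' for v in values]

    tick_strings((1.0, 2.0, 3.0), 1.0)  # second identical call is cached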

View File

@ -19,11 +19,7 @@ High level chart-widget apis.
'''
from __future__ import annotations
from typing import (
Iterator,
Optional,
TYPE_CHECKING,
)
from typing import Optional, TYPE_CHECKING
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import (
@ -72,9 +68,6 @@ from ._forms import FieldsForm
from .._profile import pg_profile_enabled, ms_slower_then
from ._overlay import PlotItemOverlay
from ._flows import Flow
from ._search import SearchWidget
from . import _pg_overrides as pgo
from .._profile import Profiler
if TYPE_CHECKING:
from ._display import DisplayState
@ -92,9 +85,6 @@ class GodWidget(QWidget):
modify them.
'''
search: SearchWidget
mode_name: str = 'god'
def __init__(
self,
@ -104,8 +94,6 @@ class GodWidget(QWidget):
super().__init__(parent)
self.search: Optional[SearchWidget] = None
self.hbox = QHBoxLayout(self)
self.hbox.setContentsMargins(0, 0, 0, 0)
self.hbox.setSpacing(6)
@ -127,10 +115,7 @@ class GodWidget(QWidget):
# self.vbox.addLayout(self.hbox)
self._chart_cache: dict[str, LinkedSplits] = {}
self.hist_linked: Optional[LinkedSplits] = None
self.rt_linked: Optional[LinkedSplits] = None
self._active_cursor: Optional[Cursor] = None
self.linkedsplits: Optional[LinkedSplits] = None
# assigned in the startup func `_async_main()`
self._root_n: trio.Nursery = None
@ -138,14 +123,6 @@ class GodWidget(QWidget):
self._widgets: dict[str, QWidget] = {}
self._resizing: bool = False
# TODO: do we need this, when would god get resized
# and the window does not? Never right?!
# self.reg_for_resize(self)
@property
def linkedsplits(self) -> LinkedSplits:
return self.rt_linked
# def init_timeframes_ui(self):
# self.tf_layout = QHBoxLayout()
# self.tf_layout.setSpacing(0)
@ -171,19 +148,19 @@ class GodWidget(QWidget):
def set_chart_symbol(
self,
symbol_key: str, # of form <fqsn>.<providername>
all_linked: tuple[LinkedSplits, LinkedSplits], # type: ignore
linkedsplits: LinkedSplits, # type: ignore
) -> None:
# re-sort org cache symbol list in LIFO order
cache = self._chart_cache
cache.pop(symbol_key, None)
cache[symbol_key] = all_linked
cache[symbol_key] = linkedsplits
def get_chart_symbol(
self,
symbol_key: str,
) -> tuple[LinkedSplits, LinkedSplits]: # type: ignore
) -> LinkedSplits: # type: ignore
return self._chart_cache.get(symbol_key)
async def load_symbol(
@ -205,33 +182,28 @@ class GodWidget(QWidget):
# fully qualified symbol name (SNS i guess is what we're making?)
fqsn = '.'.join([symbol_key, providername])
all_linked = self.get_chart_symbol(fqsn)
linkedsplits = self.get_chart_symbol(fqsn)
order_mode_started = trio.Event()
if not self.vbox.isEmpty():
# XXX: seems to make switching slower?
# qframe = self.hist_linked.chart.qframe
# if qframe.sidepane is self.search:
# qframe.hbox.removeWidget(self.search)
for linked in [self.rt_linked, self.hist_linked]:
# XXX: this is CRITICAL especially with pixel buffer caching
linked.hide()
linked.unfocus()
self.linkedsplits.hide()
self.linkedsplits.unfocus()
# XXX: pretty sure we don't need this
# remove any existing plots?
# XXX: ahh we might want to support cache unloading..
# self.vbox.removeWidget(linked)
# self.vbox.removeWidget(self.linkedsplits)
# switching to a new viewable chart
if all_linked is None or reset:
if linkedsplits is None or reset:
from ._display import display_symbol_data
# we must load a fresh linked charts set
self.rt_linked = rt_charts = LinkedSplits(self)
self.hist_linked = hist_charts = LinkedSplits(self)
linkedsplits = LinkedSplits(self)
# spawn new task to start up and update new sub-chart instances
self._root_n.start_soon(
@ -243,70 +215,43 @@ class GodWidget(QWidget):
order_mode_started,
)
# self.vbox.addWidget(hist_charts)
self.vbox.addWidget(rt_charts)
self.set_chart_symbol(
fqsn,
(hist_charts, rt_charts),
)
for linked in [hist_charts, rt_charts]:
linked.show()
linked.focus()
self.set_chart_symbol(fqsn, linkedsplits)
self.vbox.addWidget(linkedsplits)
linkedsplits.show()
linkedsplits.focus()
await trio.sleep(0)
else:
# symbol is already loaded and ems ready
order_mode_started.set()
self.hist_linked, self.rt_linked = all_linked
for linked in all_linked:
# TODO:
# - we'll probably want per-instrument/provider state here?
# change the order config form over to the new chart
# XXX: since the pp config is a singleton widget we have to
# also switch it over to the new chart's internal layout
# self.linkedsplits.chart.qframe.hbox.removeWidget(self.pp_pane)
chart = linkedsplits.chart
# chart is already in memory so just focus it
linked.show()
linked.focus()
linked.graphics_cycle()
linkedsplits.show()
linkedsplits.focus()
linkedsplits.graphics_cycle()
await trio.sleep(0)
# resume feeds *after* rendering chart view asap
chart = linked.chart
if chart:
chart.resume_all_feeds()
# TODO: we need a check to see if the chart
# last had the xlast in view, if so then shift so it's
# still in view, if the user was viewing history then
# do nothing yah?
self.rt_linked.chart.default_view()
chart.default_view()
# if a history chart instance is already up then
# set the search widget as its sidepane.
hist_chart = self.hist_linked.chart
if hist_chart:
hist_chart.qframe.set_sidepane(self.search)
# NOTE: this is really stupid/hard to follow.
# we have to reposition the active position nav
# **AFTER** applying the search bar as a sidepane
# to the newly switched to symbol.
await trio.sleep(0)
# TODO: probably stick this in some kinda `LooknFeel` API?
for tracker in self.rt_linked.mode.trackers.values():
pp_nav = tracker.nav
if tracker.live_pp.size:
pp_nav.show()
pp_nav.hide_info()
else:
pp_nav.hide()
# set window titlebar info
symbol = self.rt_linked.symbol
self.linkedsplits = linkedsplits
symbol = linkedsplits.symbol
if symbol is not None:
self.window.setWindowTitle(
f'{symbol.front_fqsn()} '
@ -323,23 +268,11 @@ class GodWidget(QWidget):
'''
# go back to view-mode focus (aka chart focus)
self.clearFocus()
chart = self.rt_linked.chart
if chart:
chart.setFocus()
self.linkedsplits.chart.setFocus()
def reg_for_resize(
self,
widget: QWidget,
) -> None:
getattr(widget, 'on_resize')
self._widgets[widget.mode_name] = widget
def on_win_resize(self, event: QtCore.QEvent) -> None:
def resizeEvent(self, event: QtCore.QEvent) -> None:
'''
Top level god widget handler from window (the real yahweh) resize
events such that any registered widgets which wish to be
notified are invoked using our pythonic `.on_resize()` method
api.
Top level god widget resize handler.
Where we do UX magic to make things not suck B)
@ -355,28 +288,6 @@ class GodWidget(QWidget):
self._resizing = False
# on_resize = on_win_resize
def get_cursor(self) -> Cursor:
return self._active_cursor
def iter_linked(self) -> Iterator[LinkedSplits]:
for linked in [self.hist_linked, self.rt_linked]:
yield linked
def resize_all(self) -> None:
'''
Dynamic resize sequence: adjusts all sub-widgets/charts to
sensible default ratios of what space is detected as available
on the display / window.
'''
rt_linked = self.rt_linked
rt_linked.set_split_sizes()
self.rt_linked.resize_sidepanes()
self.hist_linked.resize_sidepanes(from_linked=rt_linked)
self.search.on_resize()
class ChartnPane(QFrame):
'''
@ -389,9 +300,9 @@ class ChartnPane(QFrame):
https://doc.qt.io/qt-5/qwidget.html#composite-widgets
'''
sidepane: FieldsForm | SearchWidget
sidepane: FieldsForm
hbox: QHBoxLayout
chart: Optional[ChartPlotWidget] = None
chart: Optional['ChartPlotWidget'] = None
def __init__(
self,
@ -403,7 +314,7 @@ class ChartnPane(QFrame):
super().__init__(parent)
self._sidepane = sidepane
self.sidepane = sidepane
self.chart = None
hbox = self.hbox = QHBoxLayout(self)
@ -411,21 +322,6 @@ class ChartnPane(QFrame):
hbox.setContentsMargins(0, 0, 0, 0)
hbox.setSpacing(3)
def set_sidepane(
self,
sidepane: FieldsForm | SearchWidget,
) -> None:
# add sidepane **after** chart; place it on axis side
self.hbox.addWidget(
sidepane,
alignment=Qt.AlignTop
)
self._sidepane = sidepane
def sidepane(self) -> FieldsForm | SearchWidget:
return self._sidepane
class LinkedSplits(QWidget):
'''
@ -460,7 +356,6 @@ class LinkedSplits(QWidget):
self.splitter = QSplitter(QtCore.Qt.Vertical)
self.splitter.setMidLineWidth(0)
self.splitter.setHandleWidth(2)
self.splitter.splitterMoved.connect(self.on_splitter_adjust)
self.layout = QVBoxLayout(self)
self.layout.setContentsMargins(0, 0, 0, 0)
@ -473,16 +368,6 @@ class LinkedSplits(QWidget):
self._symbol: Symbol = None
def on_splitter_adjust(
self,
pos: int,
index: int,
) -> None:
# print(f'splitter moved pos:{pos}, index:{index}')
godw = self.godwidget
if self is godw.rt_linked:
godw.search.on_resize()
def graphics_cycle(self, **kwargs) -> None:
from . import _display
ds = self.display_state
@ -498,31 +383,27 @@ class LinkedSplits(QWidget):
prop: Optional[float] = None,
) -> None:
'''
Set the proportion of space allocated for linked subcharts.
'''Set the proportion of space allocated for linked subcharts.
'''
ln = len(self.subplots) or 1
ln = len(self.subplots)
# proportion allocated to consumer subcharts
if not prop:
prop = 3/8
prop = 3/8*5/8
h = self.height()
histview_h = h * (6/16)
h = h - histview_h
# if ln < 2:
# prop = 3/8*5/8
# elif ln >= 2:
# prop = 3/8
major = 1 - prop
min_h_ind = int((h * prop) / ln)
sizes = [
int(histview_h),
int(h * major),
]
min_h_ind = int((self.height() * prop) / ln)
# give all subcharts the same remaining proportional height
sizes = [int(self.height() * major)]
sizes.extend([min_h_ind] * ln)
if self.godwidget.rt_linked is self:
self.splitter.setSizes(sizes)
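# Worked example of the sizing above: for a 1600px tall linked-charts
# widget with one subplot, the history view gets 1600 * 6/16 = 600px;
# of the remaining 1000px the main chart gets 1000 * 5/8 = 625px and
# the subplot int(1000 * 3/8) = 375px, summing back to 1600.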
def focus(self) -> None:
@ -571,6 +452,13 @@ class LinkedSplits(QWidget):
# add crosshair graphic
self.chart.addItem(self.cursor)
# axis placement
if (
_xaxis_at == 'bottom' and
'bottom' in self.chart.plotItem.axes
):
self.chart.hideAxis('bottom')
# style?
self.chart.setFrameStyle(
QFrame.StyledPanel |
@ -616,15 +504,10 @@ class LinkedSplits(QWidget):
'bottom': xaxis,
}
if sidepane is not False:
parent = qframe = ChartnPane(
qframe = ChartnPane(
sidepane=sidepane,
parent=self.splitter,
)
else:
parent = self.splitter
qframe = None
cpw = ChartPlotWidget(
# this name will be used to register the primary
@ -632,7 +515,7 @@ class LinkedSplits(QWidget):
name=name,
data_key=array_key or name,
parent=parent,
parent=qframe,
linkedsplits=self,
axisItems=axes,
**cpw_kwargs,
@ -640,15 +523,6 @@ class LinkedSplits(QWidget):
cpw.hideAxis('left')
cpw.hideAxis('bottom')
if (
_xaxis_at == 'bottom' and (
self.xaxis_chart
or (
not self.subplots
and self.xaxis_chart is None
)
)
):
if self.xaxis_chart:
self.xaxis_chart.hideAxis('bottom')
@ -657,10 +531,13 @@ class LinkedSplits(QWidget):
# https://github.com/pikers/pyqtgraph/tree/plotitemoverlay_onto_pg_master
# _ = self.xaxis_chart.removeAxis('bottom', unlink=False)
# assert 'bottom' not in self.xaxis_chart.plotItem.axes
self.xaxis_chart = cpw
cpw.showAxis('bottom')
if qframe is not None:
if self.xaxis_chart is None:
self.xaxis_chart = cpw
qframe.chart = cpw
qframe.hbox.addWidget(cpw)
@ -670,15 +547,13 @@ class LinkedSplits(QWidget):
assert cpw.parent() == qframe
# add sidepane **after** chart; place it on axis side
qframe.set_sidepane(sidepane)
# qframe.hbox.addWidget(
# sidepane,
# alignment=Qt.AlignTop
# )
qframe.hbox.addWidget(
sidepane,
alignment=Qt.AlignTop
)
cpw.sidepane = sidepane
cpw.plotItem.vb.linked = self
cpw.plotItem.vb.linkedsplits = self
cpw.setFrameStyle(
QtWidgets.QFrame.StyledPanel
# | QtWidgets.QFrame.Plain
@ -739,8 +614,9 @@ class LinkedSplits(QWidget):
if not _is_main:
# track by name
self.subplots[name] = cpw
if qframe is not None:
self.splitter.addWidget(qframe)
# scale split regions
self.set_split_sizes()
else:
assert style == 'bar', 'main chart must be OHLC'
@ -766,28 +642,19 @@ class LinkedSplits(QWidget):
def resize_sidepanes(
self,
from_linked: Optional[LinkedSplits] = None,
) -> None:
'''
Size all sidepanes based on the OHLC "main" plot and its
sidepane width.
'''
if from_linked:
main_chart = from_linked.chart
else:
main_chart = self.chart
if main_chart and main_chart.sidepane:
if main_chart:
sp_w = main_chart.sidepane.width()
for name, cpw in self.subplots.items():
cpw.sidepane.setMinimumWidth(sp_w)
cpw.sidepane.setMaximumWidth(sp_w)
if from_linked:
self.chart.sidepane.setMinimumWidth(sp_w)
class ChartPlotWidget(pg.PlotWidget):
'''
@ -814,8 +681,7 @@ class ChartPlotWidget(pg.PlotWidget):
# a better one?
def mk_vb(self, name: str) -> ChartView:
cv = ChartView(name)
# link new view to chart's view set
cv.linked = self.linked
cv.linkedsplits = self.linked
return cv
def __init__(
@ -834,7 +700,6 @@ class ChartPlotWidget(pg.PlotWidget):
static_yrange: Optional[tuple[float, float]] = None,
parent=None,
**kwargs,
):
'''
@ -847,20 +712,16 @@ class ChartPlotWidget(pg.PlotWidget):
# NOTE: must be set before calling ``.mk_vb()``
self.linked = linkedsplits
self.sidepane: Optional[FieldsForm] = None
# source of our custom interactions
self.cv = cv = self.mk_vb(name)
pi = pgo.PlotItem(viewBox=cv, **kwargs)
super().__init__(
background=hcolor(view_color),
viewBox=cv,
# parent=None,
# plotItem=None,
# antialias=True,
parent=parent,
plotItem=pi,
**kwargs
)
# give viewbox as reference to chart
@ -899,18 +760,9 @@ class ChartPlotWidget(pg.PlotWidget):
self.pi_overlay: PlotItemOverlay = PlotItemOverlay(self.plotItem)
# idempotent startup flag for auto-yrange subsys
# to detect the "first time" y-domain graphics begin
# to be shown in the (main) graphics view.
self._on_screen: bool = False
def resume_all_feeds(self):
try:
for feed in self._feeds.values():
self.linked.godwidget._root_n.start_soon(feed.resume)
except RuntimeError:
# TODO: cancel the qtractor runtime here?
raise
def pause_all_feeds(self):
for feed in self._feeds.values():
@ -1007,9 +859,7 @@ class ChartPlotWidget(pg.PlotWidget):
def default_view(
self,
bars_from_y: int = int(616 * 3/8),
y_offset: int = 0,
do_ds: bool = True,
bars_from_y: int = 3000,
) -> None:
'''
@ -1047,12 +897,8 @@ class ChartPlotWidget(pg.PlotWidget):
# terms now that we've scaled either by user control
# or to the default set of bars as per the immediate block
# above.
if not y_offset:
marker_pos, l1_len = self.pre_l1_xs()
end = xlast + l1_len + 1
else:
end = xlast + y_offset + 1
begin = end - (r - l)
# for debugging
@ -1074,11 +920,8 @@ class ChartPlotWidget(pg.PlotWidget):
max=end,
padding=0,
)
if do_ds:
self.view.maybe_downsample_graphics()
view._set_yrange()
try:
self.linked.graphics_cycle()
except IndexError:
@ -1151,7 +994,7 @@ class ChartPlotWidget(pg.PlotWidget):
axis_side: str = 'right',
axis_kwargs: dict = {},
) -> pgo.PlotItem:
) -> pg.PlotItem:
# Custom viewbox impl
cv = self.mk_vb(name)
@ -1160,14 +1003,13 @@ class ChartPlotWidget(pg.PlotWidget):
allowed_sides = {'left', 'right'}
if axis_side not in allowed_sides:
raise ValueError(f'``axis_side`` must be in {allowed_sides}')
yaxis = PriceAxis(
orientation=axis_side,
linkedsplits=self.linked,
**axis_kwargs,
)
pi = pgo.PlotItem(
pi = pg.PlotItem(
parent=self.plotItem,
name=name,
enableMenu=False,
@ -1180,27 +1022,19 @@ class ChartPlotWidget(pg.PlotWidget):
)
pi.hideButtons()
# cv.enable_auto_yrange(self.view)
cv.enable_auto_yrange()
# compose this new plot's graphics with the current chart's
# existing one but with separate axes as neede and specified.
self.pi_overlay.add_plotitem(
pi,
index=index,
# only link x-axes and
# don't relay any ``ViewBox`` derived event
# handlers since we only care about keeping charts
# x-synced on interaction (at least for now).
# only link x-axes,
link_axes=(0,),
)
# connect auto-yrange callbacks *from* this new
# view **to** this parent and likewise *from* the
# main/parent chart back *to* the created overlay.
cv.enable_auto_yrange(src_vb=self.view)
# makes it so that interaction on the new overlay will reflect
# back on the main chart (which overlay was added to).
self.view.enable_auto_yrange(src_vb=cv)
# add axis title
# TODO: do we want this API to still work?
# raxis = pi.getAxis('right')
@ -1262,7 +1096,7 @@ class ChartPlotWidget(pg.PlotWidget):
# TODO: this probably needs its own method?
if overlay:
if isinstance(overlay, pgo.PlotItem):
if isinstance(overlay, pg.PlotItem):
if overlay not in self.pi_overlay.overlays:
raise RuntimeError(
f'{overlay} must be from `.plotitem_overlay()`'
@ -1421,7 +1255,8 @@ class ChartPlotWidget(pg.PlotWidget):
If ``bars_range`` is provided use that range.
'''
profiler = Profiler(
# print(f'Chart[{self.name}].maxmin()')
profiler = pg.debug.Profiler(
msg=f'`{str(self)}.maxmin(name={name})`: `{self.name}`',
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
@ -1452,18 +1287,11 @@ class ChartPlotWidget(pg.PlotWidget):
key = round(lbar), round(rbar)
res = flow.maxmin(*key)
if (
res is None
):
log.warning(
if res == (None, None):
log.error(
f"{flow_key} no mxmn for bars_range => {key} !?"
)
res = 0, 0
if not self._on_screen:
self.default_view(do_ds=False)
self._on_screen = True
profiler(f'yrange mxmn: {key} -> {res}')
# print(f'{flow_key} yrange mxmn: {key} -> {res}')
return res

View File

@ -223,20 +223,14 @@ def ds_m4(
assert frames >= (xrange / uppx)
# call into ``numba``
(
nb,
x_out,
y_out,
ymn,
ymx,
) = _m4(
nb, i_win, y_out = _m4(
x,
y,
frames,
# TODO: see func below..
# x_out,
# i_win,
# y_out,
# first index in x data to start at
@ -249,11 +243,10 @@ def ds_m4(
# filter out any overshoot in the input allocation arrays by
# removing zero-ed tail entries which should start at a certain
# index.
x_out = x_out[x_out != 0]
y_out = y_out[:x_out.size]
i_win = i_win[i_win != 0]
y_out = y_out[:i_win.size]
# print(f'M4 output ymn, ymx: {ymn},{ymx}')
return nb, x_out, y_out, ymn, ymx
return nb, i_win, y_out
@jit(
@ -267,8 +260,8 @@ def _m4(
frames: int,
# TODO: using this approach, having the ``.zeros()`` alloc lines
# below in pure python, there were segs faults and alloc crashes..
# TODO: using this approach by having the ``.zeros()`` alloc lines
# below, in put python was causing segs faults and alloc crashes..
# we might need to see how it behaves with shm arrays and consider
# allocating them once at startup?
@ -281,22 +274,14 @@ def _m4(
x_start: int,
step: float,
) -> tuple[
int,
np.ndarray,
np.ndarray,
float,
float,
]:
'''
Implementation of the m4 algorithm in ``numba``:
http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
) -> int:
# nbins = len(i_win)
# count = len(xs)
'''
# these are pre-allocated and mutated by ``numba``
# code in-place.
y_out = np.zeros((frames, 4), ys.dtype)
x_out = np.zeros(frames, xs.dtype)
i_win = np.zeros(frames, xs.dtype)
bincount = 0
x_left = x_start
@ -310,34 +295,24 @@ def _m4(
# set all bins in the left-most entry to the starting left-most x value
# (aka a row broadcast).
x_out[bincount] = x_left
i_win[bincount] = x_left
# set all y-values to the first value passed in.
y_out[bincount] = ys[0]
# full input y-data mx and mn
mx: float = -np.inf
mn: float = np.inf
# compute OHLC style max / min values per window sized x-frame.
for i in range(len(xs)):
x = xs[i]
y = ys[i]
if x < x_left + step: # the current window "step" is [bin, bin+1)
ymn = y_out[bincount, 1] = min(y, y_out[bincount, 1])
ymx = y_out[bincount, 2] = max(y, y_out[bincount, 2])
y_out[bincount, 1] = min(y, y_out[bincount, 1])
y_out[bincount, 2] = max(y, y_out[bincount, 2])
y_out[bincount, 3] = y
mx = max(mx, ymx)
mn = min(mn, ymn)
else:
# Find the next bin
while x >= x_left + step:
x_left += step
bincount += 1
x_out[bincount] = x_left
i_win[bincount] = x_left
y_out[bincount] = y
return bincount, x_out, y_out, mn, mx
return bincount, i_win, y_out
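
The M4 rule the ``numba`` kernel above implements (keep the first, min, max
and last y-value per x-window) can be sketched in pure Python, minus all of
the pre-allocation and edge-case handling (illustrative only):

    def m4_bin(xs, ys, x_left, step):
        # map each point into its window, then reduce each window to
        # (open, min, max, close) in OHLC style
        bins: dict[int, list[float]] = {}
        for x, y in zip(xs, ys):
            frame = int((x - x_left) // step)
            omxc = bins.setdefault(frame, [y, y, y, y])
            omxc[1] = min(omxc[1], y)
            omxc[2] = max(omxc[2], y)
            omxc[3] = y
        return bins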

View File

@ -18,13 +18,8 @@
Mouse interaction graphics
"""
from __future__ import annotations
from functools import partial
from typing import (
Optional,
Callable,
TYPE_CHECKING,
)
from typing import Optional, Callable
import inspect
import numpy as np
@ -41,12 +36,6 @@ from ._style import (
from ._axes import YAxisLabel, XAxisLabel
from ..log import get_logger
if TYPE_CHECKING:
from ._chart import (
ChartPlotWidget,
LinkedSplits,
)
log = get_logger(__name__)
@ -69,7 +58,7 @@ class LineDot(pg.CurvePoint):
curve: pg.PlotCurveItem,
index: int,
plot: ChartPlotWidget, # type: ignore # noqa
plot: 'ChartPlotWidget', # type: ignore # noqa
pos=None,
color: str = 'default_light',
@ -162,7 +151,7 @@ class ContentsLabel(pg.LabelItem):
def __init__(
self,
# chart: ChartPlotWidget, # noqa
# chart: 'ChartPlotWidget', # noqa
view: pg.ViewBox,
anchor_at: str = ('top', 'right'),
@ -255,7 +244,7 @@ class ContentsLabels:
'''
def __init__(
self,
linkedsplits: LinkedSplits, # type: ignore # noqa
linkedsplits: 'LinkedSplits', # type: ignore # noqa
) -> None:
@ -300,7 +289,7 @@ class ContentsLabels:
def add_label(
self,
chart: ChartPlotWidget, # type: ignore # noqa
chart: 'ChartPlotWidget', # type: ignore # noqa
name: str,
anchor_at: tuple[str, str] = ('top', 'left'),
update_func: Callable = ContentsLabel.update_from_value,
@ -327,7 +316,7 @@ class Cursor(pg.GraphicsObject):
def __init__(
self,
linkedsplits: LinkedSplits, # noqa
linkedsplits: 'LinkedSplits', # noqa
digits: int = 0
) -> None:
@ -336,8 +325,6 @@ class Cursor(pg.GraphicsObject):
self.linked = linkedsplits
self.graphics: dict[str, pg.GraphicsObject] = {}
self.xaxis_label: Optional[XAxisLabel] = None
self.always_show_xlabel: bool = True
self.plots: list['PlotChartWidget'] = [] # type: ignore # noqa
self.active_plot = None
self.digits: int = digits
@ -398,7 +385,7 @@ class Cursor(pg.GraphicsObject):
def add_plot(
self,
plot: ChartPlotWidget, # noqa
plot: 'ChartPlotWidget', # noqa
digits: int = 0,
) -> None:
@ -482,7 +469,7 @@ class Cursor(pg.GraphicsObject):
def add_curve_cursor(
self,
plot: ChartPlotWidget, # noqa
plot: 'ChartPlotWidget', # noqa
curve: 'PlotCurveItem', # noqa
) -> LineDot:
@ -504,29 +491,17 @@ class Cursor(pg.GraphicsObject):
log.debug(f"{(action, plot.name)}")
if action == 'Enter':
self.active_plot = plot
plot.linked.godwidget._active_cursor = self
# show horiz line and y-label
self.graphics[plot]['hl'].show()
self.graphics[plot]['yl'].show()
if (
not self.always_show_xlabel
and not self.xaxis_label.isVisible()
):
self.xaxis_label.show()
else: # Leave
# Leave: hide horiz line and y-label
else:
# hide horiz line and y-label
self.graphics[plot]['hl'].hide()
self.graphics[plot]['yl'].hide()
if (
not self.always_show_xlabel
and self.xaxis_label.isVisible()
):
self.xaxis_label.hide()
def mouseMoved(
self,
coords: tuple[QPointF], # noqa
@ -615,10 +590,6 @@ class Cursor(pg.GraphicsObject):
left_axis_width += left.width()
# map back to abs (label-local) coordinates
if (
self.always_show_xlabel
or self.xaxis_label.isVisible()
):
self.xaxis_label.update_label(
abs_pos=(
plot.mapFromView(QPointF(vl_x, iy)) -

View File

@ -44,7 +44,6 @@ from ._style import hcolor
# ds_m4,
# )
from ..log import get_logger
from .._profile import Profiler
log = get_logger(__name__)
@ -332,7 +331,7 @@ class Curve(pg.GraphicsObject):
) -> None:
profiler = Profiler(
profiler = pg.debug.Profiler(
msg=f'Curve.paint(): `{self._name}`',
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
@ -467,7 +466,7 @@ class StepCurve(Curve):
def sub_paint(
self,
p: QPainter,
profiler: Profiler,
profiler: pg.debug.Profiler,
) -> None:
# p.drawLines(*tuple(filter(bool, self._last_step_lines)))
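
# Editor's note: the ``pg.debug.Profiler`` these hunks toggle between
# (and piker's own ``Profiler`` wrapper) follows the same checkpoint
# pattern; a hedged usage sketch with only the standard upstream
# kwargs (``ms_threshold`` appears to be a piker-side extension):
import pyqtgraph as pg

def paint_cycle() -> None:
    profiler = pg.debug.Profiler(
        msg='paint cycle',
        disabled=False,  # emit timing output
        delayed=True,    # buffer lines until .finish()
    )
    # ... draw some graphics ...
    profiler('drew curves')  # named checkpoint with elapsed time
    profiler.finish()
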

View File

@ -21,20 +21,19 @@ this module ties together quote and computational (fsp) streams with
graphics update methods via our custom ``pyqtgraph`` charting api.
'''
from dataclasses import dataclass
from functools import partial
import time
from typing import Optional, Any, Callable
import numpy as np
import tractor
import trio
import pendulum
import pyqtgraph as pg
# from .. import brokers
from ..data.feed import (
open_feed,
Feed,
)
from ..data.types import Struct
from ..data.feed import open_feed
from ._axes import YAxisLabel
from ._chart import (
ChartPlotWidget,
@ -42,36 +41,29 @@ from ._chart import (
GodWidget,
)
from ._l1 import L1Labels
from ._style import hcolor
from ._fsp import (
update_fsp_chart,
start_fsp_displays,
has_vlm,
open_vlm_displays,
)
from ..data._sharedmem import (
ShmArray,
)
from ..data._sharedmem import ShmArray
from ..data._source import tf_in_1s
from ._forms import (
FieldsForm,
mk_order_pane_layout,
)
from .order_mode import (
open_order_mode,
OrderMode,
)
from .order_mode import open_order_mode
from .._profile import (
pg_profile_enabled,
ms_slower_then,
)
from ..log import get_logger
from .._profile import Profiler
log = get_logger(__name__)
# TODO: load this from a config.toml!
_quote_throttle_rate: int = 16 # Hz
_quote_throttle_rate: int = 22 # Hz
# a working tick-type-classes template
@ -113,10 +105,6 @@ def chart_maxmin(
mn, mx = out
mx_vlm_in_view = 0
# TODO: we need to NOT call this to avoid a manual
# np.max/min trigger and especially on the vlm_chart
# flows which aren't shown.. like vlm?
if vlm_chart:
out = vlm_chart.maxmin()
if out:
@ -130,105 +118,39 @@ def chart_maxmin(
)
class DisplayState(Struct):
@dataclass
class DisplayState:
'''
Chart-local real-time graphics state container.
'''
godwidget: GodWidget
quotes: dict[str, Any]
maxmin: Callable
ohlcv: ShmArray
hist_ohlcv: ShmArray
# high level chart handles
linked: LinkedSplits
chart: ChartPlotWidget
vlm_chart: ChartPlotWidget
# axis labels
l1: L1Labels
last_price_sticky: YAxisLabel
hist_last_price_sticky: YAxisLabel
vlm_sticky: YAxisLabel
# misc state tracking
vars: dict[str, Any] = {
'tick_margin': 0,
'i_last': 0,
'i_last_append': 0,
'last_mx_vlm': 0,
'last_mx': 0,
'last_mn': 0,
}
vars: dict[str, Any]
vlm_chart: Optional[ChartPlotWidget] = None
vlm_sticky: Optional[YAxisLabel] = None
wap_in_history: bool = False
def incr_info(
self,
chart: Optional[ChartPlotWidget] = None,
shm: Optional[ShmArray] = None,
state: Optional[dict] = None, # pass in a copy if you don't want it mutated
update_state: bool = True,
update_uppx: float = 16,
) -> tuple:
shm = shm or self.ohlcv
chart = chart or self.chart
state = state or self.vars
if not update_state:
state = state.copy()
# compute the first available graphic's x-units-per-pixel
uppx = chart.view.x_uppx()
# NOTE: this used to be implemented in a dedicated
# "increment task": ``check_for_new_bars()`` but it doesn't
# make sense to do a whole task switch when we can just do
# this simple index-diff and all the fsp sub-curve graphics
# are diffed on each draw cycle anyway; so updates to the
# "curve" length is already automatic.
# increment the view position by the sample offset.
i_step = shm.index
i_diff = i_step - state['i_last']
state['i_last'] = i_step
append_diff = i_step - state['i_last_append']
# update the "last datum" (aka extending the flow graphic with
# new data) only if the number of unit steps is >= the number of
# such unit steps per pixel (aka uppx). Iow, if the zoom level
# is such that a datum(s) update to graphics wouldn't span
# to a new pixel, we don't update yet.
do_append = (append_diff >= uppx)
if do_append:
state['i_last_append'] = i_step
do_rt_update = uppx < update_uppx
_, _, _, r = chart.bars_range()
liv = r >= i_step
# TODO: pack this into a struct
return (
uppx,
liv,
do_append,
i_diff,
append_diff,
do_rt_update,
)
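
# Editor's note: a worked numeric example of the append-gating math
# above (values illustrative): with 16 x-units per pixel, new samples
# only trigger a graphics append once they span at least one pixel.
uppx = 16                          # datums rendered per horizontal pixel
append_diff = 10                   # samples since the last appended datum
do_append = append_diff >= uppx    # False: < 1 pixel of new data, skip
do_rt_update = uppx < 16           # False: zoomed out, skip rt updates too
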
async def graphics_update_loop(
nurse: trio.Nursery,
godwidget: GodWidget,
feed: Feed,
linked: LinkedSplits,
stream: tractor.MsgStream,
ohlcv: np.ndarray,
wap_in_history: bool = False,
vlm_chart: Optional[ChartPlotWidget] = None,
@ -249,29 +171,22 @@ async def graphics_update_loop(
# of copying it from last bar's close
# - 1-5 sec bar lookback-autocorrection like tws does?
# (would require a background history checker task)
linked: LinkedSplits = godwidget.rt_linked
display_rate = godwidget.window.current_screen().refreshRate()
display_rate = linked.godwidget.window.current_screen().refreshRate()
fast_chart = linked.chart
hist_chart = godwidget.hist_linked.chart
ohlcv = feed.rt_shm
hist_ohlcv = feed.hist_shm
chart = linked.chart
# update last price sticky
last_price_sticky = fast_chart._ysticks[fast_chart.name]
last_price_sticky = chart._ysticks[chart.name]
last_price_sticky.update_from_data(
*ohlcv.array[-1][['index', 'close']]
)
hist_last_price_sticky = hist_chart._ysticks[hist_chart.name]
hist_last_price_sticky.update_from_data(
*hist_ohlcv.array[-1][['index', 'close']]
)
if vlm_chart:
vlm_sticky = vlm_chart._ysticks['volume']
maxmin = partial(
chart_maxmin,
fast_chart,
chart,
ohlcv,
vlm_chart,
)
@ -285,15 +200,15 @@ async def graphics_update_loop(
last, volume = ohlcv.array[-1][['close', 'volume']]
symbol = fast_chart.linked.symbol
symbol = chart.linked.symbol
l1 = L1Labels(
fast_chart,
chart,
# determine precision/decimal lengths
digits=symbol.tick_size_digits,
size_digits=symbol.lot_size_digits,
)
fast_chart._l1_labels = l1
chart._l1_labels = l1
# TODO:
# - in theory we should be able to read buffer data faster
@ -303,22 +218,46 @@ async def graphics_update_loop(
# levels this might be dark volume we need to
# present differently -> likely dark vlm
tick_size = fast_chart.linked.symbol.tick_size
tick_size = chart.linked.symbol.tick_size
tick_margin = 3 * tick_size
fast_chart.show()
chart.show()
# view = chart.view
last_quote = time.time()
i_last = ohlcv.index
# async def iter_drain_quotes():
# # NOTE: all code below this loop is expected to be synchronous
# # and thus draw instructions are not picked up until the next
# # wait / iteration.
# async for quotes in stream:
# while True:
# try:
# moar = stream.receive_nowait()
# except trio.WouldBlock:
# yield quotes
# break
# else:
# for sym, quote in moar.items():
# ticks_frame = quote.get('ticks')
# if ticks_frame:
# quotes[sym].setdefault(
# 'ticks', []).extend(ticks_frame)
# print('pulled extra')
# yield quotes
# async for quotes in iter_drain_quotes():
ds = linked.display_state = DisplayState(**{
'godwidget': godwidget,
'quotes': {},
'linked': linked,
'maxmin': maxmin,
'ohlcv': ohlcv,
'hist_ohlcv': hist_ohlcv,
'chart': fast_chart,
'chart': chart,
'last_price_sticky': last_price_sticky,
'hist_last_price_sticky': hist_last_price_sticky,
'vlm_chart': vlm_chart,
'vlm_sticky': vlm_sticky,
'l1': l1,
'vars': {
@ -331,69 +270,9 @@ async def graphics_update_loop(
}
})
if vlm_chart:
vlm_sticky = vlm_chart._ysticks['volume']
ds.vlm_chart = vlm_chart
ds.vlm_sticky = vlm_sticky
fast_chart.default_view()
# TODO: probably factor this into some kinda `DisplayState`
# API that can be reused at least in terms of pulling view
# params (eg ``.bars_range()``).
async def increment_history_view():
i_last = hist_ohlcv.index
state = ds.vars.copy() | {
'i_last_append': i_last,
'i_last': i_last,
}
_, hist_step_size_s, _ = feed.get_ds_info()
async with feed.index_stream(
# int(hist_step_size_s)
# TODO: seems this is more reliable at keeping the slow
# chart incremented in view more correctly?
# - It might make sense to just inline this logic with the
# main display task? => it's a tradeoff of slower task
# wakeups/ctx switches versus logic checks (as normal)
# - we need increment logic that only does the view shift
# call when the uppx permits/needs it
int(1),
) as istream:
async for msg in istream:
# check if slow chart needs an x-domain shift and/or
# y-range resize.
(
uppx,
liv,
do_append,
i_diff,
append_diff,
do_rt_update,
) = ds.incr_info(
chart=hist_chart,
shm=ds.hist_ohlcv,
state=state,
# update_state=False,
)
# print(
# f'liv: {liv}\n'
# f'do_append: {do_append}\n'
# f'append_diff: {append_diff}\n'
# )
if (
do_append
and liv
):
hist_chart.increment_view(steps=i_diff)
hist_chart.view._set_yrange(yrange=hist_chart.maxmin())
nurse.start_soon(increment_history_view)
chart.default_view()
# main real-time quotes update loop
stream: tractor.MsgStream = feed.stream
async for quotes in stream:
ds.quotes = quotes
@ -413,16 +292,15 @@ async def graphics_update_loop(
last_quote = time.time()
# chart isn't active/shown so skip render cycle and pause feed(s)
if fast_chart.linked.isHidden():
# print('skipping update')
fast_chart.pause_all_feeds()
if chart.linked.isHidden():
chart.pause_all_feeds()
continue
# ic = fast_chart.view._ic
# if ic:
# fast_chart.pause_all_feeds()
# await ic.wait()
# fast_chart.resume_all_feeds()
ic = chart.view._ic
if ic:
chart.pause_all_feeds()
await ic.wait()
chart.resume_all_feeds()
# sync call to update all graphics/UX components.
graphics_update_cycle(ds)
@ -439,10 +317,8 @@ def graphics_update_cycle(
# hopefully XD
chart = ds.chart
# TODO: just pass this as a direct ref to avoid so many attr accesses?
hist_chart = ds.godwidget.hist_linked.chart
profiler = Profiler(
profiler = pg.debug.Profiler(
msg=f'Graphics loop cycle for: `{chart.name}`',
delayed=True,
disabled=not pg_profile_enabled(),
@ -454,24 +330,53 @@ def graphics_update_cycle(
# unpack multi-referenced components
vlm_chart = ds.vlm_chart
# rt "HFT" chart
l1 = ds.l1
ohlcv = ds.ohlcv
array = ohlcv.array
vars = ds.vars
tick_margin = vars['tick_margin']
update_uppx = 16
for sym, quote in ds.quotes.items():
(
uppx,
liv,
do_append,
i_diff,
append_diff,
do_rt_update,
) = ds.incr_info()
# compute the first available graphic's x-units-per-pixel
uppx = vlm_chart.view.x_uppx()
# NOTE: vlm may be written by the ``brokerd`` backend
# even though a tick sample is not emitted.
# TODO: show dark trades differently
# https://github.com/pikers/piker/issues/116
# NOTE: this used to be implemented in a dedicated
# "increment task": ``check_for_new_bars()`` but it doesn't
# make sense to do a whole task switch when we can just do
# this simple index-diff and all the fsp sub-curve graphics
# are diffed on each draw cycle anyway; so updates to the
# "curve" length is already automatic.
# increment the view position by the sample offset.
i_step = ohlcv.index
i_diff = i_step - vars['i_last']
vars['i_last'] = i_step
append_diff = i_step - vars['i_last_append']
# update the "last datum" (aka extending the flow graphic with
# new data) only if the number of unit steps is >= the number of
# such unit steps per pixel (aka uppx). Iow, if the zoom level
# is such that a datum(s) update to graphics wouldn't span
# to a new pixel, we don't update yet.
do_append = (append_diff >= uppx)
if do_append:
vars['i_last_append'] = i_step
do_rt_update = uppx < update_uppx
# print(
# f'append_diff:{append_diff}\n'
# f'uppx:{uppx}\n'
# f'do_append: {do_append}'
# )
# TODO: we should only run mxmn when we know
# an update is due via ``do_append`` above.
@ -487,6 +392,8 @@ def graphics_update_cycle(
profiler('`ds.maxmin()` call')
liv = r >= i_step # the last datum is in view
if (
prepend_update_index is not None
and lbar > prepend_update_index
@ -501,11 +408,18 @@ def graphics_update_cycle(
# don't real-time "shift" the curve to the
# left unless we get one of the following:
if (
(do_append and liv)
(
# i_diff > 0 # no new sample step
do_append
# and uppx < 4 # chart is zoomed out very far
and liv
)
or trigger_all
):
# TODO: we should track and compute whether the last
# pixel in a curve should show new data based on uppx
# and then iff update curves and shift?
chart.increment_view(steps=i_diff)
chart.view._set_yrange(yrange=(mn, mx))
if vlm_chart:
vlm_chart.increment_view(steps=i_diff)
@ -563,10 +477,7 @@ def graphics_update_cycle(
):
chart.update_graphics_from_flow(
chart.name,
do_append=do_append,
)
hist_chart.update_graphics_from_flow(
chart.name,
# do_append=uppx < update_uppx,
do_append=do_append,
)
@ -606,9 +517,6 @@ def graphics_update_cycle(
ds.last_price_sticky.update_from_data(
*end[['index', 'close']]
)
ds.hist_last_price_sticky.update_from_data(
*end[['index', 'close']]
)
if wap_in_history:
# update vwap overlay line
@ -656,12 +564,10 @@ def graphics_update_cycle(
l1.bid_label.update_fields({'level': price, 'size': size})
# check for y-range re-size
if (mx > vars['last_mx']) or (mn < vars['last_mn']):
# fast chart resize case
if (
liv
(mx > vars['last_mx']) or (mn < vars['last_mn'])
and not chart._static_yrange == 'axis'
and liv
):
main_vb = chart.view
if (
@ -679,22 +585,6 @@ def graphics_update_cycle(
yrange=(mn, mx),
)
# check if slow chart needs a resize
(
_,
hist_liv,
_,
_,
_,
_,
) = ds.incr_info(
chart=hist_chart,
shm=ds.hist_ohlcv,
update_state=False,
)
if hist_liv:
hist_chart.view._set_yrange(yrange=hist_chart.maxmin())
# XXX: update this every draw cycle to make L1-always-in-view work.
vars['last_mx'], vars['last_mn'] = mx, mn
@ -810,140 +700,6 @@ def graphics_update_cycle(
flow.draw_last(array_key=curve_name)
async def link_views_with_region(
rt_chart: ChartPlotWidget,
hist_chart: ChartPlotWidget,
feed: Feed,
) -> None:
# these values are only pulled once during shm init/startup
izero_hist = feed.izero_hist
izero_rt = feed.izero_rt
# Add the LinearRegionItem to the ViewBox, but tell the ViewBox
# to exclude this item when doing auto-range calculations.
rt_pi = rt_chart.plotItem
hist_pi = hist_chart.plotItem
region = pg.LinearRegionItem(
movable=False,
# color scheme that matches sidepane styling
pen=pg.mkPen(hcolor('gunmetal')),
brush=pg.mkBrush(hcolor('default_darkest')),
)
region.setZValue(10) # put linear region "in front" in layer terms
hist_pi.addItem(region, ignoreBounds=True)
flow = rt_chart._flows[hist_chart.name]
assert flow
# XXX: no idea why this doesn't work but it's causing
# a weird placement of the region on the way-far-left..
# region.setClipItem(flow.graphics)
# poll for datums load and timestep detection
for _ in range(100):
try:
_, _, ratio = feed.get_ds_info()
break
except IndexError:
await trio.sleep(0.01)
continue
else:
raise RuntimeError(
'Failed to detect sampling periods from shm!?')
# sampling rate transform math:
# -----------------------------
# define the fast chart to slow chart as a linear mapping
# over the fast index domain `i` to the slow index domain
# `j` as:
#
# j = i - i_offset
# ------------ + j_offset
# j/i
#
# conversely the inverse function is:
#
# i = j/i * (j - j_offset) + i_offset
#
# Where `j_offset` is our ``izero_hist`` and `i_offset` is our
# `izero_rt`, the ``ShmArray`` offsets which correspond to the
# indexes in each array where the "current" time is indexed at init.
# AKA the index where new data is "appended to" and historical data
# if "prepended from".
#
# more practically (and by default) `i` is normally an index
# into 1s samples and `j` is an index into 60s samples (aka 1m).
# in the below handlers ``ratio`` is the `j/i` and ``mn``/``mx``
# are the low and high index input from the source index domain.
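
# Editor's note: a quick worked example of the mapping above (hedged,
# using the default 1s/1m timeframes so ratio == 60, with made-up
# offsets):
izero_rt, izero_hist, ratio = 1000, 500, 60
i = 1600                                   # fast (1s) chart index
j = (i - izero_rt) / ratio + izero_hist    # -> 510.0 on the slow chart
assert ratio * (j - izero_hist) + izero_rt == i   # inverse round-trips
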
def update_region_from_pi(
window,
viewRange: tuple[tuple, tuple],
is_manual: bool = True,
) -> None:
# put linear region "in front" in layer terms
region.setZValue(10)
# set the region on the history chart
# to the range currently viewed in the
# HFT/real-time chart.
mn, mx = viewRange[0]
ds_mn = (mn - izero_rt)/ratio
ds_mx = (mx - izero_rt)/ratio
lhmn = ds_mn + izero_hist
lhmx = ds_mx + izero_hist
# print(
# f'rt_view_range: {(mn, mx)}\n'
# f'ds_mn, ds_mx: {(ds_mn, ds_mx)}\n'
# f'lhmn, lhmx: {(lhmn, lhmx)}\n'
# )
region.setRegion((
lhmn,
lhmx,
))
# TODO: if we want to have the slow chart adjust range to
# match the fast chart's selection -> results in the
# linear region expansion never can go "outside of view".
# hmn, hmx = hvr = hist_chart.view.state['viewRange'][0]
# print((hmn, hmx))
# if (
# hvr
# and (lhmn < hmn or lhmx > hmx)
# ):
# hist_pi.setXRange(
# lhmn,
# lhmx,
# padding=0,
# )
# hist_linked.graphics_cycle()
# connect region to be updated on plotitem interaction.
rt_pi.sigRangeChanged.connect(update_region_from_pi)
def update_pi_from_region():
region.setZValue(10)
mn, mx = region.getRegion()
# print(f'region_x: {(mn, mx)}')
rt_pi.setXRange(
((mn - izero_hist) * ratio) + izero_rt,
((mx - izero_hist) * ratio) + izero_rt,
padding=0,
)
# TODO BUG XXX: seems to cause a real perf hit and a recursion error
# (but used to work before generalizing for 1s ohlc offset?)..
# something to do with the label callback handlers?
# region.sigRegionChanged.connect(update_pi_from_region)
# region.sigRegionChangeFinished.connect(update_pi_from_region)
async def display_symbol_data(
godwidget: GodWidget,
provider: str,
@ -985,13 +741,15 @@ async def display_symbol_data(
tick_throttle=_quote_throttle_rate,
) as feed:
ohlcv: ShmArray = feed.rt_shm
hist_ohlcv: ShmArray = feed.hist_shm
ohlcv: ShmArray = feed.shm
bars = ohlcv.array
symbol = feed.symbols[sym]
fqsn = symbol.front_fqsn()
step_size_s = 1
times = bars['time']
end = pendulum.from_timestamp(times[-1])
start = pendulum.from_timestamp(times[times != times[-1]][-1])
step_size_s = (end - start).seconds
tf_key = tf_in_1s[step_size_s]
# load in symbol's ohlc data
@ -1001,48 +759,33 @@ async def display_symbol_data(
f'step:{tf_key} '
)
rt_linked = godwidget.rt_linked
rt_linked._symbol = symbol
# create top history view chart above the "main rt chart".
hist_linked = godwidget.hist_linked
hist_linked._symbol = symbol
hist_chart = hist_linked.plot_ohlc_main(
symbol,
feed.hist_shm,
# in the case of history chart we explicitly set `False`
# to avoid internal pane creation.
# sidepane=False,
sidepane=godwidget.search,
)
# don't show when not focussed
hist_linked.cursor.always_show_xlabel = False
linked = godwidget.linkedsplits
linked._symbol = symbol
# generate order mode side-pane UI
# A ``FieldsForm`` form to configure order entry
# and add as next-to-y-axis singleton pane
pp_pane: FieldsForm = mk_order_pane_layout(godwidget)
# add as next-to-y-axis singleton pane
godwidget.pp_pane = pp_pane
# create main OHLC chart
chart = rt_linked.plot_ohlc_main(
chart = linked.plot_ohlc_main(
symbol,
ohlcv,
# in the case of history chart we explicitly set `False`
# to avoid internal pane creation.
sidepane=pp_pane,
)
chart.default_view()
chart._feeds[symbol.key] = feed
chart.setFocus()
# XXX: FOR SOME REASON THIS IS CAUSING HANGZ!?!
# plot historical vwap if available
wap_in_history = False
# if (
# brokermod._show_wap_in_history
# and 'bar_wap' in bars.dtype.fields
# ):
# XXX: FOR SOME REASON THIS IS CAUSING HANGZ!?!
# if brokermod._show_wap_in_history:
# if 'bar_wap' in bars.dtype.fields:
# wap_in_history = True
# chart.draw_curve(
# name='bar_wap',
@ -1051,34 +794,24 @@ async def display_symbol_data(
# add_label=False,
# )
# size view to data once at outset
chart.cv._set_yrange()
# NOTE: we must immediately tell Qt to show the OHLC chart
# to avoid a race where the subplots get added/shown to
# the linked set *before* the main price chart!
rt_linked.show()
rt_linked.focus()
linked.show()
linked.focus()
await trio.sleep(0)
# NOTE: here we insert the slow-history chart set into
# the fast chart's splitter -> so it's a splitter of charts
# inside the first widget slot of a splitter of charts XD
rt_linked.splitter.insertWidget(0, hist_linked)
# XXX: if we wanted it at the bottom?
# rt_linked.splitter.addWidget(hist_linked)
rt_linked.focus()
godwidget.resize_all()
vlm_chart: Optional[ChartPlotWidget] = None
async with trio.open_nursery() as ln:
# if available load volume related built-in display(s)
if (
not symbol.broker_info[provider].get('no_vlm', False)
and has_vlm(ohlcv)
):
if has_vlm(ohlcv):
vlm_chart = await ln.start(
open_vlm_displays,
rt_linked,
linked,
ohlcv,
)
@ -1086,7 +819,7 @@ async def display_symbol_data(
# from an input config.
ln.start_soon(
start_fsp_displays,
rt_linked,
linked,
ohlcv,
loading_sym_key,
loglevel,
@ -1095,79 +828,36 @@ async def display_symbol_data(
# start graphics update loop after receiving first live quote
ln.start_soon(
graphics_update_loop,
ln,
godwidget,
feed,
linked,
feed.stream,
ohlcv,
wap_in_history,
vlm_chart,
)
await trio.sleep(0)
# size view to data prior to order mode init
chart.default_view()
rt_linked.graphics_cycle()
await trio.sleep(0)
hist_chart.default_view(
bars_from_y=int(len(hist_ohlcv.array)), # size to data
y_offset=6116*2, # push it a little away from the y-axis
)
hist_linked.graphics_cycle()
await trio.sleep(0)
godwidget.resize_all()
await link_views_with_region(
chart,
hist_chart,
feed,
)
mode: OrderMode
async with (
open_order_mode(
feed,
godwidget,
chart,
fqsn,
order_mode_started
) as mode
)
):
if not vlm_chart:
# trigger another view reset if no sub-chart
chart.default_view()
rt_linked.mode = mode
# let Qt run to render all widgets and make sure the
# sidepanes line up vertically.
await trio.sleep(0)
linked.resize_sidepanes()
# dynamic resize steps
godwidget.resize_all()
# TODO: look into this because not sure why it was
# commented out / we ever needed it XD
# NOTE: we pop the volume chart from the subplots set so
# that it isn't double rendered in the display loop
# above since we do a maxmin calc on the volume data to
# determine if auto-range adjustments should be made.
# rt_linked.subplots.pop('volume', None)
# linked.subplots.pop('volume', None)
# TODO: make this not so shit XD
# close group status
sbar._status_groups[loading_sym_key][1]()
hist_linked.graphics_cycle()
await trio.sleep(0)
bars_in_mem = int(len(hist_ohlcv.array))
hist_chart.default_view(
bars_from_y=bars_in_mem, # size to data
# push it 1/16th away from the y-axis
y_offset=round(bars_in_mem / 16),
)
godwidget.resize_all()
# let the app run.. bby
# linked.graphics_cycle()
await trio.sleep_forever()

View File

@ -18,27 +18,11 @@
Higher level annotation editors.
"""
from __future__ import annotations
from collections import defaultdict
from typing import (
Optional,
TYPE_CHECKING
)
from dataclasses import dataclass, field
from typing import Optional
import pyqtgraph as pg
from pyqtgraph import (
ViewBox,
Point,
QtCore,
QtWidgets,
)
from PyQt5.QtGui import (
QColor,
)
from PyQt5.QtWidgets import (
QLabel,
)
from pyqtgraph import ViewBox, Point, QtCore, QtGui
from pyqtgraph import functions as fn
from PyQt5.QtCore import QPointF
import numpy as np
@ -46,34 +30,28 @@ import numpy as np
from ._style import hcolor, _font
from ._lines import LevelLine
from ..log import get_logger
from ..data.types import Struct
if TYPE_CHECKING:
from ._chart import GodWidget
log = get_logger(__name__)
class ArrowEditor(Struct):
@dataclass
class ArrowEditor:
godw: GodWidget = None # type: ignore # noqa
_arrows: dict[str, list[pg.ArrowItem]] = {}
chart: 'ChartPlotWidget' # noqa
_arrows: field(default_factory=dict)
def add(
self,
plot: pg.PlotItem,
uid: str,
x: float,
y: float,
color='default',
pointing: Optional[str] = None,
) -> pg.ArrowItem:
'''
Add an arrow graphic to view at given (x, y).
"""Add an arrow graphic to view at given (x, y).
'''
"""
angle = {
'up': 90,
'down': -90,
@ -96,25 +74,25 @@ class ArrowEditor(Struct):
brush=pg.mkBrush(hcolor(color)),
)
arrow.setPos(x, y)
self._arrows.setdefault(uid, []).append(arrow)
self._arrows[uid] = arrow
# render to view
plot.addItem(arrow)
self.chart.plotItem.addItem(arrow)
return arrow
def remove(self, arrow) -> bool:
for linked in self.godw.iter_linked():
linked.chart.plotItem.removeItem(arrow)
self.chart.plotItem.removeItem(arrow)
class LineEditor(Struct):
'''
The great editor of linez.
@dataclass
class LineEditor:
'''The great editor of linez.
'''
godw: GodWidget = None # type: ignore # noqa
_order_lines: defaultdict[str, LevelLine] = defaultdict(list)
chart: 'ChartPlotWidget' = None # type: ignore # noqa
_order_lines: dict[str, LevelLine] = field(default_factory=dict)
_active_staged_line: LevelLine = None
def stage_line(
@ -122,11 +100,11 @@ class LineEditor(Struct):
line: LevelLine,
) -> LevelLine:
'''
Stage a line at the current chart's cursor position
"""Stage a line at the current chart's cursor position
and return it.
'''
"""
# add a "staged" cursor-tracking line to view
# and cache it in a var
if self._active_staged_line:
@ -137,25 +115,17 @@ class LineEditor(Struct):
return line
def unstage_line(self) -> LevelLine:
'''
Inverse of ``.stage_line()``.
"""Inverse of ``.stage_line()``.
'''
cursor = self.godw.get_cursor()
if not cursor:
return None
"""
# chart = self.chart._cursor.active_plot
# # chart.setCursor(QtCore.Qt.ArrowCursor)
cursor = self.chart.linked.cursor
# delete "staged" cursor tracking line from view
line = self._active_staged_line
if line:
try:
cursor._trackers.remove(line)
except KeyError:
# when the current cursor doesn't have said line
# registered (probably means that user held order mode
# key while panning to another view) then we just
# ignore the remove error.
pass
line.delete()
self._active_staged_line = None
@ -163,58 +133,55 @@ class LineEditor(Struct):
# show the crosshair y line and label
cursor.show_xhair()
def submit_lines(
def submit_line(
self,
lines: list[LevelLine],
line: LevelLine,
uuid: str,
) -> LevelLine:
# staged_line = self._active_staged_line
# if not staged_line:
# raise RuntimeError("No line is currently staged!?")
staged_line = self._active_staged_line
if not staged_line:
raise RuntimeError("No line is currently staged!?")
# for now, until submission response arrives
for line in lines:
line.hide_labels()
# register for later lookup/deletion
self._order_lines[uuid] += lines
self._order_lines[uuid] = line
return lines
return line
def commit_line(self, uuid: str) -> list[LevelLine]:
'''
Commit a "staged line" to view.
def commit_line(self, uuid: str) -> LevelLine:
"""Commit a "staged line" to view.
Submits the line graphic under the cursor as a (new) permanent
graphic in view.
'''
lines = self._order_lines[uuid]
if lines:
for line in lines:
"""
try:
line = self._order_lines[uuid]
except KeyError:
log.warning(f'No line for {uuid} could be found?')
return
else:
line.show_labels()
line.hide_markers()
log.debug(f'Level active for level: {line.value()}')
# TODO: other flashy things to indicate the order is active
return lines
log.debug(f'Level active for level: {line.value()}')
return line
def lines_under_cursor(self) -> list[LevelLine]:
'''
Get the line(s) under the cursor position.
"""Get the line(s) under the cursor position.
'''
"""
# Delete any hoverable under the cursor
return self.godw.get_cursor()._hovered
return self.chart.linked.cursor._hovered
def all_lines(self) -> list[LevelLine]:
all_lines = []
for lines in list(self._order_lines.values()):
all_lines.extend(lines)
return all_lines
def all_lines(self) -> tuple[LevelLine]:
return tuple(self._order_lines.values())
def remove_line(
self,
@ -229,30 +196,29 @@ class LineEditor(Struct):
'''
# try to look up line from our registry
lines = self._order_lines.pop(uuid, None)
if lines:
cursor = self.godw.get_cursor()
if cursor:
for line in lines:
line = self._order_lines.pop(uuid, line)
if line:
# if hovered remove from cursor set
cursor = self.chart.linked.cursor
hovered = cursor._hovered
if line in hovered:
hovered.remove(line)
log.debug(f'deleting {line} with oid: {uuid}')
line.delete()
# make sure the xhair doesn't get left off
# just because we never got a un-hover event
cursor.show_xhair()
log.debug(f'deleting {line} with oid: {uuid}')
line.delete()
else:
log.warning(f'Could not find line for {line}')
return lines
return line
class SelectRect(QtWidgets.QGraphicsRectItem):
class SelectRect(QtGui.QGraphicsRectItem):
def __init__(
self,
@ -261,12 +227,12 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
) -> None:
super().__init__(0, 0, 1, 1)
# self.rbScaleBox = QGraphicsRectItem(0, 0, 1, 1)
# self.rbScaleBox = QtGui.QGraphicsRectItem(0, 0, 1, 1)
self.vb = viewbox
self._chart: 'ChartPlotWidget' = None # noqa
# override selection box color
color = QColor(hcolor(color))
color = QtGui.QColor(hcolor(color))
self.setPen(fn.mkPen(color, width=1))
color.setAlpha(66)
self.setBrush(fn.mkBrush(color))
@ -274,7 +240,7 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
self.hide()
self._label = None
label = self._label = QLabel()
label = self._label = QtGui.QLabel()
label.setTextFormat(0) # markdown
label.setFont(_font.font)
label.setMargin(0)
@ -311,8 +277,8 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
# TODO: get bg color working
palette.setColor(
self._label.backgroundRole(),
# QColor(chart.backgroundBrush()),
QColor(hcolor('papas_special')),
# QtGui.QColor(chart.backgroundBrush()),
QtGui.QColor(hcolor('papas_special')),
)
def update_on_resize(self, vr, r):
@ -360,7 +326,7 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
self.setPos(r.topLeft())
self.resetTransform()
self.setRect(r)
self.scale(r.width(), r.height())
self.show()
y1, y2 = start_pos.y(), end_pos.y()

View File

@ -18,11 +18,11 @@
Qt event proxying and processing using ``trio`` mem chans.
"""
from contextlib import asynccontextmanager as acm
from contextlib import asynccontextmanager, AsyncExitStack
from typing import Callable
from pydantic import BaseModel
import trio
from tractor.trionics import gather_contexts
from PyQt5 import QtCore
from PyQt5.QtCore import QEvent, pyqtBoundSignal
from PyQt5.QtWidgets import QWidget
@ -30,8 +30,6 @@ from PyQt5.QtWidgets import (
QGraphicsSceneMouseEvent as gs_mouse,
)
from ..data.types import Struct
MOUSE_EVENTS = {
gs_mouse.GraphicsSceneMousePress,
@ -45,10 +43,13 @@ MOUSE_EVENTS = {
# TODO: maybe consider some constrained ints down the road?
# https://pydantic-docs.helpmanual.io/usage/types/#constrained-types
class KeyboardMsg(Struct):
class KeyboardMsg(BaseModel):
'''Unpacked Qt keyboard event data.
'''
class Config:
arbitrary_types_allowed = True
event: QEvent
etype: int
key: int
@ -56,13 +57,16 @@ class KeyboardMsg(Struct):
txt: str
def to_tuple(self) -> tuple:
return tuple(self.to_dict().values())
return tuple(self.dict().values())
class MouseMsg(Struct):
class MouseMsg(BaseModel):
'''Unpacked Qt mouse event data.
'''
class Config:
arbitrary_types_allowed = True
event: QEvent
etype: int
button: int
@ -156,7 +160,7 @@ class EventRelay(QtCore.QObject):
return False
@acm
@asynccontextmanager
async def open_event_stream(
source_widget: QWidget,
@ -182,7 +186,7 @@ async def open_event_stream(
source_widget.removeEventFilter(kc)
@acm
@asynccontextmanager
async def open_signal_handler(
signal: pyqtBoundSignal,
@ -207,7 +211,7 @@ async def open_signal_handler(
yield
@acm
@asynccontextmanager
async def open_handlers(
source_widgets: list[QWidget],
@ -216,14 +220,16 @@ async def open_handlers(
**kwargs,
) -> None:
async with (
trio.open_nursery() as n,
gather_contexts([
open_event_stream(widget, event_types, **kwargs)
for widget in source_widgets
]) as streams,
AsyncExitStack() as stack,
):
for widget, event_recv_stream in zip(source_widgets, streams):
for widget in source_widgets:
event_recv_stream = await stack.enter_async_context(
open_event_stream(widget, event_types, **kwargs)
)
n.start_soon(async_handler, widget, event_recv_stream)
yield
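
# Editor's note: a minimal sketch of the ``gather_contexts()`` pattern
# the new code above uses (the helper names here are illustrative): it
# enters N async context managers concurrently and yields their values
# as one sequence.
from contextlib import asynccontextmanager

import trio
from tractor.trionics import gather_contexts

@asynccontextmanager
async def open_stream(i: int):
    yield i * 2

async def main() -> None:
    async with gather_contexts([
        open_stream(i) for i in range(3)
    ]) as vals:
        print(list(vals))  # [0, 2, 4]

trio.run(main)
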

View File

@ -20,24 +20,16 @@ Trio - Qt integration
Run ``trio`` in guest mode on top of the Qt event loop.
All global Qt runtime settings are mostly defined here.
"""
from __future__ import annotations
from typing import (
Callable,
Any,
Type,
TYPE_CHECKING,
)
from typing import Tuple, Callable, Dict, Any
import platform
import traceback
# Qt specific
import PyQt5 # noqa
from PyQt5.QtWidgets import (
QWidget,
QMainWindow,
QApplication,
)
import pyqtgraph as pg
from pyqtgraph import QtGui
from PyQt5 import QtCore
# from PyQt5.QtGui import QLabel, QStatusBar
from PyQt5.QtCore import (
pyqtRemoveInputHook,
Qt,
@ -45,7 +37,7 @@ from PyQt5.QtCore import (
)
import qdarkstyle
from qdarkstyle import DarkPalette
# import qdarkgraystyle # TODO: play with it
# import qdarkgraystyle
import trio
from outcome import Error
@ -54,7 +46,6 @@ from ..log import get_logger
from ._pg_overrides import _do_overrides
from . import _style
log = get_logger(__name__)
# pyqtgraph global config
@ -81,18 +72,17 @@ if platform.system() == "Windows":
def run_qtractor(
func: Callable,
args: tuple,
main_widget_type: Type[QWidget],
tractor_kwargs: dict[str, Any] = {},
window_type: QMainWindow = None,
args: Tuple,
main_widget: QtGui.QWidget,
tractor_kwargs: Dict[str, Any] = {},
window_type: QtGui.QMainWindow = None,
) -> None:
# avoids annoying message when entering debugger from qt loop
pyqtRemoveInputHook()
app = QApplication.instance()
app = QtGui.QApplication.instance()
if app is None:
app = QApplication([])
app = PyQt5.QtWidgets.QApplication([])
# TODO: we might not need this if it's desired
# to cancel the tractor machinery on Qt loop
@ -166,7 +156,7 @@ def run_qtractor(
# hook into app focus change events
app.focusChanged.connect(window.on_focus_change)
instance = main_widget_type()
instance = main_widget()
instance.window = window
# override tractor's defaults
@ -188,7 +178,7 @@ def run_qtractor(
# restrict_keyboard_interrupt_to_checkpoints=True,
)
window.godwidget: GodWidget = instance
window.main_widget = main_widget
window.setCentralWidget(instance)
if is_windows:
window.configure_to_desktop()
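
# Editor's note: a hedged, minimal sketch of the ``trio`` guest-mode
# technique that ``run_qtractor()`` wraps; the signal-based re-entry
# shown here is one common approach, not necessarily piker's exact
# mechanism.
import trio
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtWidgets import QApplication

class Reenter(QObject):
    # cross-thread signal emission is thread-safe, so it can ferry
    # trio-scheduled callbacks back onto the Qt main thread.
    run = pyqtSignal(object)

app = QApplication([])
reenter = Reenter()
reenter.run.connect(lambda fn: fn())

async def main() -> None:
    await trio.sleep(1)

trio.lowlevel.start_guest_run(
    main,
    run_sync_soon_threadsafe=reenter.run.emit,
    done_callback=lambda outcome: app.quit(),
)
app.exec_()
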

View File

@ -59,7 +59,6 @@ from ._curve import (
FlattenedOHLC,
)
from ..log import get_logger
from .._profile import Profiler
log = get_logger(__name__)
@ -131,7 +130,7 @@ def render_baritems(
int, int, np.ndarray,
int, int, np.ndarray,
],
profiler: Profiler,
profiler: pg.debug.Profiler,
**kwargs,
) -> None:
@ -338,7 +337,6 @@ class Flow(msgspec.Struct): # , frozen=True):
name: str
plot: pg.PlotItem
graphics: Union[Curve, BarItems]
yrange: tuple[float, float] = None
# in some cases a flow may want to change its
# graphical "type" or, "form" when downsampling,
@ -388,11 +386,10 @@ class Flow(msgspec.Struct): # , frozen=True):
lbar: int,
rbar: int,
) -> Optional[tuple[float, float]]:
) -> tuple[float, float]:
'''
Compute the cached max and min y-range values for a given
x-range determined by ``lbar`` and ``rbar`` or ``None``
if no range can be determined (yet).
x-range determined by ``lbar`` and ``rbar``.
'''
rkey = (lbar, rbar)
@ -402,8 +399,9 @@ class Flow(msgspec.Struct): # , frozen=True):
shm = self.shm
if shm is None:
return None
mxmn = None
else: # new block for profiling?..
arr = shm.array
# build relative indexes into shm array
@ -416,11 +414,7 @@ class Flow(msgspec.Struct): # , frozen=True):
]
if not slice_view.size:
return None
elif self.yrange:
mxmn = self.yrange
# print(f'{self.name} M4 maxmin: {mxmn}')
mxmn = None
else:
if self.is_ohlc:
@ -433,10 +427,9 @@ class Flow(msgspec.Struct): # , frozen=True):
yhigh = np.max(view)
mxmn = ylow, yhigh
# print(f'{self.name} MANUAL maxmin: {mxmin}')
# cache result for input range
assert mxmn
if mxmn is not None:
# cache new mxmn result
self._mxmns[rkey] = mxmn
return mxmn
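
# Editor's note: the caching pattern above in isolation (a hedged
# sketch; the real method also handles OHLC fields and downsampled
# y-ranges): memoize min/max per (lbar, rbar) view-range key so
# repeated queries over the same window skip the array scan.
import numpy as np

_mxmns: dict[tuple[int, int], tuple[float, float]] = {}

def cached_maxmin(
    arr: np.ndarray,
    lbar: int,
    rbar: int,
) -> tuple[float, float]:
    key = (lbar, rbar)
    cached = _mxmns.get(key)
    if cached is not None:
        return cached
    view = arr[lbar:rbar + 1]
    mxmn = float(view.min()), float(view.max())
    _mxmns[key] = mxmn   # cache result for this exact x-range
    return mxmn
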
@ -518,7 +511,7 @@ class Flow(msgspec.Struct): # , frozen=True):
render: bool = True,
array_key: Optional[str] = None,
profiler: Optional[Profiler] = None,
profiler: Optional[pg.debug.Profiler] = None,
do_append: bool = True,
**kwargs,
@ -529,7 +522,7 @@ class Flow(msgspec.Struct): # , frozen=True):
render to graphics.
'''
profiler = Profiler(
profiler = pg.debug.Profiler(
msg=f'Flow.update_graphics() for {self.name}',
disabled=not pg_profile_enabled(),
ms_threshold=4,
@ -635,13 +628,10 @@ class Flow(msgspec.Struct): # , frozen=True):
# source data so we clear our path data in prep
# to generate a new one from original source data.
new_sample_rate = True
showing_src_data = True
should_ds = False
should_redraw = True
showing_src_data = True
# reset yrange to be computed from source data
self.yrange = None
# MAIN RENDER LOGIC:
# - determine in view data and redraw on range change
# - determine downsampling ops if needed
@ -667,10 +657,6 @@ class Flow(msgspec.Struct): # , frozen=True):
**rkwargs,
)
if showing_src_data:
# print(f"{self.name} SHOWING SOURCE")
# reset yrange to be computed from source data
self.yrange = None
if not out:
log.warning(f'{self.name} failed to render!?')
@ -678,9 +664,6 @@ class Flow(msgspec.Struct): # , frozen=True):
path, data, reset = out
# if self.yrange:
# print(f'flow {self.name} yrange from m4: {self.yrange}')
# XXX: SUPER UGGGHHH... without this we get stale cache
# graphics that don't update until you downsampler again..
if reset:
@ -949,7 +932,7 @@ class Renderer(msgspec.Struct):
new_read,
array_key: str,
profiler: Profiler,
profiler: pg.debug.Profiler,
uppx: float = 1,
# redraw and ds flags
@ -1075,7 +1058,6 @@ class Renderer(msgspec.Struct):
# xy-path data transform: convert source data to a format
# able to be passed to a `QPainterPath` rendering routine.
if not len(hist):
# XXX: this might be why the profiler only has exits?
return
x_out, y_out, connect = self.format_xy(
@ -1162,14 +1144,11 @@ class Renderer(msgspec.Struct):
elif should_ds and uppx > 1:
x_out, y_out, ymn, ymx = xy_downsample(
x_out, y_out = xy_downsample(
x_out,
y_out,
uppx,
)
self.flow.yrange = ymn, ymx
# print(f'{self.flow.name} post ds: ymn, ymx: {ymn},{ymx}')
reset = True
profiler(f'FULL PATH downsample redraw={should_ds}')
self._in_ds = True

View File

@ -619,7 +619,7 @@ class FillStatusBar(QProgressBar):
# color: #19232D;
# width: 10px;
self.setRange(0, int(slots))
self.setRange(0, slots)
self.setValue(value)
@ -644,7 +644,7 @@ def mk_fill_status_bar(
# TODO: calc this height from the ``ChartnPane``
chart_h = round(parent_pane.height() * 5/8)
bar_h = chart_h * 0.375*0.9
bar_h = chart_h * 0.375
# TODO: once things are sized to screen
bar_label_font_size = label_font_size or _font.px_size - 2

View File

@ -27,13 +27,12 @@ from itertools import cycle
from typing import Optional, AsyncGenerator, Any
import numpy as np
import msgspec
from pydantic import create_model
import tractor
import pyqtgraph as pg
import trio
from trio_typing import TaskStatus
from piker.data.types import Struct
from ._axes import PriceAxis
from .._cacheables import maybe_open_context
from ..calc import humanize
@ -54,12 +53,11 @@ from ._forms import (
from ..fsp._api import maybe_mk_fsp_shm, Fsp
from ..fsp import cascade
from ..fsp._volume import (
# tina_vwap,
tina_vwap,
dolla_vlm,
flow_rates,
)
from ..log import get_logger
from .._profile import Profiler
log = get_logger(__name__)
@ -155,13 +153,12 @@ async def open_fsp_sidepane(
)
# https://pydantic-docs.helpmanual.io/usage/models/#dynamic-model-creation
FspConfig = msgspec.defstruct(
"Point",
[('name', name)] + list(params.items()),
bases=(Struct,),
FspConfig = create_model(
'FspConfig',
name=name,
**params,
)
model = FspConfig(name=name, **params)
sidepane.model = model
sidepane.model = FspConfig()
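
# Editor's note: a self-contained sketch of the ``msgspec.defstruct``
# dynamic-struct pattern used above (field names here are made up):
import msgspec

FspConfig = msgspec.defstruct(
    'FspConfig',
    [('name', str), ('period', int)],  # (name, type) field specs
)
cfg = FspConfig(name='rsi', period=14)
assert cfg.period == 14
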
# just a logger for now until we get fsp configs up and running.
async def settings_change(
@ -191,7 +188,7 @@ async def open_fsp_actor_cluster(
from tractor._clustering import open_actor_cluster
# profiler = Profiler(
# profiler = pg.debug.Profiler(
# delayed=False,
# disabled=False
# )
@ -213,7 +210,7 @@ async def run_fsp_ui(
target: Fsp,
conf: dict[str, dict],
loglevel: str,
# profiler: Profiler,
# profiler: pg.debug.Profiler,
# _quote_throttle_rate: int = 58,
) -> None:
@ -443,9 +440,7 @@ class FspAdmin:
# if the chart isn't hidden try to update
# the data on screen.
if not self.linked.isHidden():
log.debug(
f'Re-syncing graphics for fsp: {ns_path}'
)
log.debug(f'Re-syncing graphics for fsp: {ns_path}')
self.linked.graphics_cycle(
trigger_all=True,
prepend_update_index=info['first'],
@ -474,10 +469,9 @@ class FspAdmin:
target=target,
readonly=True,
)
self._flow_registry[(
self.src_shm._token,
target.name
)] = dst_shm._token
self._flow_registry[
(self.src_shm._token, target.name)
] = dst_shm._token
# if not opened:
# raise RuntimeError(
@ -624,8 +618,6 @@ async def open_vlm_displays(
# built-in vlm which we plot ASAP since it's
# usually data provided directly with OHLC history.
shm = ohlcv
ohlc_chart = linked.chart
chart = linked.add_plot(
name='volume',
shm=shm,
@ -641,34 +633,26 @@ async def open_vlm_displays(
# the curve item internals are pretty convoluted.
style='step',
)
ohlc_chart.view.enable_auto_yrange(
src_vb=chart.view,
)
# force 0 to always be in view
def multi_maxmin(
names: list[str],
) -> tuple[float, float]:
'''
Flows "group" maxmin loop; assumes all named flows
are in the same co-domain and thus can be sorted
as one set.
Iterates all the named flows and calls the chart
api to find their range values and return.
TODO: really we should probably have a more built-in API
for this?
'''
mx = 0
for name in names:
ymn, ymx = chart.maxmin(name=name)
mx = max(mx, ymx)
mxmn = chart.maxmin(name=name)
if mxmn:
ymax = mxmn[1]
if ymax > mx:
mx = ymax
return 0, mx
chart.view.maxmin = partial(multi_maxmin, names=['volume'])
# TODO: fix the x-axis label issue where if you put
# the axis on the left it's totally not lined up...
# show volume units value on LHS (for dinkus)
@ -752,8 +736,6 @@ async def open_vlm_displays(
},
)
dvlm_pi.hideAxis('left')
dvlm_pi.hideAxis('bottom')
# all to be overlayed curve names
fields = [
'dolla_vlm',
@ -794,7 +776,6 @@ async def open_vlm_displays(
) -> None:
for name in names:
if 'dark' in name:
color = dark_vlm_color
elif 'rate' in name:
@ -886,7 +867,6 @@ async def open_vlm_displays(
# keep both regular and dark vlm in view
names=trade_rate_fields,
)
tr_pi.hideAxis('bottom')
chart_curves(
trade_rate_fields,
@ -960,7 +940,7 @@ async def start_fsp_displays(
# },
# },
}
profiler = Profiler(
profiler = pg.debug.Profiler(
delayed=False,
disabled=False
)

View File

@ -33,7 +33,6 @@ import numpy as np
import trio
from ..log import get_logger
from .._profile import Profiler
from .._profile import pg_profile_enabled, ms_slower_then
# from ._style import _min_points_to_show
from ._editors import SelectRect
@ -142,16 +141,13 @@ async def handle_viewmode_kb_inputs(
Qt.Key_Space,
}
):
godw = view._chart.linked.godwidget
godw.hist_linked.resize_sidepanes(from_linked=godw.rt_linked)
godw.search.focus()
view._chart.linked.godwidget.search.focus()
# esc and ctrl-c
if key == Qt.Key_Escape or (ctrl and key == Qt.Key_C):
# ctrl-c as cancel
# https://forum.qt.io/topic/532/how-to-catch-ctrl-c-on-a-widget/9
view.select_box.clear()
view.linked.focus()
# cancel order or clear graphics
if key == Qt.Key_C or key == Qt.Key_Delete:
@ -182,17 +178,17 @@ async def handle_viewmode_kb_inputs(
if key in pressed:
pressed.remove(key)
# QUERY/QUOTE MODE
# ----------------
# QUERY/QUOTE MODE #
if {Qt.Key_Q}.intersection(pressed):
view.linked.cursor.in_query_mode = True
view.linkedsplits.cursor.in_query_mode = True
else:
view.linked.cursor.in_query_mode = False
view.linkedsplits.cursor.in_query_mode = False
# SELECTION MODE
# --------------
if shift:
if view.state['mouseMode'] == ViewBox.PanMode:
view.setMouseMode(ViewBox.RectMode)
@ -213,27 +209,18 @@ async def handle_viewmode_kb_inputs(
# ORDER MODE
# ----------
# live vs. dark trigger + an action {buy, sell, alert}
order_keys_pressed = ORDER_MODE.intersection(pressed)
if order_keys_pressed:
# TODO: it seems like maybe the composition should be
# reversed here? Like, maybe we should have the nav have
# access to the pos state and then make encapsulated logic
# that shows the right stuff on screen instead of order mode
# and position-related abstractions doing this?
# show the pp size label only if there is
# a non-zero pos existing
tracker = order_mode.current_pp
if tracker.live_pp.size:
tracker.nav.show()
# show the pp size label
order_mode.current_pp.show()
# TODO: show pp config mini-params in status bar widget
# mode.pp_config.show()
trigger_type: str = 'dark'
if (
# 's' for "submit" to activate "live" order
Qt.Key_S in pressed or
@ -241,6 +228,9 @@ async def handle_viewmode_kb_inputs(
):
trigger_type: str = 'live'
else:
trigger_type: str = 'dark'
# order mode trigger "actions"
if Qt.Key_D in pressed: # for "damp eet"
action = 'sell'
@ -269,8 +259,8 @@ async def handle_viewmode_kb_inputs(
Qt.Key_S in pressed or
order_keys_pressed or
Qt.Key_O in pressed
)
and key in NUMBER_LINE
) and
key in NUMBER_LINE
):
# hot key to set order slots size.
# change edit field to current number line value,
@ -288,7 +278,7 @@ async def handle_viewmode_kb_inputs(
else: # none active
# hide pp label
order_mode.current_pp.nav.hide_info()
order_mode.current_pp.hide_info()
# if none are pressed, remove "staged" level
# line under cursor position
@ -329,6 +319,7 @@ async def handle_viewmode_mouse(
):
# when in order mode, submit execution
# msg.event.accept()
# breakpoint()
view.order_mode.submit_order()
@ -345,6 +336,16 @@ class ChartView(ViewBox):
'''
mode_name: str = 'view'
# "relay events" for making overlaid views work.
# NOTE: these MUST be defined here (and can't be monkey patched
# on later) due to signal construction requiring refs to be
# in place during the run of meta-class machinery.
mouseDragEventRelay = QtCore.Signal(object, object, object)
wheelEventRelay = QtCore.Signal(object, object, object)
event_relay_source: 'Optional[ViewBox]' = None
relays: dict[str, QtCore.Signal] = {}
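
# Editor's note: the NOTE above in miniature (hedged illustration): Qt
# signals must be *class* attributes because the meta-class binds them
# at class-creation time, so they can't be monkey-patched on later.
from PyQt5.QtCore import QObject, pyqtSignal

class Relay(QObject):
    relayed = pyqtSignal(object)  # bound by the Qt meta-class

r = Relay()
r.relayed.connect(print)
r.relayed.emit('hi')  # prints 'hi'

# whereas assigning `Relay.too_late = pyqtSignal(object)` after class
# creation yields an unbound signal that can't be connected.
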
def __init__(
self,
@ -374,7 +375,7 @@ class ChartView(ViewBox):
y=True,
)
self.linked = None
self.linkedsplits = None
self._chart: 'ChartPlotWidget' = None # noqa
# add our selection box annotator
@ -396,11 +397,8 @@ class ChartView(ViewBox):
'''
if self._ic is None:
try:
self.chart.pause_all_feeds()
self._ic = trio.Event()
except RuntimeError:
pass
def signal_ic(
self,
@ -413,12 +411,9 @@ class ChartView(ViewBox):
'''
if self._ic:
try:
self._ic.set()
self._ic = None
self.chart.resume_all_feeds()
except RuntimeError:
pass
@asynccontextmanager
async def open_async_input_handler(
@ -468,7 +463,7 @@ class ChartView(ViewBox):
self,
ev,
axis=None,
# relayed_from: ChartView = None,
relayed_from: ChartView = None,
):
'''
Override "center-point" location for scrolling.
@ -479,20 +474,13 @@ class ChartView(ViewBox):
TODO: PR a method into ``pyqtgraph`` to make this configurable
'''
linked = self.linked
if (
not linked
):
# print(f'{self.name} not linked but relay from {relayed_from.name}')
return
if axis in (0, 1):
mask = [False, False]
mask[axis] = self.state['mouseEnabled'][axis]
else:
mask = self.state['mouseEnabled'][:]
chart = self.linked.chart
chart = self.linkedsplits.chart
# don't zoom more then the min points setting
l, lbar, rbar, r = chart.bars_range()
@ -605,20 +593,9 @@ class ChartView(ViewBox):
self,
ev,
axis: Optional[int] = None,
# relayed_from: ChartView = None,
relayed_from: ChartView = None,
) -> None:
# if relayed_from:
# print(f'PAN: {self.name} -> RELAYED FROM: {relayed_from.name}')
# NOTE since in the overlay case axes are already
# "linked" any x-range change will already be mirrored
# in all overlaid ``PlotItems``, so we need to simply
# ignore the signal here since otherwise we get N-calls
# from N-overlays resulting in an "accelerated" feeling
# panning motion instead of the expected linear shift.
# if relayed_from:
# return
pos = ev.pos()
lastPos = ev.lastPos()
@ -692,10 +669,7 @@ class ChartView(ViewBox):
# XXX: WHY
ev.accept()
try:
self.start_ic()
except RuntimeError:
pass
# if self._ic is None:
# self.chart.pause_all_feeds()
# self._ic = trio.Event()
@ -787,7 +761,7 @@ class ChartView(ViewBox):
'''
name = self.name
# print(f'YRANGE ON {name}')
profiler = Profiler(
profiler = pg.debug.Profiler(
msg=f'`ChartView._set_yrange()`: `{name}`',
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
@ -856,33 +830,29 @@ class ChartView(ViewBox):
) -> None:
'''
Assign callbacks for rescaling and resampling y-axis data
automatically based on data contents and ``ViewBox`` state.
Assign callback for rescaling y-axis automatically
based on data contents and ``ViewBox`` state.
'''
if src_vb is None:
src_vb = self
# widget-UIs/splitter(s) resizing
# splitter(s) resizing
src_vb.sigResized.connect(self._set_yrange)
# re-sampling trigger:
# TODO: a smarter way to avoid calling this needlessly?
# 2 things i can think of:
# - register downsample-able graphics specially and only
# iterate those.
# - only register this when certain downsample-able graphics are
# - only register this when certain downsampleable graphics are
# "added to scene".
src_vb.sigRangeChangedManually.connect(
self.maybe_downsample_graphics
)
# mouse wheel doesn't emit XRangeChanged
src_vb.sigRangeChangedManually.connect(self._set_yrange)
# XXX: enabling these will cause "jittery"-ness
# on zoom where sharp diffs in the y-range will
# not re-size right away until a new sample update?
# if src_vb is not self:
# src_vb.sigXRangeChanged.connect(self._set_yrange)
# src_vb.sigXRangeChanged.connect(
# self.maybe_downsample_graphics
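
# Editor's note: the signal wiring above reduced to its skeleton (a
# sketch; ``rescale`` stands in for ``_set_yrange``):
import pyqtgraph as pg

app = pg.mkQApp()  # ViewBox is a graphics item: needs a QApplication

vb = pg.ViewBox()

def rescale(*args) -> None:
    # recompute + apply the y-range for whatever data is in view
    ...

vb.sigResized.connect(rescale)               # widget/splitter resizes
vb.sigRangeChangedManually.connect(rescale)  # mouse wheel / drag
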
@ -927,7 +897,8 @@ class ChartView(ViewBox):
self,
autoscale_overlays: bool = True,
):
profiler = Profiler(
profiler = pg.debug.Profiler(
msg=f'ChartView.maybe_downsample_graphics() for {self.name}',
disabled=not pg_profile_enabled(),
@ -941,12 +912,8 @@ class ChartView(ViewBox):
# TODO: a faster single-loop-iterator way of doing this XD
chart = self._chart
plots = {chart.name: chart}
linked = self.linked
if linked:
plots |= linked.subplots
linked = self.linkedsplits
plots = linked.subplots | {chart.name: chart}
for chart_name, chart in plots.items():
for name, flow in chart._flows.items():
@ -956,7 +923,6 @@ class ChartView(ViewBox):
# XXX: super important to be aware of this.
# or not flow.graphics.isVisible()
):
# print(f'skipping {flow.name}')
continue
# pass in no array which will read and render from the last

View File

@ -18,14 +18,9 @@
Lines for orders, alerts, L2.
"""
from __future__ import annotations
from functools import partial
from math import floor
from typing import (
Optional,
Callable,
TYPE_CHECKING,
)
from typing import Optional, Callable
import pyqtgraph as pg
from pyqtgraph import Point, functions as fn
@ -42,9 +37,6 @@ from ..calc import humanize
from ._label import Label
from ._style import hcolor, _font
if TYPE_CHECKING:
from ._cursor import Cursor
# TODO: probably worth investigating if we can
# make .boundingRect() faster:
@ -92,7 +84,7 @@ class LevelLine(pg.InfiniteLine):
self._marker = None
self.only_show_markers_on_hover = only_show_markers_on_hover
self.track_marker_pos: bool = False
self.show_markers: bool = True # presuming the line is hovered at init
# should line go all the way to far end or leave a "margin"
# space for other graphics (eg. L1 book)
@ -130,9 +122,6 @@ class LevelLine(pg.InfiniteLine):
self._y_incr_mult = 1 / chart.linked.symbol.tick_size
self._right_end_sc: float = 0
# use px caching
self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
def txt_offsets(self) -> tuple[int, int]:
return 0, 0
@ -227,23 +216,20 @@ class LevelLine(pg.InfiniteLine):
y: float
) -> None:
'''
Chart coordinates cursor tracking callback.
'''Chart coordinates cursor tracking callback.
this is called by our ``Cursor`` type once this line is set to
track the cursor: for every movement this callback is invoked to
reposition the line with the current view coordinates.
'''
self.movable = True
self.set_level(y) # implicitly calls reposition handler
def mouseDragEvent(self, ev):
'''
Override the ``InfiniteLine`` handler since we need more
"""Override the ``InfiniteLine`` handler since we need more
detailed control and start end signalling.
'''
"""
cursor = self._chart.linked.cursor
# hide y-crosshair
@ -295,20 +281,10 @@ class LevelLine(pg.InfiniteLine):
# show y-crosshair again
cursor.show_xhair()
def get_cursor(self) -> Optional[Cursor]:
chart = self._chart
cur = chart.linked.cursor
if self in cur._hovered:
return cur
return None
def delete(self) -> None:
'''
Remove this line from containing chart/view/scene.
"""Remove this line from containing chart/view/scene.
'''
"""
scene = self.scene()
if scene:
for label in self._labels:
@ -322,8 +298,9 @@ class LevelLine(pg.InfiniteLine):
# remove from chart/cursor states
chart = self._chart
cur = self.get_cursor()
if cur:
cur = chart.linked.cursor
if self in cur._hovered:
cur._hovered.remove(self)
chart.plotItem.removeItem(self)
@ -331,8 +308,8 @@ class LevelLine(pg.InfiniteLine):
def mouseDoubleClickEvent(
self,
ev: QtGui.QMouseEvent,
) -> None:
# TODO: enter labels edit mode
print(f'double click {ev}')
@ -357,22 +334,30 @@ class LevelLine(pg.InfiniteLine):
line_end, marker_right, r_axis_x = self._chart.marker_right_points()
# (legacy) NOTE: at one point this seemed slower when moving around
# order lines.. not sure if that's still true or why but we've
# dropped the original hacky `.paint()` transform stuff for inf
# line markers now - check the git history if it needs to be
# reverted.
if self._marker:
if self.track_marker_pos:
# make the line end at the marker's x pos
line_end = marker_right = self._marker.pos().x()
if self.show_markers and self.markers:
p.setPen(self.pen)
qgo_draw_markers(
self.markers,
self.pen.color(),
p,
vb_left,
vb_right,
marker_right,
)
# marker_size = self.markers[0][2]
self._maxMarkerSize = max([m[2] / 2. for m in self.markers])
# this seems slower when moving around
# order lines.. not sure wtf is up with that.
# for now we're just using it on the position line.
elif self._marker:
# TODO: make this label update part of a scene-aware-marker
# composed annotation
self._marker.setPos(
QPointF(marker_right, self.scene_y())
)
if hasattr(self._marker, 'label'):
self._marker.label.update()
@ -394,14 +379,16 @@ class LevelLine(pg.InfiniteLine):
def hide(self) -> None:
super().hide()
mkr = self._marker
if mkr:
mkr.hide()
if self._marker:
self._marker.hide()
# needed for ``order_line()`` lines currently
self._marker.label.hide()
def show(self) -> None:
super().show()
if self._marker:
self._marker.show()
# self._marker.label.show()
def scene_y(self) -> float:
return self.getViewBox().mapFromView(
@ -434,10 +421,6 @@ class LevelLine(pg.InfiniteLine):
return path
@property
def marker(self) -> LevelMarker:
return self._marker
def hoverEvent(self, ev):
'''
Mouse hover callback.
@ -446,16 +429,17 @@ class LevelLine(pg.InfiniteLine):
cur = self._chart.linked.cursor
# hovered
if (
not ev.isExit()
and ev.acceptDrags(QtCore.Qt.LeftButton)
):
if (not ev.isExit()) and ev.acceptDrags(QtCore.Qt.LeftButton):
# if already hovered we don't need to run again
if self.mouseHovering is True:
return
if self.only_show_markers_on_hover:
self.show_markers()
self.show_markers = True
if self._marker:
self._marker.show()
# highlight if so configured
if self.highlight_on_hover:
@ -498,7 +482,11 @@ class LevelLine(pg.InfiniteLine):
cur._hovered.remove(self)
if self.only_show_markers_on_hover:
self.hide_markers()
self.show_markers = False
if self._marker:
self._marker.hide()
self._marker.label.hide()
if self not in cur._trackers:
cur.show_xhair(y_label_level=self.value())
@ -510,15 +498,6 @@ class LevelLine(pg.InfiniteLine):
self.update()
def hide_markers(self) -> None:
if self._marker:
self._marker.hide()
self._marker.label.hide()
def show_markers(self) -> None:
if self._marker:
self._marker.show()
def level_line(
@ -539,10 +518,9 @@ def level_line(
**kwargs,
) -> LevelLine:
'''
Convenience routine to add a styled horizontal line to a plot.
"""Convenience routine to add a styled horizontal line to a plot.
'''
"""
hl_color = color + '_light' if highlight_on_hover else color
line = LevelLine(
@ -724,7 +702,7 @@ def order_line(
marker = LevelMarker(
chart=chart,
style=marker_style,
get_level=line.value, # callback
get_level=line.value,
size=marker_size,
keep_in_view=False,
)
@ -733,8 +711,7 @@ def order_line(
marker = line.add_marker(marker)
# XXX: DON'T COMMENT THIS!
# this fixes the artifact issue!
# .. of course, bounding rect stuff
# this fixes the artifact issue! .. of course, bounding rect stuff
line._maxMarkerSize = marker_size
assert line._marker is marker
@ -755,8 +732,7 @@ def order_line(
if action != 'alert':
# add a partial position label if we also added a level
# marker
# add a partial position label if we also added a level marker
pp_size_label = Label(
view=view,
color=line.color,
@ -790,9 +766,9 @@ def order_line(
# XXX: without this the pp proportion label next to the marker
# seems to lag? this is the same issue we had with position
# lines which we handle with ``.update_graphics()``.
# marker._on_paint=lambda marker: pp_size_label.update()
marker._on_paint = lambda marker: pp_size_label.update()
# XXX: THIS IS AN UNTYPED MONKEY PATCH!?!?!
marker.label = label
# sanity check

View File

@ -1,98 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Notifications utils.
"""
import os
import platform
import subprocess
from typing import Optional
import trio
from ..log import get_logger
from ..clearing._messages import (
Status,
)
log = get_logger(__name__)
_dbus_uid: Optional[str] = ''
async def notify_from_ems_status_msg(
msg: Status,
duration: int = 3000,
is_subproc: bool = False,
) -> None:
'''
Send a linux desktop notification.
Handle subprocesses by discovering the dbus user id
on first call.
'''
if platform.system() != "Linux":
return
# TODO: this in another task?
# not sure if this will ever be a bottleneck,
# we probably could do graphics stuff first tho?
if is_subproc:
global _dbus_uid
su = os.environ.get('SUDO_USER')
if (
not _dbus_uid
and su
):
# TODO: use `trio` but we need to use nursery.start()
# to use pipes?
# result = await trio.run_process(
result = subprocess.run(
[
'id',
'-u',
su,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# check=True
)
_dbus_uid = result.stdout.decode("utf-8").replace('\n', '')
os.environ['DBUS_SESSION_BUS_ADDRESS'] = (
f'unix:path=/run/user/{_dbus_uid}/bus'
)
result = await trio.run_process(
[
'notify-send',
'-u', 'normal',
'-t', f'{duration}',
'piker',
# TODO: add in standard fill/exec info that maybe we
# pack in a broker independent way?
f"'{msg.pformat()}'",
],
)
log.runtime(result)
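For reference, a minimal out-of-tree sketch of the uid discovery the removed helper performed; `pwd.getpwnam()` here stands in for shelling out to `id -u` and the bus path follows the standard systemd user-bus convention:

import os
import pwd
import subprocess

def dbus_addr_for_sudo_user() -> str | None:
    # equivalent of `id -u "$SUDO_USER"` via the passwd db
    su = os.environ.get('SUDO_USER')
    if not su:
        return None
    uid = pwd.getpwnam(su).pw_uid
    return f'unix:path=/run/user/{uid}/bus'

if (addr := dbus_addr_for_sudo_user()):
    os.environ['DBUS_SESSION_BUS_ADDRESS'] = addr
    subprocess.run(
        ['notify-send', '-u', 'normal', '-t', '3000', 'piker', 'test'],
    )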

View File

@ -32,7 +32,6 @@ from PyQt5.QtGui import QPainterPath
from .._profile import pg_profile_enabled, ms_slower_then
from ._style import hcolor
from ..log import get_logger
from .._profile import Profiler
if TYPE_CHECKING:
from ._chart import LinkedSplits
@ -171,7 +170,7 @@ class BarItems(pg.GraphicsObject):
) -> None:
profiler = Profiler(
profiler = pg.debug.Profiler(
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
)
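Both the old `pg.debug.Profiler` and the vendored replacement follow pyqtgraph's checkpoint-call convention, so (as a hedged sketch) wrapping any hot paint path looks roughly like:

profiler = Profiler(
    disabled=not pg_profile_enabled(),
    ms_threshold=ms_slower_then,
)
# ... expensive path/array work here ...
profiler('generated paths')  # logs elapsed ms since the previous checkpoint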

View File

@ -22,9 +22,12 @@ from __future__ import annotations
from typing import (
Optional, Generic,
TypeVar, Callable,
Literal,
)
import enum
import sys
# from pydantic import BaseModel, validator
from pydantic import BaseModel, validator
from pydantic.generics import GenericModel
from PyQt5.QtWidgets import (
QWidget,
@ -35,7 +38,6 @@ from ._forms import (
# FontScaledDelegate,
Edit,
)
from ..data.types import Struct
DataType = TypeVar('DataType')
@ -60,7 +62,7 @@ class Selection(Field[DataType], Generic[DataType]):
options: dict[str, DataType]
# value: DataType = None
# @validator('value') # , always=True)
@validator('value') # , always=True)
def set_value_first(
cls,
@ -98,7 +100,7 @@ class Edit(Field[DataType], Generic[DataType]):
widget_factory = Edit
class AllocatorPane(Struct):
class AllocatorPane(BaseModel):
account = Selection[str](
options=dict.fromkeys(

View File

@ -18,27 +18,23 @@
Charting overlay helpers.
'''
from collections import defaultdict
from functools import partial
from typing import (
Callable,
Optional,
from typing import Callable, Optional
from pyqtgraph.Qt.QtCore import (
# QObject,
# Signal,
Qt,
# QEvent,
)
from pyqtgraph.graphicsItems.AxisItem import AxisItem
from pyqtgraph.graphicsItems.ViewBox import ViewBox
# from pyqtgraph.graphicsItems.GraphicsWidget import GraphicsWidget
from pyqtgraph.graphicsItems.GraphicsWidget import GraphicsWidget
from pyqtgraph.graphicsItems.PlotItem.PlotItem import PlotItem
from pyqtgraph.Qt.QtCore import (
QObject,
Signal,
QEvent,
Qt,
)
from pyqtgraph.Qt.QtWidgets import (
# QGraphicsGridLayout,
QGraphicsLinearLayout,
)
from pyqtgraph.Qt.QtCore import QObject, Signal, QEvent
from pyqtgraph.Qt.QtWidgets import QGraphicsGridLayout, QGraphicsLinearLayout
from ._interaction import ChartView
__all__ = ["PlotItemOverlay"]
@ -84,8 +80,8 @@ class ComposedGridLayout:
``<axis_name>i`` in the layout.
The ``item: PlotItem`` passed to the constructor's grid layout is
used verbatim as the "main plot" whose view box is given precedence
for input handling. The main plot's axes are removed from its
used verbatim as the "main plot" whose view box is give precedence
for input handling. The main plot's axes are removed from it's
layout and placed in the surrounding exterior layouts to allow for
re-ordering if desired.
@ -93,11 +89,16 @@ class ComposedGridLayout:
def __init__(
self,
item: PlotItem,
grid: QGraphicsGridLayout,
reverse: bool = False, # insert items to the "center"
) -> None:
self.items: list[PlotItem] = []
self._pi2axes: dict[ # TODO: use a ``bidict`` here?
# self.grid = grid
self.reverse = reverse
# TODO: use a ``bidict`` here?
self._pi2axes: dict[
int,
dict[str, AxisItem],
] = {}
@ -119,13 +120,12 @@ class ComposedGridLayout:
if name in ('top', 'bottom'):
orient = Qt.Vertical
elif name in ('left', 'right'):
orient = Qt.Horizontal
layout.setOrientation(orient)
self.insert_plotitem(0, item)
self.insert(0, item)
# insert surrounding linear layouts into the parent pi's layout
# such that additional axes can be appended arbitrarily without
@ -159,7 +159,7 @@ class ComposedGridLayout:
# enter plot into list for index tracking
self.items.insert(index, plotitem)
def insert_plotitem(
def insert(
self,
index: int,
plotitem: PlotItem,
@ -171,9 +171,7 @@ class ComposedGridLayout:
'''
if index < 0:
raise ValueError(
'`.insert_plotitem()` only supports an index >= 0'
)
raise ValueError('`insert()` only supports an index >= 0')
# add plot's axes in sequence to the embedded linear layouts
# for each "side" thus avoiding graphics collisions.
@ -222,7 +220,7 @@ class ComposedGridLayout:
return index
def append_plotitem(
def append(
self,
item: PlotItem,
@ -234,7 +232,7 @@ class ComposedGridLayout:
'''
# for left and bottom axes we have to first remove
# items and re-insert to maintain a list-order.
return self.insert_plotitem(len(self.items), item)
return self.insert(len(self.items), item)
def get_axis(
self,
@ -251,16 +249,16 @@ class ComposedGridLayout:
named = self._pi2axes[name]
return named.get(index)
# def pop(
# self,
# item: PlotItem,
def pop(
self,
item: PlotItem,
# ) -> PlotItem:
# '''
# Remove item and restack all axes in list-order.
) -> PlotItem:
'''
Remove item and restack all axes in list-order.
# '''
# raise NotImplementedError
'''
raise NotImplementedError
# Unimplemented features TODO:
@ -281,6 +279,194 @@ class ComposedGridLayout:
# axis?
# TODO: we might want to enabled some kind of manual flag to disable
# this method wrapping during type creation? As example a user could
# definitively decide **not** to enable broadcasting support by
# setting something like ``ViewBox.disable_relays = True``?
def mk_relay_method(
signame: str,
slot: Callable[
[ViewBox,
'QEvent',
Optional[AxisItem]],
None,
],
) -> Callable[
[
ViewBox,
# lol, there isn't really a generic type thanks
# to the rewrite of Qt's event system XD
'QEvent',
'Optional[AxisItem]',
'Optional[ViewBox]', # the ``relayed_from`` arg we provide
],
None,
]:
def maybe_broadcast(
vb: 'ViewBox',
ev: 'QEvent',
axis: 'Optional[int]' = None,
relayed_from: 'ViewBox' = None,
) -> None:
'''
(soon to be) Decorator which makes an event handler
"broadcastable" to overlayed ``GraphicsWidget``s.
Adds relay signals based on the decorated handler's name
and conducts a signal broadcast of the relay signal if there
are consumers registered.
'''
# When no relay source has been set just bypass all
# the broadcast machinery.
if vb.event_relay_source is None:
ev.accept()
return slot(
vb,
ev,
axis=axis,
)
if relayed_from:
assert axis is None
# this is a relayed event and should be ignored (so it does not
# halt/short circuit the graphics scene loop). Further the
# surrounding handler for this signal must be allowed to execute
# and get processed by **this consumer**.
# print(f'{vb.name} rx relayed from {relayed_from.name}')
ev.ignore()
return slot(
vb,
ev,
axis=axis,
)
if axis is not None:
# print(f'{vb.name} handling axis event:\n{str(ev)}')
ev.accept()
return slot(
vb,
ev,
axis=axis,
)
elif (
relayed_from is None
and vb.event_relay_source is vb # we are the broadcaster
and axis is None
):
# Broadcast case: this is a source event which will be
# relayed to attached consumers and accepted after all
# consumers complete their own handling followed by this
# routine's processing. Sequence is,
# - pre-relay to all consumers *first* - ``.emit()`` blocks
# until all downstream relay handlers have run.
# - run the source handler for **this** event and accept
# the event
# Access the "bound signal" that is created
# on the widget type as part of instantiation.
signal = getattr(vb, signame)
# print(f'{vb.name} emitting {signame}')
# TODO/NOTE: we could also just bypass a "relay" signal
# entirely and instead call the handlers manually in
# a loop? This probably is a lot simpler and also doesn't
# have any downside, and allows not touching target widget
# internals.
signal.emit(
ev,
axis,
# passing this demarks a broadcasted/relayed event
vb,
)
# accept event so no more relays are fired.
ev.accept()
# call underlying wrapped method with an extra
# ``relayed_from`` value to denote that this is a relayed
# event handling case.
return slot(
vb,
ev,
axis=axis,
)
return maybe_broadcast
# XXX: :( can't define signals **after** class compile time
# so this is not really useful.
# def mk_relay_signal(
# func,
# name: str = None,
# ) -> Signal:
# (
# args,
# varargs,
# varkw,
# defaults,
# kwonlyargs,
# kwonlydefaults,
# annotations
# ) = inspect.getfullargspec(func)
# # XXX: generate a relay signal with 1 extra
# # argument for a ``relayed_from`` kwarg. Since
# # ``'self'`` is already ignored by signals we just need
# # to count the arguments since we're adding only 1 (and
# # ``args`` will capture that).
# numargs = len(args + list(defaults))
# signal = Signal(*tuple(numargs * [object]))
# signame = name or func.__name__ + 'Relay'
# return signame, signal
def enable_relays(
widget: GraphicsWidget,
handler_names: list[str],
) -> list[Signal]:
'''
Method override helper which enables relay of a particular
``Signal`` from some chosen broadcaster widget to a set of
consumer widgets which should operate their event handlers normally
but driven by signals "relayed" from the broadcaster.
Mostly useful for overlaid widgets that handle user input
which you want broadcast graphically. The target ``widget`` type must
define ``QtCore.Signal``s, each with a `'Relay'` suffix, for each
name provided in ``handler_names: list[str]``.
'''
signals = []
for name in handler_names:
handler = getattr(widget, name)
signame = name + 'Relay'
# ensure the target widget defines a relay signal
relay = getattr(widget, signame)
widget.relays[signame] = name
signals.append(relay)
method = mk_relay_method(signame, handler)
setattr(widget, name, method)
return signals
enable_relays(
ChartView,
['wheelEvent', 'mouseDragEvent']
)
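Since Qt signals can't be declared after class creation, `enable_relays()` presumes the target type pre-defines its relay signals; a hypothetical (illustration-only) view box honoring that contract:

from pyqtgraph.Qt.QtCore import Signal
from pyqtgraph.graphicsItems.ViewBox import ViewBox

class RelayableViewBox(ViewBox):
    # 3 `object` args match the `(ev, axis, relayed_from)` emitted
    # by `maybe_broadcast()` above
    wheelEventRelay = Signal(object, object, object)
    mouseDragEventRelay = Signal(object, object, object)
    # filled in by `enable_relays()`: signal name -> handler name
    relays: dict[str, str] = {}
    event_relay_source: ViewBox | None = None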
class PlotItemOverlay:
'''
A composite for managing overlaid ``PlotItem`` instances such that
@ -296,18 +482,16 @@ class PlotItemOverlay:
) -> None:
self.root_plotitem: PlotItem = root_plotitem
self.relay_handlers: defaultdict[
str,
list[Callable],
] = defaultdict(list)
# NOTE: required for scene layering/relaying; this guarantees
# the "root" plot receives priority for interaction
# events/signals.
root_plotitem.vb.setZValue(1000)
vb = root_plotitem.vb
vb.event_relay_source = vb # TODO: maybe change name?
vb.setZValue(1000) # XXX: critical for scene layering/relaying
self.overlays: list[PlotItem] = []
self.layout = ComposedGridLayout(root_plotitem)
self.layout = ComposedGridLayout(
root_plotitem,
root_plotitem.layout,
)
self._relays: dict[str, Signal] = {}
def add_plotitem(
@ -315,10 +499,8 @@ class PlotItemOverlay:
plotitem: PlotItem,
index: Optional[int] = None,
# event/signal names which will be broadcasted to all added
# (relayee) ``PlotItem``s (eg. ``ViewBox.mouseDragEvent``).
relay_events: list[str] = [],
# TODO: we could also put the ``ViewBox.XAxis``
# style enum here?
# (0,), # link x
# (1,), # link y
# (0, 1), # link both
@ -328,155 +510,58 @@ class PlotItemOverlay:
index = index or len(self.overlays)
root = self.root_plotitem
# layout: QGraphicsGridLayout = root.layout
self.overlays.insert(index, plotitem)
vb: ViewBox = plotitem.vb
# mark this consumer overlay as ready to expect relayed events
# from the root plotitem.
vb.event_relay_source = root.vb
# TODO: some sane way to allow menu event broadcast XD
# vb.setMenuEnabled(False)
# wire up any relay signal(s) from the source plot to added
# "overlays". We use a plain loop instead of mucking with
# re-connecting signal/slots which tends to be more invasive and
# harder to implement and provides no measurable performance
# gain.
if relay_events:
for ev_name in relay_events:
relayee_handler: Callable[
[
ViewBox,
# lol, there isn't really a generic type thanks
# to the rewrite of Qt's event system XD
QEvent,
# TODO: inside the `maybe_broadcast()` (soon to be) decorator
# we need have checks that consumers have been attached to
# these relay signals.
if link_axes != (0, 1):
AxisItem | None,
],
None,
] = getattr(vb, ev_name)
sub_handlers: list[Callable] = self.relay_handlers[ev_name]
# on the first registry of a relayed event we pop the
# root's handler and override it to a custom broadcaster
# routine.
if not sub_handlers:
src_handler = getattr(
root.vb,
ev_name,
)
def broadcast(
ev: 'QEvent',
# TODO: drop this viewbox specific input and
# allow a predicate to be passed in by user.
axis: 'Optional[int]' = None,
*,
# these are bound in by the ``partial`` below
# and ensure a unique broadcaster per event.
ev_name: str = None,
src_handler: Callable = None,
relayed_from: 'ViewBox' = None,
# remaining inputs the source handler expects
**kwargs,
) -> None:
'''
Broadcast signal or event: this is a source
event which will be relayed to attached
"relayee" plot item consumers.
The event is accepted, halting any further
handlers from being triggered.
Sequence is,
- pre-relay to all consumers *first* - exactly
like how a ``Signal.emit()`` blocks until all
downstream relay handlers have run.
- run the event's source handler event
'''
ev.accept()
# broadcast first to relayees *first*. trigger
# relay of event to all consumers **before**
# processing/consumption in the source handler.
relayed_handlers = self.relay_handlers[ev_name]
assert getattr(vb, ev_name).__name__ == ev_name
# TODO: generalize as an input predicate
if axis is None:
for handler in relayed_handlers:
handler(
ev,
axis=axis,
**kwargs,
)
# run "source" widget's handler last
src_handler(
ev,
axis=axis,
)
# dynamic handler override on the publisher plot
setattr(
root.vb,
ev_name,
partial(
broadcast,
ev_name=ev_name,
src_handler=src_handler
),
)
else:
assert getattr(root.vb, ev_name)
assert relayee_handler not in sub_handlers
# append relayed-to widget's handler to relay table
sub_handlers.append(relayee_handler)
# wire up relay signals
for relay_signal_name, handler_name in vb.relays.items():
# print(handler_name)
# XXX: Signal class attrs are bound after instantiation
# of the defining type, so we need to access that bound
# version here.
signal = getattr(root.vb, relay_signal_name)
handler = getattr(vb, handler_name)
signal.connect(handler)
# link dim-axes to root if requested by user.
# TODO: solve more-than-wanted scaled panning on click drag
# which seems to be due to broadcast. So we probably need to
# disable broadcast when axes are linked in a particular
# dimension?
for dim in link_axes:
# link x and y axes to new view box such that the top level
# viewbox propagates to the root (and whatever other
# plotitem overlays that have been added).
vb.linkView(dim, root.vb)
# => NOTE: in order to prevent "more-than-linear" scaled
# panning moves on (for eg. click-drag) certain range change
# signals (i.e. ``.sigXRangeChanged``), the user needs to be
# careful that any broadcasted ``relay_events`` are short
# circuited in sub-handlers (aka relayees') implementations. As
# an example if a ``ViewBox.mouseDragEvent`` is broadcasted, the
# overlayed implementations need to be sure they either don't
# also link the x-axes (by not providing ``link_axes=(0,)``
# above) or that the relayee ``.mouseDragEvent()`` handlers are
# ready to "``return`` early" in the case that
# ``.sigXRangeChanged`` is emitted as part of linked axes.
# For more details on such signalling mechanics peek in
# ``ViewBox.linkView()``.
# make overlaid viewbox impossible to focus since the top
# level should handle all input and relay to overlays.
# NOTE: this was solved with the `setZValue()` above!
# make overlaid viewbox impossible to focus since the top level
# should handle all input and relay to overlays. Note that the
# "root" plot item gettingn interaction priority is configured
# with the ``.setZValue()`` during init.
# TODO: we will probably want to add a "focus" api such that
# a new "top level" ``PlotItem`` can be selected dynamically
# (and presumably the axes dynamically sorted to match).
vb.setFlag(
vb.GraphicsItemFlag.ItemIsFocusable,
False
)
vb.setFocusPolicy(Qt.NoFocus)
# => TODO: add a "focus" api for switching the "top level"
# ``PlotItem`` dynamically.
# append-compose into the layout all axes from this plot
self.layout.insert_plotitem(index, plotitem)
self.layout.insert(index, plotitem)
plotitem.setGeometry(root.vb.sceneBoundingRect())
@ -494,6 +579,25 @@ class PlotItemOverlay:
root.vb.setFocus()
assert root.vb.focusWidget()
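A hedged usage sketch of the new-side API (`volume_pi` being a hypothetical second plot): zoom and pan broadcast to the overlay while only the x-dimension is hard-linked:

overlay = PlotItemOverlay(root_plotitem)
overlay.add_plotitem(
    volume_pi,
    # broadcast zoom/pan from the root to this overlay
    relay_events=['wheelEvent', 'mouseDragEvent'],
    link_axes=(0,),  # link x only, per the caveat note above
)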
# XXX: do we need this? Why would you build then destroy?
def remove_plotitem(self, plotItem: PlotItem) -> None:
'''
Remove this ``PlotItem`` from the overlayed set making it not shown
and unable to accept input.
'''
...
# TODO: i think this would be super hot B)
def focus_item(self, plotitem: PlotItem) -> PlotItem:
'''
Apply focus to a contained PlotItem thus making it the "top level"
item in the overlay able to accept peripheral's input from the user
and responsible for zoom and panning control via its ``ViewBox``.
'''
...
def get_axis(
self,
plot: PlotItem,
@ -526,9 +630,8 @@ class PlotItemOverlay:
return axes
# XXX: untested as of now.
# TODO: need this as part of selecting a different root/source
# plot to rewire interaction event broadcast dynamically.
# TODO: i guess we need this if you want to detach existing plots
# dynamically? XXX: untested as of now.
def _disconnect_all(
self,
plotitem: PlotItem,
@ -543,22 +646,3 @@ class PlotItemOverlay:
disconnected.append(sig)
return disconnected
# XXX: do we need this? Why would you build then destroy?
# def remove_plotitem(self, plotItem: PlotItem) -> None:
# '''
# Remove this ``PlotItem`` from the overlayed set making it not shown
# and unable to accept input.
# '''
# ...
# TODO: i think this would be super hot B)
# def focus_plotitem(self, plotitem: PlotItem) -> PlotItem:
# '''
# Apply focus to a contained PlotItem thus making it the "top level"
# item in the overlay able to accept peripheral's input from the user
# and responsible for zoom and panning control via its ``ViewBox``.
# '''
# ...

View File

@ -49,17 +49,12 @@ def xy_downsample(
x_spacer: float = 0.5,
) -> tuple[
np.ndarray,
np.ndarray,
float,
float,
]:
) -> tuple[np.ndarray, np.ndarray]:
# downsample whenever more then 1 pixels per datum can be shown.
# always refresh data bounds until we get diffing
# working properly, see above..
bins, x, y, ymn, ymx = ds_m4(
bins, x, y = ds_m4(
x,
y,
uppx,
@ -72,7 +67,7 @@ def xy_downsample(
)).flatten()
y = y.flatten()
return x, y, ymn, ymx
return x, y
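For context, an illustrative sketch (not piker's actual `ds_m4`) of the M4 idea the new signature exposes: keep only the first/min/max/last samples per horizontal pixel bin and hand back the y-range so callers avoid a second scan:

import numpy as np

def m4_bin(x: np.ndarray, y: np.ndarray, uppx: float):
    # roughly one bin per output pixel
    nbins = max(1, int(len(x) // uppx))
    out_x, out_y = [], []
    for xb, yb in zip(np.array_split(x, nbins), np.array_split(y, nbins)):
        # first/min/max/last kept in index order for line connectivity
        idxs = sorted({0, int(yb.argmin()), int(yb.argmax()), len(yb) - 1})
        out_x.extend(xb[i] for i in idxs)
        out_y.extend(yb[i] for i in idxs)
    return np.array(out_x), np.array(out_y), float(y.min()), float(y.max())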
@njit(

View File

@ -15,15 +15,11 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Customization of ``pyqtgraph`` core routines and various types normally
for speedups.
Generally, our use does not require "scientific precision" for
pixel-perfect view transforms.
Customization of ``pyqtgraph`` core routines to speed up our use, mostly
based on not requiring "scientific precision" for pixel-perfect view
transforms.
"""
from typing import Optional
import pyqtgraph as pg
@ -50,211 +46,3 @@ def _do_overrides() -> None:
"""
# we don't care about potential fp issues inside Qt
pg.functions.invertQTransform = invertQTransform
pg.PlotItem = PlotItem
# NOTE: the below customized type contains all our changes on a method
# by method basis as per the diff:
# https://github.com/pyqtgraph/pyqtgraph/commit/8e60bc14234b6bec1369ff4192dbfb82f8682920#diff-a2b5865955d2ba703dbc4c35ff01aa761aa28d2aeaac5e68d24e338bc82fb5b1R500
class PlotItem(pg.PlotItem):
'''
Overrides for the core plot object mostly pertaining to overlayed
multi-view management as it relates to multi-axis managment.
'''
def __init__(
self,
parent=None,
name=None,
labels=None,
title=None,
viewBox=None,
axisItems=None,
default_axes=['left', 'bottom'],
enableMenu=True,
**kargs
):
super().__init__(
parent=parent,
name=name,
labels=labels,
title=title,
viewBox=viewBox,
axisItems=axisItems,
# default_axes=default_axes,
enableMenu=enableMenu,
kargs=kargs,
)
# self.setAxisItems(
# axisItems,
# default_axes=default_axes,
# )
# NOTE: this is an entirely new method not in upstream.
def removeAxis(
self,
name: str,
unlink: bool = True,
) -> Optional[pg.AxisItem]:
"""
Remove an axis from the contained axis items
by ```name: str```.
This means the axis graphics object will be removed
from the ``.layout: QGraphicsGridLayout`` as well as unlinked
from the underlying associated ``ViewBox``.
If the ``unlink: bool`` is set to ``False`` then the axis will
stay linked to its view and will only be removed from the
layout.
If no axis with ``name: str`` is found then this is a noop.
Return the axis instance that was removed.
"""
entry = self.axes.pop(name, None)
if not entry:
return
axis = entry['item']
self.layout.removeItem(axis)
axis.scene().removeItem(axis)
if unlink:
axis.unlinkFromView()
self.update()
return axis
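Hedged usage (`my_axis` is hypothetical): swap the default left axis for a custom one without leaving a stale view link behind:

old = plotitem.removeAxis('left')         # unlinked + dropped from layout
plotitem.setAxisItems({'left': my_axis})  # re-add via the override below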
# Why do we need to always have all axes created?
#
# I don't understand this at all.
#
# Everything seems to work if you just always apply the
# set passed to this method **EXCEPT** for some super weird reason
# the view box geometry still computes as though the space for the
# `'bottom'` axis is always there **UNLESS** you always add that
# axis but hide it?
#
# Why in tf would this be the case!?!?
def setAxisItems(
self,
# XXX: yeah yeah, i know we can't use type annots like this yet.
axisItems: Optional[dict[str, pg.AxisItem]] = None,
add_to_layout: bool = True,
default_axes: list[str] = ['left', 'bottom'],
):
"""
Override axis item setting to only
"""
axisItems = axisItems or {}
# XXX: wth is this even saying?!?
# Array containing visible axis items
# Also containing potentially hidden axes, but they are not
# touched so it does not matter
# visibleAxes = ['left', 'bottom']
# Note that it does not matter that this adds
# some values to visibleAxes a second time
# XXX: uhhh wat^ ..?
visibleAxes = list(default_axes) + list(axisItems.keys())
# TODO: we should probably invert the loop here to not loop the
# predefined "axis name set" and instead loop the `axisItems`
# input and lookup indices from a predefined map.
for name, pos in (
('top', (1, 1)),
('bottom', (3, 1)),
('left', (2, 0)),
('right', (2, 2))
):
if (
name in self.axes and
name in axisItems
):
# we already have an axis entry for this name
# so remove the existing entry.
self.removeAxis(name)
# elif name not in axisItems:
# # this axis entry is not provided in this call
# # so remove any old/existing entry.
# self.removeAxis(name)
# Create new axis
if name in axisItems:
axis = axisItems[name]
if axis.scene() is not None:
if (
name not in self.axes
or axis != self.axes[name]["item"]
):
raise RuntimeError(
"Can't add an axis to multiple plots. Shared axes"
" can be achieved with multiple AxisItem instances"
" and set[X/Y]Link.")
else:
# Set up new axis
# XXX: ok but why do we want to add axes for all entries
# if not desired by the user? The only reason I can see
# adding this is without it there's some weird
# ``ViewBox`` geometry bug.. where a gap for the
# 'bottom' axis is somehow left in?
axis = pg.AxisItem(orientation=name, parent=self)
axis.linkToView(self.vb)
# XXX: shouldn't you already know the ``pos`` from the name?
# Oh right instead of a global map that would let you
# easily look that up it's redefined over and over and over
# again in methods..
self.axes[name] = {'item': axis, 'pos': pos}
# NOTE: in the overlay case the axis may be added to some
# other layout and should not be added here.
if add_to_layout:
self.layout.addItem(axis, *pos)
# place axis above images at z=0, items that want to draw
# over the axes should be placed at z>=1:
axis.setZValue(0.5)
axis.setFlag(
axis.GraphicsItemFlag.ItemNegativeZStacksBehindParent
)
if name in visibleAxes:
self.showAxis(name, True)
else:
# why do we need to insert all axes to ``.axes`` and
# only hide the ones the user doesn't specify? It all
# seems to work fine without doing this except for this
# weird gap for the 'bottom' axis that always shows up
# in the view box geometry??
self.hideAxis(name)
def updateGrid(
self,
*args,
):
alpha = self.ctrl.gridAlphaSlider.value()
x = alpha if self.ctrl.xGridCheck.isChecked() else False
y = alpha if self.ctrl.yGridCheck.isChecked() else False
for name, dim in (
('top', x),
('bottom', x),
('left', y),
('right', y)
):
if name in self.axes:
self.getAxis(name).setGrid(dim)
# self.getAxis('bottom').setGrid(x)
# self.getAxis('left').setGrid(y)
# self.getAxis('right').setGrid(y)
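Hedged note (module path assumed): these overrides are applied once at startup via the `_do_overrides()` hook shown above, which both patches `invertQTransform` and swaps in this `PlotItem`:

from piker.ui import _pg_overrides  # assumed import path

_pg_overrides._do_overrides()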

View File

@ -19,15 +19,10 @@ Position info and display
"""
from __future__ import annotations
from copy import copy
from dataclasses import dataclass
from functools import partial
from math import floor, copysign
from typing import (
Callable,
Optional,
TYPE_CHECKING,
)
from typing import Optional
# from PyQt5.QtWidgets import QStyle
@ -42,22 +37,15 @@ from ._anchors import (
gpath_pin,
)
from ..calc import humanize, pnl, puterize
from ..clearing._allocate import Allocator
from ..pp import Position
from ..clearing._allocate import Allocator, Position
from ..data._normalize import iterticks
from ..data.feed import Feed
from ..data.types import Struct
from ._label import Label
from ._lines import LevelLine, order_line
from ._style import _font
from ._forms import FieldsForm, FillStatusBar, QLabel
from ..log import get_logger
if TYPE_CHECKING:
from ._chart import (
ChartPlotWidget,
)
log = get_logger(__name__)
_pnl_tasks: dict[str, bool] = {}
@ -69,8 +57,7 @@ async def update_pnl_from_feed(
tracker: PositionTracker,
) -> None:
'''
Real-time display the current pp's PnL in the appropriate label.
'''Real-time display the current pp's PnL in the appropriate label.
Raises a ``ValueError`` if this task is spawned where there is a net-zero pp.
@ -79,7 +66,7 @@ async def update_pnl_from_feed(
pp = order_mode.current_pp
live = pp.live_pp
key = live.symbol.front_fqsn()
key = live.symbol.key
log.info(f'Starting pnl display for {pp.alloc.account}')
@ -118,8 +105,8 @@ async def update_pnl_from_feed(
# compute and display pnl status
order_mode.pane.pnl_label.format(
pnl=copysign(1, size) * pnl(
# live.ppu,
order_mode.current_pp.live_pp.ppu,
# live.avg_price,
order_mode.current_pp.live_pp.avg_price,
tick['price'],
),
)
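A worked check of the sign convention above, assuming `pnl()` is the simple fractional return `(new - ppu) / ppu`:

from math import copysign

ppu, last, size = 100.0, 90.0, -2         # short 2 units entered at 100
frac = (last - ppu) / ppu                 # -0.10
signed = copysign(1, size) * frac         # -1 * -0.10 = +0.10: a 10% gain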
@ -178,36 +165,19 @@ class SettingsPane:
key: str,
value: str,
) -> None:
'''
Try to apply some input setting (by the user), revert to
previous setting if it fails display new value if applied.
'''
self.apply_setting(key, value)
self.update_status_ui(self.order_mode.current_pp)
def apply_setting(
self,
key: str,
value: str,
) -> bool:
'''
Called on any order pane edit field value change.
'''
mode = self.order_mode
tracker = mode.current_pp
alloc = tracker.alloc
# an account switch request
if key == 'account':
# hide details on the old selection
old_tracker = mode.current_pp
old_tracker.nav.hide_info()
old_tracker.hide_info()
# re-assign the order mode tracker
account_name = value
@ -217,7 +187,7 @@ class SettingsPane:
# a ``brokerd`) then error and switch back to the last
# selection.
if tracker is None:
sym = old_tracker.charts[0].linked.symbol.key
sym = old_tracker.chart.linked.symbol.key
log.error(
f'Account `{account_name}` can not be set for {sym}'
)
@ -228,44 +198,39 @@ class SettingsPane:
self.order_mode.current_pp = tracker
assert tracker.alloc.account == account_name
self.form.fields['account'].setCurrentText(account_name)
tracker.nav.show()
tracker.nav.hide_info()
tracker.show()
tracker.hide_info()
self.display_pnl(tracker)
# load the new account's allocator
alloc = tracker.alloc
else:
tracker = mode.current_pp
alloc = tracker.alloc
size_unit = alloc.size_unit
# WRITE any settings to current pp's allocator
try:
if key == 'size_unit':
# implicit re-write of value if input
# is the "text name" of the units.
# yah yah, i know this is badd..
alloc.size_unit = value
elif key != 'account': # numeric fields entry
try:
else:
value = puterize(value)
except ValueError as err:
log.error(err.args[0])
return False
if key == 'limit':
if value <= 0:
log.error('limit must be > 0')
return False
pp = mode.current_pp.live_pp
if alloc.size_unit == 'currency':
if size_unit == 'currency':
dsize = pp.dsize
if dsize > value:
log.error(
f'limit must be > current pp: {dsize}'
)
# reset position size value
alloc.currency_limit = dsize
return False
raise ValueError
alloc.currency_limit = value
@ -281,50 +246,30 @@ class SettingsPane:
elif key == 'slots':
if value <= 0:
# raise ValueError('slots must be > 0')
log.error('limit must be > 0')
return False
raise ValueError('slots must be > 0')
alloc.slots = int(value)
else:
log.error(f'Unknown setting {key}')
raise ValueError
# don't log account "change" case since it'll be submitted
# on every mouse interaction.
log.runtime(f'settings change: {key}: {value}')
log.info(f'settings change: {key}: {value}')
# TODO: maybe return a diff of settings so if we hit an error we
# can have general input handling code to report it through the
# UI in some way?
return True
def update_status_ui(
self,
tracker: PositionTracker,
) -> None:
alloc = tracker.alloc
slots = alloc.slots
used = alloc.slots_used(tracker.live_pp)
size = tracker.live_pp.size
dsize = tracker.live_pp.dsize
except ValueError:
log.error(f'Invalid value for `{key}`: {value}')
# READ out settings and update the status UI / settings widgets
suffix = {'currency': ' $', 'units': ' u'}[alloc.size_unit]
size_unit, limit = alloc.limit_info()
suffix = {'currency': ' $', 'units': ' u'}[size_unit]
limit = alloc.limit()
# TODO: a reverse look up from the position to the equivalent
# account(s), if none then look to user config for default?
self.update_status_ui(pp=tracker)
step_size, currency_per_slot = alloc.step_sizes()
if alloc.size_unit == 'currency':
if size_unit == 'currency':
step_size = currency_per_slot
if dsize >= limit:
self.apply_setting('limit', limit)
elif size >= limit:
self.apply_setting('limit', limit)
self.step_label.format(
step_size=str(humanize(step_size)) + suffix
@ -343,6 +288,22 @@ class SettingsPane:
# update of level marker size label based on any new settings
tracker.update_from_pp()
# TODO: maybe return a diff of settings so if we can an error we
# can have general input handling code to report it through the
# UI in some way?
return True
def update_status_ui(
self,
pp: PositionTracker,
) -> None:
alloc = pp.alloc
slots = alloc.slots
used = alloc.slots_used(pp.live_pp)
# calculate proportion of position size limit
# that exists and display in fill bar
# TODO: what should we do for fractional slot pps?
@ -353,7 +314,7 @@ class SettingsPane:
# min(round(prop * slots), slots)
min(used, slots)
)
self.update_account_icons({alloc.account: tracker.live_pp})
self.update_account_icons({alloc.account: pp.live_pp})
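Hedged usage of the write path above, assuming `puterize()` parses human-suffixed numeric strings (e.g. '10k' -> 10000):

pane.apply_setting('limit', '10k')  # alloc.currency_limit = 10000.0
pane.apply_setting('slots', '4')    # alloc.slots = 4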
def update_account_icons(
self,
@ -379,9 +340,7 @@ class SettingsPane:
tracker: PositionTracker,
) -> None:
'''
Display the PnL for the current symbol and personal positioning
(pp).
'''Display the PnL for the current symbol and personal positioning (pp).
If a position is open start a background task which will
real-time update the pnl label in the settings pane.
@ -395,17 +354,16 @@ class SettingsPane:
if size:
# last historical close price
last = feed.rt_shm.array[-1][['close']][0]
last = feed.shm.array[-1][['close']][0]
pnl_value = copysign(1, size) * pnl(
tracker.live_pp.ppu,
tracker.live_pp.avg_price,
last,
)
# maybe start update task
global _pnl_tasks
fqsn = sym.front_fqsn()
if fqsn not in _pnl_tasks:
_pnl_tasks[fqsn] = True
if sym.key not in _pnl_tasks:
_pnl_tasks[sym.key] = True
self.order_mode.nursery.start_soon(
update_pnl_from_feed,
feed,
@ -417,15 +375,15 @@ class SettingsPane:
self.pnl_label.format(pnl=pnl_value)
def pp_line(
def position_line(
chart: ChartPlotWidget, # noqa
chart: 'ChartPlotWidget', # noqa
size: float,
level: float,
color: str,
marker: LevelMarker,
orient_v: str = 'bottom',
marker: Optional[LevelMarker] = None,
) -> LevelLine:
'''
@ -456,7 +414,16 @@ def pp_line(
show_markers=False,
)
# TODO: use `LevelLine.add_marker()`` for this instead?
if marker:
# configure marker to position data
if size > 0: # long
style = '|<' # point "up to" the line
elif size < 0: # short
style = '>|' # point "down to" the line
marker.style = style
# set marker color to same as line
marker.setPen(line.currentPen)
marker.setBrush(fn.mkBrush(line.currentPen.color()))
@ -464,331 +431,77 @@ def pp_line(
marker.update()
marker.show()
line._marker = marker
line.track_marker_pos = True
# show position marker on view "edge" when out of view
vb = line.getViewBox()
vb.sigRangeChanged.connect(marker.position_in_view)
line.set_level(level)
return line
_derivs = (
'future',
'continuous_future',
'option',
'futures_option',
)
# TODO: move into annotate module?
def mk_level_marker(
chart: ChartPlotWidget,
size: float,
level: float,
on_paint: Callable,
) -> LevelMarker:
'''
Allocate and return nan arrow graphics element.
'''
# scale marker size with dpi-aware font size
font_size = _font.font.pixelSize()
arrow_size = floor(1.375 * font_size)
arrow = LevelMarker(
chart=chart,
style='|<', # actual style is set by caller based on size
get_level=level,
size=arrow_size,
on_paint=on_paint,
)
arrow.show()
return arrow
class Nav(Struct):
'''
Composite for holding a set of charts and respective (by order)
graphics-elements which display position information, acting as a sort
of "navigation" system for a position.
'''
charts: dict[int, ChartPlotWidget]
pp_labels: dict[str, Label] = {}
size_labels: dict[str, Label] = {}
lines: dict[str, Optional[LevelLine]] = {}
level_markers: dict[str, Optional[LevelMarker]] = {}
color: str = 'default_lightest'
def update_ui(
self,
account: str,
price: float,
size: float,
slots_used: float,
size_digits: Optional[int] = None,
) -> None:
'''
Update personal position level line.
'''
for key, chart in self.charts.items():
size_digits = size_digits or chart.linked.symbol.lot_size_digits
line = self.lines.get(key)
level_marker = self.level_markers[key]
pp_label = self.pp_labels[key]
if size:
# create and show a pp line if none yet exists
if line is None:
arrow = self.level_markers[key]
line = pp_line(
chart=chart,
level=price,
size=size,
color=self.color,
marker=arrow,
)
self.lines[key] = line
# modify existing indicator line
line.set_level(price)
# update LHS sizing label
line.update_labels({
'size': size,
'size_digits': size_digits,
'fiat_size': round(price * size, ndigits=2),
# TODO: per account lines on a single (or very
# related) symbol
'account': account,
})
line.show()
# always show arrow-marker when a non-zero
# pos size.
level_marker.show()
# configure marker to position data
if size > 0: # long
# point "up to" the line
level_marker.style = '|<'
elif size < 0: # short
# point "down to" the line
level_marker.style = '>|'
# remove line from view for a net-zero pos
else:
self.hide()
# label updates
size_label = self.size_labels[key]
size_label.fields['slots_used'] = slots_used
size_label.render()
# set arrow marker to correct level
level_marker.level = price
# these updates are critical to avoid lag on view/scene changes
# TODO: couldn't we integrate this into
# a ``.inter_ui_elements_and_update()``?
level_marker.update() # trigger paint
pp_label.update()
size_label.update()
def level(self) -> float:
'''
Return the "level" value from the underlying ``LevelLine`` which tracks
the "average position" price defined the represented position instance.
'''
if self.lines:
for key, line in self.lines.items():
if line:
return line.value()
return 0
def iter_ui_elements(self) -> tuple[
Label,
Label,
LevelLine,
LevelMarker,
]:
for key, chart in self.charts.items():
yield (
self.pp_labels[key],
self.size_labels[key],
self.lines.get(key),
self.level_markers[key],
)
def show(self) -> None:
'''
Show all UI elements on all managed charts.
'''
for (
pp_label,
size_label,
line,
level_marker,
) in self.iter_ui_elements():
# NOTE: be sure to re-trigger arrow/label placement in case
# a new sidepane or other widget (like the search bar) was
# dynamically swapped into the chart-row-widget-space in
# which case we want to reposition in the view but including
# the new x-distance added by that sidepane. See details in
# ``LevelMarker.position_in_view()`` but more or less
# ``ChartPlotWidget.self.marker_right_points()`` gets called
# which itself eventually calls `.getAxis.pos().x()` and
# it's THIS that needs to be called **AFTER** the sidepane
# has been added..
level_marker.show()
level_marker.position_in_view()
# labels
pp_label.show()
size_label.show()
if line:
line.show()
line.show_labels()
def hide(self) -> None:
for (
pp_label,
size_label,
line,
level_marker,
) in self.iter_ui_elements():
pp_label.hide()
level_marker.hide()
size_label.hide()
if line:
line.hide()
def update_graphics(
self,
marker: LevelMarker,
) -> None:
'''
Update all labels callback.
Meant to be called from the marker ``.paint()``
for immediate, lag free label draws.
'''
for (
pp_label,
size_label,
line,
level_marker,
) in self.iter_ui_elements():
pp_label.update()
size_label.update()
# XXX: can't call this because it causes a recursive paint/render
# level_marker.update()
def hide_info(self) -> None:
'''
Hide details (just size label?) of position nav elements.
'''
for (
pp_label,
size_label,
line,
level_marker,
) in self.iter_ui_elements():
size_label.hide()
if line:
line.hide_labels()
class PositionTracker:
'''
Track and display real-time positions for a single asset-symbol
held in a single account, normally shown on a single chart.
Track and display real-time positions for a single symbol
over multiple accounts on a single chart.
Graphically composed of a level line and marker as well as labels
for indicating current position information. Updates are made to the
corresponding "settings pane" for the chart's "order mode" UX.
'''
# inputs
chart: 'ChartPlotWidget' # noqa
alloc: Allocator
startup_pp: Position
live_pp: Position
nav: Nav # holds all UI elements across all charts
# allocated
pp_label: Label
size_label: Label
line: Optional[LevelLine] = None
_color: str = 'default_lightest'
def __init__(
self,
charts: list[ChartPlotWidget],
chart: 'ChartPlotWidget', # noqa
alloc: Allocator,
startup_pp: Position,
) -> None:
nav = self.nav = Nav(charts={id(chart): chart for chart in charts})
self.chart = chart
self.alloc = alloc
self.startup_pp = startup_pp
self.live_pp = copy(startup_pp)
self.live_pp = startup_pp.copy()
# TODO: maybe add this as a method ``Nav.add_chart()``
# init all UI elements
for key, chart in nav.charts.items():
view = chart.getViewBox()
arrow = mk_level_marker(
chart=chart,
size=1,
level=nav.level,
on_paint=nav.update_graphics,
)
# TODO: we really need some kinda "spacing" manager for all
# this stuff...
def offset_from_yaxis() -> float:
'''
If no L1 labels are present beside the x-axis, place
the line label offset from the y-axis just enough to avoid
label overlap with any sticky labels.
'''
x = chart.marker_right_points()[1]
if chart._max_l1_line_len == 0:
mkw = pp_label.txt.boundingRect().width()
x -= 1.5 * mkw
return x
arrow.scene_x = offset_from_yaxis
view.scene().addItem(arrow)
arrow.hide() # never show on startup
nav.level_markers[key] = arrow
# literally the 'pp' (pee pee) "position price" label that's
# always in view
pp_label = Label(
# literally the 'pp' (pee pee) label that's always in view
self.pp_label = pp_label = Label(
view=view,
fmt_str='pp',
color=nav.color,
color=self._color,
update_on_range_change=False,
)
pp_label.render()
nav.pp_labels[key] = pp_label
size_label = Label(
# create placeholder 'up' level arrow
self._level_marker = None
self._level_marker = self.level_marker(size=1)
pp_label.scene_anchor = partial(
gpath_pin,
gpath=self._level_marker,
label=pp_label,
)
pp_label.render()
self.size_label = size_label = Label(
view=view,
color=self.nav.color,
color=self._color,
# this is "static" label
# update_on_range_change=False,
@ -801,19 +514,11 @@ class PositionTracker:
},
)
size_label.render()
size_label.scene_anchor = partial(
pp_tight_and_right,
label=pp_label,
label=self.pp_label,
)
nav.size_labels[key] = size_label
pp_label.scene_anchor = partial(
gpath_pin,
gpath=arrow,
label=pp_label,
)
nav.show()
@property
def pane(self) -> FieldsForm:
@ -823,74 +528,169 @@ class PositionTracker:
'''
return self.chart.linked.godwidget.pp_pane
def update_from_pp(
def update_graphics(
self,
position: Optional[Position] = None,
set_as_startup: bool = False,
marker: LevelMarker
) -> None:
'''
Update graphics and data from average price and size passed in
our EMS ``BrokerdPosition`` msg.
Update all labels.
Meant to be called from the marker ``.paint()``
for immediate, lag free label draws.
'''
self.pp_label.update()
self.size_label.update()
def update_from_pp(
self,
position: Optional[Position] = None,
) -> None:
'''Update graphics and data from average price and size passed in our
EMS ``BrokerdPosition`` msg.
'''
# live pp updates
pp = position or self.live_pp
if set_as_startup:
startup_pp = pp
else:
startup_pp = self.startup_pp
alloc = self.alloc
# update allocator settings
asset_type = pp.symbol.type_key
# specific configs by asset class / type
if asset_type in _derivs:
# since it's harder to know how currency "applies" in this case
# given leverage properties
alloc.size_unit = '# units'
# set units limit to slots size thus making make the next
# entry step 1.0
alloc.units_limit = alloc.slots
else:
alloc.size_unit = 'currency'
# if the current position is already greater then the limit
# settings, increase the limit to the current position
if alloc.size_unit == 'currency':
startup_size = self.startup_pp.size * startup_pp.ppu
if startup_size > alloc.currency_limit:
alloc.currency_limit = round(startup_size, ndigits=2)
else:
startup_size = abs(startup_pp.size)
if startup_size > alloc.units_limit:
alloc.units_limit = startup_size
if asset_type in _derivs:
alloc.slots = alloc.units_limit
self.nav.update_ui(
self.alloc.account,
pp.ppu,
self.update_line(
pp.avg_price,
pp.size,
round(alloc.slots_used(pp), ndigits=1), # slots used
self.chart.linked.symbol.lot_size_digits,
)
if self.live_pp.size:
# print("SHOWING NAV")
self.nav.show()
# label updates
self.size_label.fields['slots_used'] = round(
self.alloc.slots_used(pp), ndigits=1)
self.size_label.render()
if pp.size == 0:
self.hide()
# if pp.size == 0:
else:
# print("HIDING NAV")
self.nav.hide()
self._level_marker.level = pp.avg_price
# these updates are critical to avoid lag on view/scene changes
self._level_marker.update() # trigger paint
self.pp_label.update()
self.size_label.update()
self.show()
# don't show side and status widgets unless
# order mode is "engaged" (which done via input controls)
self.nav.hide_info()
self.hide_info()
def level(self) -> float:
if self.line:
return self.line.value()
else:
return 0
def show(self) -> None:
if self.live_pp.size:
self.line.show()
self.line.show_labels()
self._level_marker.show()
self.pp_label.show()
self.size_label.show()
def hide(self) -> None:
self.pp_label.hide()
self._level_marker.hide()
self.size_label.hide()
if self.line:
self.line.hide()
def hide_info(self) -> None:
'''Hide details (right now just size label?) of position.
'''
self.size_label.hide()
if self.line:
self.line.hide_labels()
# TODO: move into annotate module
def level_marker(
self,
size: float,
) -> LevelMarker:
if self._level_marker:
self._level_marker.delete()
# arrow marker
# scale marker size with dpi-aware font size
font_size = _font.font.pixelSize()
# scale marker size with dpi-aware font size
arrow_size = floor(1.375 * font_size)
if size > 0:
style = '|<'
elif size < 0:
style = '>|'
arrow = LevelMarker(
chart=self.chart,
style=style,
get_level=self.level,
size=arrow_size,
on_paint=self.update_graphics,
)
self.chart.getViewBox().scene().addItem(arrow)
arrow.show()
return arrow
def update_line(
self,
price: float,
size: float,
size_digits: int,
) -> None:
'''Update personal position level line.
'''
# do line update
line = self.line
if size:
if line is None:
# create and show a pp line
line = self.line = position_line(
chart=self.chart,
level=price,
size=size,
color=self._color,
marker=self._level_marker,
)
else:
line.set_level(price)
self._level_marker.level = price
self._level_marker.update()
# update LHS sizing label
line.update_labels({
'size': size,
'size_digits': size_digits,
'fiat_size': round(price * size, ndigits=2),
# TODO: per account lines on a single (or very related) symbol
'account': self.alloc.account,
})
line.show()
elif line: # remove pp line from view if it exists on a net-zero pp
line.delete()
self.line = None

View File

@ -35,13 +35,9 @@ from collections import defaultdict
from contextlib import asynccontextmanager
from functools import partial
from typing import (
Optional,
Callable,
Awaitable,
Sequence,
Any,
AsyncIterator,
Iterator,
Optional, Callable,
Awaitable, Sequence,
Any, AsyncIterator
)
import time
# from pprint import pformat
@ -123,7 +119,7 @@ class CompleterView(QTreeView):
# TODO: size this based on DPI font
self.setIndentation(_font.px_size)
self.setUniformRowHeights(True)
# self.setUniformRowHeights(True)
# self.setColumnWidth(0, 3)
# self.setVerticalBarPolicy(Qt.ScrollBarAlwaysOff)
# self.setSizeAdjustPolicy(QAbstractScrollArea.AdjustIgnored)
@ -142,15 +138,13 @@ class CompleterView(QTreeView):
model.setHorizontalHeaderLabels(labels)
self._font_size: int = 0 # pixels
self._init: bool = False
async def on_pressed(self, idx: QModelIndex) -> None:
'''
Mouse pressed on view handler.
'''Mouse pressed on view handler.
'''
search = self.parent()
await search.chart_current_item()
await search.chart_current_item(clear_to_cache=False)
search.focus()
def set_font_size(self, size: int = 18):
@ -162,64 +156,56 @@ class CompleterView(QTreeView):
self.setStyleSheet(f"font: {size}px")
def resize_to_results(
self,
w: Optional[float] = 0,
h: Optional[float] = None,
# def resizeEvent(self, event: 'QEvent') -> None:
# event.accept()
# super().resizeEvent(event)
) -> None:
def on_resize(self) -> None:
'''
Resize relay event from god.
'''
self.resize_to_results()
def resize_to_results(self):
model = self.model()
cols = model.columnCount()
cidx = self.selectionModel().currentIndex()
rows = model.rowCount()
self.expandAll()
# compute the approx height in pixels needed to include
# all result rows in view.
row_h = rows_h = self.rowHeight(cidx) * (rows + 1)
for idx, item in self.iter_df_rows():
row_h = self.rowHeight(idx)
rows_h += row_h
# print(f'row_h: {row_h}\nrows_h: {rows_h}')
# TODO: could we just break early here on detection
# of ``rows_h >= h``?
# rows = model.rowCount()
col_w_tot = 0
for i in range(cols):
# only slap in a rows's height's worth
# of padding once at startup.. no idea
if (
not self._init
and row_h
):
col_w_tot = row_h
self._init = True
self.resizeColumnToContents(i)
col_w_tot += self.columnWidth(i)
# NOTE: if the height `h` set here is **too large** then the
# resize event will perpetually trigger as the window causes
# some kind of recompute of callbacks.. so we have to ensure
# it's limited.
if h:
h: int = round(h)
abs_mx = round(0.91 * h)
self.setMaximumHeight(abs_mx)
win = self.window()
win_h = win.height()
edit_h = self.parent().bar.height()
sb_h = win.statusBar().height()
if rows_h <= abs_mx:
# self.setMinimumHeight(rows_h)
self.setMinimumHeight(rows_h)
# self.setFixedHeight(rows_h)
# TODO: probably make this more general / less hacky
# we should figure out the exact number of rows to allow
# inclusive of search bar and header "rows", in pixel terms.
# Eventually when we have an "info" widget below the results we
# will want space for it and likely terminating the results-view
# space **exactly on a row** would be ideal.
# if row_px > 0:
# rows = ceil(window_h / row_px) - 4
# else:
# rows = 16
# self.setFixedHeight(rows * row_px)
# self.resize(self.width(), rows * row_px)
else:
self.setMinimumHeight(abs_mx)
# NOTE: if the height set here is **too large** then the resize
# event will perpetually trigger as the window causes some kind
# of recompute of callbacks.. so we have to ensure it's limited.
h = win_h - (edit_h + 1.666*sb_h)
assert h > 0
self.setFixedHeight(round(h))
# dynamically size to width of longest result seen
curr_w = self.width()
if curr_w < col_w_tot:
self.setMinimumWidth(col_w_tot)
# size to width of longest result seen thus far
# TODO: should we always dynamically scale to longest result?
if self.width() < col_w_tot:
self.setFixedWidth(col_w_tot)
self.update()
@ -345,23 +331,6 @@ class CompleterView(QTreeView):
item = model.itemFromIndex(idx)
yield idx, item
def iter_df_rows(
self,
iparent: QModelIndex = QModelIndex(),
) -> Iterator[tuple[QModelIndex, QStandardItem]]:
model = self.model()
isections = model.rowCount(iparent)
for i in range(isections):
idx = model.index(i, 0, iparent)
item = model.itemFromIndex(idx)
yield idx, item
if model.hasChildren(idx):
# recursively yield child items depth-first
yield from self.iter_df_rows(idx)
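Hedged usage: a depth-first walk over every result row, mirroring the height accumulation done in `resize_to_results()` above:

total_h = view.rowHeight(view.selectionModel().currentIndex())
for idx, item in view.iter_df_rows():
    total_h += view.rowHeight(idx)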
def find_section(
self,
section: str,
@ -385,8 +354,7 @@ class CompleterView(QTreeView):
status_field: str = None,
) -> None:
'''
Clear all result-rows from under the depth = 1 section.
'''Clear all result-rows from under the depth = 1 section.
'''
idx = self.find_section(section)
@ -407,6 +375,8 @@ class CompleterView(QTreeView):
else:
model.setItem(idx.row(), 1, QStandardItem())
self.resize_to_results()
return idx
else:
return None
@ -474,22 +444,9 @@ class CompleterView(QTreeView):
self.show_matches()
def show_matches(
self,
wh: Optional[tuple[float, float]] = None,
) -> None:
if wh:
self.resize_to_results(*wh)
else:
# case where it's just an update from results and *NOT*
# a resize of some higher level parent-container widget.
search = self.parent()
w, h = search.space_dims()
self.resize_to_results(w=w, h=h)
def show_matches(self) -> None:
self.show()
self.resize_to_results()
class SearchBar(Edit):
@ -509,15 +466,18 @@ class SearchBar(Edit):
self.godwidget = godwidget
super().__init__(parent, **kwargs)
self.view: CompleterView = view
godwidget._widgets[view.mode_name] = view
def show(self) -> None:
super().show()
self.view.show_matches()
def unfocus(self) -> None:
self.parent().hide()
self.clearFocus()
def hide(self) -> None:
if self.view:
self.view.hide()
super().hide()
class SearchWidget(QtWidgets.QWidget):
@ -536,16 +496,15 @@ class SearchWidget(QtWidgets.QWidget):
parent=None,
) -> None:
super().__init__(parent)
super().__init__(parent or godwidget)
# size it as we specify
self.setSizePolicy(
QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Expanding,
)
self.godwidget = godwidget
godwidget.reg_for_resize(self)
self.vbox = QtWidgets.QVBoxLayout(self)
self.vbox.setContentsMargins(0, 4, 4, 0)
@ -595,23 +554,18 @@ class SearchWidget(QtWidgets.QWidget):
self.vbox.setAlignment(self.view, Qt.AlignTop | Qt.AlignLeft)
def focus(self) -> None:
self.show()
self.bar.focus()
def show_only_cache_entries(self) -> None:
'''
Clear the search results view and show only cached (aka recently
loaded with active data) feeds in the results section.
'''
godw = self.godwidget
if self.view.model().rowCount(QModelIndex()) == 0:
# fill cache list if nothing existing
self.view.set_section_entries(
'cache',
list(reversed(godw._chart_cache)),
# remove all other completion results except for cache
list(reversed(self.godwidget._chart_cache)),
clear_all=True,
)
self.bar.focus()
self.show()
def get_current_item(self) -> Optional[tuple[str, str]]:
'''Return the current completer tree selection as
a tuple ``(parent: str, child: str)`` if valid, else ``None``.
@ -649,8 +603,7 @@ class SearchWidget(QtWidgets.QWidget):
clear_to_cache: bool = True,
) -> Optional[str]:
'''
Attempt to load and switch the currently selected
'''Attempt to load and switch the currently selected
completion result to the affiliated chart app.
Return any loaded symbol.
@ -661,11 +614,11 @@ class SearchWidget(QtWidgets.QWidget):
return None
provider, symbol = value
godw = self.godwidget
chart = self.godwidget
log.info(f'Requesting symbol: {symbol}.{provider}')
await godw.load_symbol(
await chart.load_symbol(
provider,
symbol,
'info',
@ -682,46 +635,18 @@ class SearchWidget(QtWidgets.QWidget):
# Re-order the symbol cache on the chart to display in
# LIFO order. this is normally only done internally by
# the chart on new symbols being loaded into memory
godw.set_chart_symbol(
fqsn, (
godw.hist_linked,
godw.rt_linked,
)
)
self.show_only_cache_entries()
chart.set_chart_symbol(fqsn, chart.linkedsplits)
self.view.set_section_entries(
'cache',
values=list(reversed(chart._chart_cache)),
# remove all other completion results except for cache
clear_all=True,
)
self.bar.focus()
return fqsn
def space_dims(self) -> tuple[float, float]:
'''
Compute and return the "available space dimensions" for this
search widget in terms of px space for results by returning the
pair of width and height.
'''
# XXX: dun need dis rite?
# win = self.window()
# win_h = win.height()
# sb_h = win.statusBar().height()
godw = self.godwidget
hl = godw.hist_linked
edit_h = self.bar.height()
h = hl.height() - edit_h
w = hl.width()
return w, h
def on_resize(self) -> None:
'''
Resize relay event from god, resize all child widgets.
Right now this just resizes the view to contents and/or the fast
chart height.
'''
w, h = self.space_dims()
self.bar.view.show_matches(wh=(w, h))
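The resize plumbing at play here looks like a simple relay: widgets call `reg_for_resize()` on the god widget, the main window forwards its `resizeEvent` via `on_win_resize()`, and the god widget fans that out as `on_resize()` callbacks. A sketch of the assumed shape (the actual `GodWidget` isn't shown in this diff):

    class GodWidget:
        def __init__(self) -> None:
            # widgets that want resize relays
            self._resizables: list = []

        def reg_for_resize(self, widget) -> None:
            self._resizables.append(widget)

        def on_win_resize(self, event) -> None:
            # called from ``MainWindow.resizeEvent()``
            for widget in self._resizables:
                widget.on_resize()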
_search_active: trio.Event = trio.Event()
_search_enabled: bool = False
@ -787,11 +712,10 @@ async def fill_results(
max_pause_time: float = 6/16 + 0.001,
) -> None:
'''
Task to search through providers and fill in possible
"""Task to search through providers and fill in possible
completion results.
'''
"""
global _search_active, _search_enabled, _searcher_cache
bar = search.bar
@ -805,10 +729,6 @@ async def fill_results(
matches = defaultdict(list)
has_results: defaultdict[str, set[str]] = defaultdict(set)
# show cached feed list at startup
search.show_only_cache_entries()
search.on_resize()
while True:
await _search_active.wait()
period = None
@ -822,7 +742,7 @@ async def fill_results(
pattern = await recv_chan.receive()
period = time.time() - wait_start
log.debug(f'{pattern} after {period}')
print(f'{pattern} after {period}')
# during fast multiple key inputs, wait until a pause
# (in typing) to initiate search
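The pause detection is a receive-with-timeout debounce: keep draining newer patterns until no keypress arrives within `max_pause_time`. A minimal trio sketch of the pattern (function name hypothetical):

    import trio

    async def debounced_recv(
        recv_chan: trio.MemoryReceiveChannel,
        pause: float = 6/16 + 0.001,  # the diff's ``max_pause_time`` default
    ):
        pattern = await recv_chan.receive()
        while True:
            # absorb newer patterns until typing pauses
            with trio.move_on_after(pause) as cs:
                pattern = await recv_chan.receive()
            if cs.cancelled_caught:
                return pattern  # pause elapsed; fire the search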
@ -921,7 +841,8 @@ async def handle_keyboard_input(
godwidget = search.godwidget
view = bar.view
view.set_font_size(bar.dpi_font.px_size)
send, recv = trio.open_memory_channel(616)
send, recv = trio.open_memory_channel(16)
async with trio.open_nursery() as n:
@ -936,10 +857,6 @@ async def handle_keyboard_input(
)
)
bar.focus()
search.show_only_cache_entries()
await trio.sleep(0)
async for kbmsg in recv_chan:
event, etype, key, mods, txt = kbmsg.to_tuple()
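On the channel-size change above: `trio.open_memory_channel(16)` bounds how many unprocessed key messages may queue; once full, `send()` blocks and `send_nowait()` raises `trio.WouldBlock` until the consumer catches up, which is sane backpressure for keyboard input (the old 616 was effectively unbounded at typing rates). A tiny demo:

    import trio

    async def demo() -> None:
        send, recv = trio.open_memory_channel(16)
        for i in range(16):
            send.send_nowait(i)  # fills the buffer
        try:
            send.send_nowait(16)
        except trio.WouldBlock:
            print('buffer full; producer must wait')
        print(await recv.receive())  # -> 0, frees a slot

    trio.run(demo)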
@ -950,11 +867,10 @@ async def handle_keyboard_input(
ctl = True
if key in (Qt.Key_Enter, Qt.Key_Return):
_search_enabled = False
await search.chart_current_item(clear_to_cache=True)
search.show_only_cache_entries()
view.show_matches()
search.focus()
_search_enabled = False
continue
elif not ctl and not bar.text():
# if nothing in search text show the cache
@ -971,7 +887,7 @@ async def handle_keyboard_input(
Qt.Key_Space, # i feel like this is the "native" one
Qt.Key_Alt,
}:
bar.unfocus()
search.bar.unfocus()
# kill the search and focus back on main chart
if godwidget:
@ -1019,10 +935,9 @@ async def handle_keyboard_input(
if item:
parent_item = item.parent()
# if we're in the cache section and thus the next
# selection is a cache item, switch and show it
# immediately since it should be very fast.
if parent_item and parent_item.text() == 'cache':
# if it's a cache item, switch and show it immediately
await search.chart_current_item(clear_to_cache=False)
elif not ctl:

View File

@ -21,29 +21,15 @@ Qt main window singletons and stuff.
import os
import signal
import time
from typing import (
Callable,
Optional,
Union,
)
from typing import Callable, Optional, Union
import uuid
from pyqtgraph import QtGui
from PyQt5 import QtCore
from PyQt5.QtWidgets import (
QWidget,
QMainWindow,
QApplication,
QLabel,
QStatusBar,
)
from PyQt5.QtWidgets import QLabel, QStatusBar
from PyQt5.QtGui import (
QScreen,
QCloseEvent,
)
from ..log import get_logger
from ._style import _font_small, hcolor
from ._chart import GodWidget
log = get_logger(__name__)
@ -162,13 +148,12 @@ class MultiStatus:
self.bar.clearMessage()
class MainWindow(QMainWindow):
class MainWindow(QtGui.QMainWindow):
# XXX: for tiling wms this should scale
# with the allotted window size.
# TODO: detect for tiling and if untrue set some size?
# size = (300, 500)
godwidget: GodWidget
size = (300, 500)
title = 'piker chart (ur symbol is loading bby)'
@ -177,20 +162,17 @@ class MainWindow(QMainWindow):
# self.setMinimumSize(*self.size)
self.setWindowTitle(self.title)
# set by runtime after `trio` is engaged.
self.godwidget: Optional[GodWidget] = None
self._status_bar: QStatusBar = None
self._status_label: QLabel = None
self._size: Optional[tuple[int, int]] = None
@property
def mode_label(self) -> QLabel:
def mode_label(self) -> QtGui.QLabel:
# init mode label
if not self._status_label:
self._status_label = label = QLabel()
self._status_label = label = QtGui.QLabel()
label.setStyleSheet(
f"""QLabel {{
color : {hcolor('gunmetal')};
@ -212,7 +194,8 @@ class MainWindow(QMainWindow):
def closeEvent(
self,
event: QCloseEvent,
event: QtGui.QCloseEvent,
) -> None:
'''Cancel the root actor asap.
@ -252,8 +235,8 @@ class MainWindow(QMainWindow):
def on_focus_change(
self,
last: QWidget,
current: QWidget,
last: QtGui.QWidget,
current: QtGui.QWidget,
) -> None:
@ -264,12 +247,11 @@ class MainWindow(QMainWindow):
name = getattr(current, 'mode_name', '')
self.set_mode_name(name)
def current_screen(self) -> QScreen:
'''
Get a frickin screen (if we can, gawd).
def current_screen(self) -> QtGui.QScreen:
"""Get a frickin screen (if we can, gawd).
'''
app = QApplication.instance()
"""
app = QtGui.QApplication.instance()
for _ in range(3):
screen = app.screenAt(self.pos())
@ -302,7 +284,7 @@ class MainWindow(QMainWindow):
'''
# https://stackoverflow.com/a/18975846
if not size and not self._size:
# app = QApplication.instance()
app = QtGui.QApplication.instance()
geo = self.current_screen().geometry()
h, w = geo.height(), geo.width()
# use approx 1/3 of the area of the screen by default
@ -310,36 +292,9 @@ class MainWindow(QMainWindow):
self.resize(*size or self._size)
def resizeEvent(self, event: QtCore.QEvent) -> None:
if (
# event.spontaneous()
event.oldSize().height() == event.size().height()
):
event.ignore()
return
# XXX: uncomment for debugging..
# attrs = {}
# for key in dir(event):
# if key == '__dir__':
# continue
# attr = getattr(event, key)
# try:
# attrs[key] = attr()
# except TypeError:
# attrs[key] = attr
# from pprint import pformat
# print(
# f'{pformat(attrs)}\n'
# f'WINDOW RESIZE: {self.size()}\n\n'
# )
self.godwidget.on_win_resize(event)
event.accept()
# singleton app per actor
_qt_win: QMainWindow = None
_qt_win: QtGui.QMainWindow = None
def main_window() -> MainWindow:

View File

@ -46,10 +46,8 @@ def _kivy_import_hack():
@click.argument('name', nargs=1, required=True)
@click.pass_obj
def monitor(config, rate, name, dhost, test, tl):
'''
Start a real-time watchlist UI
'''
"""Start a real-time watchlist UI
"""
# global opts
brokermod = config['brokermods'][0]
loglevel = config['loglevel']
@ -72,12 +70,8 @@ def monitor(config, rate, name, dhost, test, tl):
) as portal:
# run app "main"
await _async_main(
name,
portal,
tickers,
brokermod,
rate,
test=test,
name, portal, tickers,
brokermod, rate, test=test,
)
tractor.run(
@ -128,7 +122,7 @@ def optschain(config, symbol, date, rate, test):
@cli.command()
@click.option(
'--profile',
# '-p',
'-p',
default=None,
help='Enable pyqtgraph profiling'
)
@ -137,14 +131,9 @@ def optschain(config, symbol, date, rate, test):
is_flag=True,
help='Enable tractor debug mode'
)
@click.argument('symbols', nargs=-1, required=True)
@click.argument('symbol', required=True)
@click.pass_obj
def chart(
config,
symbols: list[str],
profile,
pdb: bool,
):
def chart(config, symbol, profile, pdb):
'''
Start a real-time charting UI
@ -155,10 +144,8 @@ def chart(
_profile._pg_profile = True
_profile.ms_slower_then = float(profile)
# Qt UI entrypoint
from ._app import _main
for symbol in symbols:
if '.' not in symbol:
click.echo(click.style(
f'symbol: {symbol} must have a {symbol}.<provider> suffix',
@ -172,9 +159,8 @@ def chart(
tractorloglevel = config['tractorloglevel']
pikerloglevel = config['loglevel']
_main(
syms=symbols,
sym=symbol,
brokernames=brokernames,
piker_loglevel=pikerloglevel,
tractor_kwargs={
@ -184,6 +170,5 @@ def chart(
'enable_modules': [
'piker.clearing._client'
],
'registry_addr': config.get('registry_addr'),
},
)
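With `symbol` widened to `symbols` above, the chart entrypoint now takes any number of instruments, validates each for a `.<provider>` suffix, and hands the whole set to the Qt `_main()`. A hypothetical invocation (symbols made up):

    piker chart ethusdt.binance xauusd.ib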

File diff suppressed because it is too large

View File

@ -6,7 +6,8 @@
# `pyqtgraph` peeps keep breaking, fixing, improving so might as well
# pin this to a dev branch that we have more control over especially
# as more graphics stuff gets hashed out.
-e git+https://github.com/pikers/pyqtgraph.git@master#egg=pyqtgraph
-e git+https://github.com/pikers/pyqtgraph.git@piker_pin#egg=pyqtgraph
# our async client for ``marketstore`` (the tsdb)
-e git+https://github.com/pikers/anyio-marketstore.git@master#egg=anyio-marketstore
@ -17,7 +18,4 @@
# ``asyncvnc`` for sending interactions to ib-gw inside docker
-e git+https://github.com/pikers/asyncvnc.git@main#egg=asyncvnc
# ``cryptofeed`` for connecting to various crypto exchanges + custom fixes
-e git+https://github.com/pikers/cryptofeed.git@date_parsing#egg=cryptofeed
-e git+https://github.com/pikers/asyncvnc.git@vid_passthrough#egg=asyncvnc
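All of these pins use pip's editable-VCS syntax, `-e git+<repo-url>@<ref>#egg=<dist-name>`, so moving a pin between branches is just a matter of swapping the ref after the `@` (as the pyqtgraph line above does); pinning to an exact commit would look like (sha made up):

    -e git+https://github.com/pikers/pyqtgraph.git@0123abcd#egg=pyqtgraph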

View File

@ -41,24 +41,23 @@ setup(
},
install_requires=[
'toml',
'tomli', # fastest pure py reader
'click',
'colorlog',
'attrs',
'pygments',
'colorama', # numba traceback coloring
'msgspec', # performant IPC messaging and structs
'pydantic', # structured data
# async
'trio',
'trio-websocket',
'msgspec', # performant IPC messaging
'async_generator',
# from github currently (see requirements.txt)
# 'trimeter', # not released yet..
# 'tractor',
# asyncvnc,
# 'cryptofeed',
# brokers
'asks==2.4.8',

View File

@ -8,6 +8,7 @@ from trio.testing import trio_test
from piker.brokers import questrade as qt
import pytest
import tractor
from tractor.testing import tractor_test
import piker
from piker.brokers import get_brokermod
@ -22,12 +23,6 @@ pytestmark = pytest.mark.skipif(
reason="questrade tests can only be run locally with an API key",
)
# TODO: this module was removed from tractor into its
# tests/conftest.py; we need to rewrite the below tests
# to use `open_pikerd_runtime()` to make these work again
# (if we're not just gonna junk em).
# from tractor.testing import tractor_test
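A sketch of what that rewrite might look like, assuming `open_pikerd_runtime()` is an async context manager (its real signature isn't shown in this diff):

    from trio.testing import trio_test

    @trio_test
    async def test_concurrent_tokens_refresh(us_symbols, loglevel):
        # hypothetical runtime fixture replacing ``@tractor_test``
        async with open_pikerd_runtime(loglevel=loglevel):
            ...  # same request/refresh assertions as below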
# stock quote
_ex_quotes = {
@ -111,7 +106,7 @@ def match_packet(symbols, quotes, feed_type='stock'):
assert not quotes
# @tractor_test
@tractor_test
async def test_concurrent_tokens_refresh(us_symbols, loglevel):
"""Verify that concurrent requests from mulitple tasks work alongside
random token refreshing which simulates an access token expiry + refresh
@ -342,7 +337,7 @@ async def stream_stocks(feed, symbols):
'options_and_options',
],
)
# @tractor_test
@tractor_test
async def test_quote_streaming(tmx_symbols, loglevel, stream_what):
"""Set up option streaming using the broker daemon.
"""