Compare commits

No commits in common. "backend_spec" and "310_plus" have entirely different histories.

101 changed files with 9627 additions and 20259 deletions

View File

@@ -3,9 +3,10 @@ name: CI
on:
# Triggers the workflow on push or pull request events but only for the master branch
pull_request:
push:
branches: [ master ]
pull_request:
branches: [ master ]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
@@ -13,27 +14,6 @@ on:
jobs:
# test that we can generate a software distribution and install it
# thus avoid missing file issues after packaging.
sdist-linux:
name: 'sdist'
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup python
uses: actions/setup-python@v2
with:
python-version: '3.10'
- name: Build sdist
run: python setup.py sdist --formats=zip
- name: Install sdist from .zips
run: python -m pip install dist/*.zip
testing:
name: 'install + test-suite'
runs-on: ubuntu-latest

View File

@@ -71,19 +71,6 @@ for a development install::
source ./env/bin/activate
pip install -r requirements.txt -e .
install for nixos
*****************
for users of `NixOS` we offer a development shell environment that can be
loaded with::
nix-shell develop.nix
this will set up the required python environment to run piker; make sure to
run::
pip install -r requirements.txt -e .
once after loading the shell.
install for tinas
*****************

View File

@@ -50,8 +50,3 @@ prefer_data_account = [
paper = "XX0000000"
margin = "X0000000"
ira = "X0000000"
[deribit]
key_id = 'XXXXXXXX'
key_secret = 'Xx_XxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXx'

View File

@@ -1,32 +0,0 @@
with (import <nixpkgs> {});
with python310Packages;
stdenv.mkDerivation {
name = "pip-env";
buildInputs = [
# System requirements.
readline
# Python requirements (enough to get a virtualenv going).
python310Full
virtualenv
setuptools
pyqt5
pip
];
src = null;
shellHook = ''
# Allow the use of wheels.
SOURCE_DATE_EPOCH=$(date +%s)
# Augment the dynamic linker path
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${R}/lib/R/lib:${readline}/lib
export QT_QPA_PLATFORM_PLUGIN_PATH="${qt5.qtbase.bin}/lib/qt-${qt5.qtbase.version}/plugins";
if [ ! -d "venv" ]; then
virtualenv venv
fi
source venv/bin/activate
'';
}

View File

@@ -3,12 +3,11 @@
version: "3.5"
services:
ib_gw_paper:
ib-gateway:
# other image tags available:
# https://github.com/waytrade/ib-gateway-docker#supported-tags
# image: waytrade/ib-gateway:981.3j
image: waytrade/ib-gateway:981.3j
image: waytrade/ib-gateway:1012.2i
restart: always
restart: 'no' # restart on boot whenever there's a crash or user clicks
network_mode: 'host'
volumes:
@@ -40,12 +39,14 @@ services:
# this compose file which looks something like:
# TWS_USERID='myuser'
# TWS_PASSWORD='guest'
# TRADING_MODE=paper (or live)
# VNC_SERVER_PASSWORD='diggity'
environment:
TWS_USERID: ${TWS_USERID}
TWS_PASSWORD: ${TWS_PASSWORD}
TRADING_MODE: 'paper'
TRADING_MODE: ${TRADING_MODE:-paper}
VNC_SERVER_PASSWORD: 'doggy'
VNC_SERVER_PASSWORD: ${VNC_SERVER_PASSWORD:-}
VNC_SERVER_PORT: '3003'
# ports:
# - target: 4002
@@ -61,40 +62,3 @@ services:
# - "127.0.0.1:4001:4001"
# - "127.0.0.1:4002:4002"
# - "127.0.0.1:5900:5900"
# ib_gw_live:
# image: waytrade/ib-gateway:1012.2i
# restart: no
# network_mode: 'host'
# volumes:
# - type: bind
# source: ./jts_live.ini
# target: /root/jts/jts.ini
# # don't let ibc clobber this file for
# # the main reason of not having a stupid
# # timezone set..
# read_only: true
# # force our own ibc config
# - type: bind
# source: ./ibc.ini
# target: /root/ibc/config.ini
# # force our noop script - socat isn't needed in host mode.
# - type: bind
# source: ./fork_ports_delayed.sh
# target: /root/scripts/fork_ports_delayed.sh
# # force our noop script - socat isn't needed in host mode.
# - type: bind
# source: ./run_x11_vnc.sh
# target: /root/scripts/run_x11_vnc.sh
# read_only: true
# # NOTE: to fill these out, define an `.env` file in the same dir as
# # this compose file which looks something like:
# environment:
# TRADING_MODE: 'live'
# VNC_SERVER_PASSWORD: 'doggy'
# VNC_SERVER_PORT: '3004'
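For reference, the `.env` file the comments above refer to is just a set of shell-style assignments sitting next to this compose file; an illustrative sketch only (placeholder values taken from the comments, not real credentials):

    TWS_USERID='myuser'
    TWS_PASSWORD='guest'
    TRADING_MODE='paper'
    VNC_SERVER_PASSWORD='diggity'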

View File

@@ -188,7 +188,7 @@ AcceptNonBrokerageAccountWarning=yes
#
# The default value is 60.
LoginDialogDisplayTimeout=20
LoginDialogDisplayTimeout = 60
@@ -292,7 +292,7 @@ ExistingSessionDetectedAction=primary
# be set dynamically at run-time: most users will never need it,
# so don't use it unless you know you need it.
; OverrideTwsApiPort=4002
OverrideTwsApiPort=4002
# Read-only Login

View File

@@ -1,33 +0,0 @@
[IBGateway]
ApiOnly=true
LocalServerPort=4001
# NOTE: must be set if using IBC's "reject" mode
TrustedIPs=127.0.0.1
; RemoteHostOrderRouting=ndc1.ibllc.com
; WriteDebug=true
; RemotePortOrderRouting=4001
; useRemoteSettings=false
; tradingMode=p
; Steps=8
; colorPalletName=dark
# window geo, this may be useful for sending `xdotool` commands?
; MainWindow.Width=1986
; screenHeight=3960
[Logon]
Locale=en
# most markets are oriented around this zone
# so might as well hard code it.
TimeZone=America/New_York
UseSSL=true
displayedproxymsg=1
os_titlebar=true
s3store=true
useRemoteSettings=false
[Communication]
ctciAutoEncrypt=true
Region=usr
; Peer=cdc1.ibllc.com:4001

View File

@@ -1,35 +1,16 @@
#!/bin/sh
# start vnc server and listen for connections
# on port specced in `$VNC_SERVER_PORT`
# start VNC server
x11vnc \
-listen 127.0.0.1 \
-ncache_cr \
-allow 127.0.0.1 \
-listen localhost \
-rfbport "${VNC_SERVER_PORT}" \
-display :1 \
-forever \
-shared \
-logappend /var/log/x11vnc.log \
-bg \
-nowf \
-noxdamage \
-noxfixes \
-no6 \
-noipv6 \
-autoport 3003 \
# can't use this because of ``asyncvnc`` issue:
# -nowcr \
# TODO: can't use this because of ``asyncvnc`` issue:
# https://github.com/barneygale/asyncvnc/issues/1
# -passwd 'ibcansmbz'
# XXX: optional graphics caching flags that seem to rekt the overlay
# of the 2 gw windows? When running a single gateway
# this seems to maybe optimize some memory usage?
# -ncache_cr \
# -ncache \
# NOTE: this will prevent logs from going to the console.
# -logappend /var/log/x11vnc.log \
# where to start allocating ports
# -autoport "${VNC_SERVER_PORT}" \

View File

@@ -18,10 +18,3 @@
piker: trading gear for hackers.
"""
from ._daemon import open_piker_runtime
from .data.feed import open_feed
__all__ = [
'open_piker_runtime',
'open_feed',
]

View File

@ -18,27 +18,16 @@
Structured, daemon tree service management. Structured, daemon tree service management.
""" """
from __future__ import annotations from typing import Optional, Union, Callable, Any
import os from contextlib import asynccontextmanager as acm
from typing import (
Optional,
Callable,
Any,
ClassVar,
)
from contextlib import (
asynccontextmanager as acm,
)
from collections import defaultdict from collections import defaultdict
import tractor from pydantic import BaseModel
import trio import trio
from trio_typing import TaskStatus from trio_typing import TaskStatus
import tractor
from .log import ( from .log import get_logger, get_console_log
get_logger,
get_console_log,
)
from .brokers import get_brokermod from .brokers import get_brokermod
@ -46,118 +35,28 @@ log = get_logger(__name__)
_root_dname = 'pikerd' _root_dname = 'pikerd'
_default_registry_host: str = '127.0.0.1' _registry_addr = ('127.0.0.1', 6116)
_default_registry_port: int = 6116 _tractor_kwargs: dict[str, Any] = {
_default_reg_addr: tuple[str, int] = ( # use a different registry addr then tractor's default
_default_registry_host, 'arbiter_addr': _registry_addr
_default_registry_port, }
)
# NOTE: this value is set as an actor-global once the first endpoint
# who is capable, spawns a `pikerd` service tree.
_registry: Registry | None = None
class Registry:
addr: None | tuple[str, int] = None
# TODO: table of uids to sockaddrs
peers: dict[
tuple[str, str],
tuple[str, int],
] = {}
_tractor_kwargs: dict[str, Any] = {}
@acm
async def open_registry(
addr: None | tuple[str, int] = None,
ensure_exists: bool = True,
) -> tuple[str, int]:
global _tractor_kwargs
actor = tractor.current_actor()
uid = actor.uid
if (
Registry.addr is not None
and addr
):
raise RuntimeError(
f'`{uid}` registry addr already bound @ {_registry.sockaddr}'
)
was_set: bool = False
if (
not tractor.is_root_process()
and Registry.addr is None
):
Registry.addr = actor._arb_addr
if (
ensure_exists
and Registry.addr is None
):
raise RuntimeError(
f"`{uid}` registry should already exist bug doesn't?"
)
if (
Registry.addr is None
):
was_set = True
Registry.addr = addr or _default_reg_addr
_tractor_kwargs['arbiter_addr'] = Registry.addr
try:
yield Registry.addr
finally:
# XXX: always clear the global addr if we set it so that the
# next (set of) calls will apply whatever new one is passed
# in.
if was_set:
Registry.addr = None
def get_tractor_runtime_kwargs() -> dict[str, Any]:
'''
Deliver ``tractor`` related runtime variables in a `dict`.
'''
return _tractor_kwargs
_root_modules = [ _root_modules = [
__name__, __name__,
'piker.clearing._ems', 'piker.clearing._ems',
'piker.clearing._client', 'piker.clearing._client',
'piker.data._sampling',
] ]
# TODO: factor this into a ``tractor.highlevel`` extension class Services(BaseModel):
# pack for the library.
class Services:
actor_n: tractor._supervise.ActorNursery actor_n: tractor._supervise.ActorNursery
service_n: trio.Nursery service_n: trio.Nursery
debug_mode: bool # tractor sub-actor debug mode flag debug_mode: bool # tractor sub-actor debug mode flag
service_tasks: dict[ service_tasks: dict[str, tuple[trio.CancelScope, tractor.Portal]] = {}
str,
tuple[ class Config:
trio.CancelScope, arbitrary_types_allowed = True
tractor.Portal,
trio.Event,
]
] = {}
locks = defaultdict(trio.Lock)
@classmethod
async def start_service_task( async def start_service_task(
self, self,
name: str, name: str,
@ -176,12 +75,7 @@ class Services:
''' '''
async def open_context_in_task( async def open_context_in_task(
task_status: TaskStatus[ task_status: TaskStatus[
tuple[ trio.CancelScope] = trio.TASK_STATUS_IGNORED,
trio.CancelScope,
trio.Event,
Any,
]
] = trio.TASK_STATUS_IGNORED,
) -> Any: ) -> Any:
@ -193,173 +87,143 @@ class Services:
) as (ctx, first): ) as (ctx, first):
# unblock once the remote context has started # unblock once the remote context has started
complete = trio.Event() task_status.started((cs, first))
task_status.started((cs, complete, first))
log.info( log.info(
f'`pikerd` service {name} started with value {first}' f'`pikerd` service {name} started with value {first}'
) )
try: try:
# wait on any context's return value # wait on any context's return value
# and any final portal result from the
# sub-actor.
ctx_res = await ctx.result() ctx_res = await ctx.result()
except tractor.ContextCancelled:
# NOTE: blocks indefinitely until cancelled return await self.cancel_service(name)
# either by error from the target context else:
# function or by being cancelled here by the # wait on any error from the sub-actor
# surrounding cancel scope. # NOTE: this will block indefinitely until
# cancelled either by error from the target
# context function or by being cancelled here by
# the surrounding cancel scope
return (await portal.result(), ctx_res) return (await portal.result(), ctx_res)
finally: cs, first = await self.service_n.start(open_context_in_task)
await portal.cancel_actor()
complete.set()
self.service_tasks.pop(name)
cs, complete, first = await self.service_n.start(open_context_in_task)
# store the cancel scope and portal for later cancellation or # store the cancel scope and portal for later cancellation or
# retstart if needed. # retstart if needed.
self.service_tasks[name] = (cs, portal, complete) self.service_tasks[name] = (cs, portal)
return cs, first return cs, first
@classmethod # TODO: per service cancellation by scope, we aren't using this
# anywhere right?
async def cancel_service( async def cancel_service(
self, self,
name: str, name: str,
) -> Any: ) -> Any:
'''
Cancel the service task and actor for the given ``name``.
'''
log.info(f'Cancelling `pikerd` service {name}') log.info(f'Cancelling `pikerd` service {name}')
cs, portal, complete = self.service_tasks[name] cs, portal = self.service_tasks[name]
# XXX: not entirely sure why this is required,
# and should probably be better fine tuned in
# ``tractor``?
cs.cancel() cs.cancel()
await complete.wait() return await portal.cancel_actor()
assert name not in self.service_tasks, \
f'Serice task for {name} not terminated?'
_services: Optional[Services] = None
@acm
async def open_pikerd(
start_method: str = 'trio',
loglevel: Optional[str] = None,
# XXX: you should pretty much never want debug mode
# for data daemons when running in production.
debug_mode: bool = False,
) -> Optional[tractor._portal.Portal]:
'''
Start a root piker daemon who's lifetime extends indefinitely
until cancelled.
A root actor nursery is created which can be used to create and keep
alive underling services (see below).
'''
global _services
assert _services is None
# XXX: this may open a root actor as well
async with (
tractor.open_root_actor(
# passed through to ``open_root_actor``
arbiter_addr=_registry_addr,
name=_root_dname,
loglevel=loglevel,
debug_mode=debug_mode,
start_method=start_method,
# TODO: eventually we should be able to avoid
# having the root have more then permissions to
# spawn other specialized daemons I think?
enable_modules=_root_modules,
) as _,
tractor.open_nursery() as actor_nursery,
):
async with trio.open_nursery() as service_nursery:
# # setup service mngr singleton instance
# async with AsyncExitStack() as stack:
# assign globally for future daemon/task creation
_services = Services(
actor_n=actor_nursery,
service_n=service_nursery,
debug_mode=debug_mode,
)
yield _services
@acm @acm
async def open_piker_runtime( async def open_piker_runtime(
name: str, name: str,
enable_modules: list[str] = [], enable_modules: list[str] = [],
loglevel: Optional[str] = None,
# XXX NOTE XXX: you should pretty much never want debug mode
# for data daemons when running in production.
debug_mode: bool = False,
registry_addr: None | tuple[str, int] = None,
# TODO: once we have `rsyscall` support we will read a config
# and spawn the service tree distributed per that.
start_method: str = 'trio', start_method: str = 'trio',
loglevel: Optional[str] = None,
**tractor_kwargs,
) -> tuple[
tractor.Actor,
tuple[str, int],
]:
'''
Start a piker actor who's runtime will automatically sync with
existing piker actors on the local link based on configuration.
Can be called from a subactor or any program that needs to start
a root actor.
'''
try:
# check for existing runtime
actor = tractor.current_actor().uid
except tractor._exceptions.NoRuntime:
registry_addr = registry_addr or _default_reg_addr
async with (
tractor.open_root_actor(
# passed through to ``open_root_actor``
arbiter_addr=registry_addr,
name=name,
loglevel=loglevel,
debug_mode=debug_mode,
start_method=start_method,
# TODO: eventually we should be able to avoid
# having the root have more then permissions to
# spawn other specialized daemons I think?
enable_modules=enable_modules,
**tractor_kwargs,
) as _,
open_registry(registry_addr, ensure_exists=False) as addr,
):
yield (
tractor.current_actor(),
addr,
)
else:
async with open_registry(registry_addr) as addr:
yield (
actor,
addr,
)
@acm
async def open_pikerd(
loglevel: str | None = None,
# XXX: you should pretty much never want debug mode # XXX: you should pretty much never want debug mode
# for data daemons when running in production. # for data daemons when running in production.
debug_mode: bool = False, debug_mode: bool = False,
registry_addr: None | tuple[str, int] = None,
) -> Services: ) -> Optional[tractor._portal.Portal]:
''' '''
Start a root piker daemon who's lifetime extends indefinitely until Start a piker actor who's runtime will automatically
cancelled. sync with existing piker actors in local network
based on configuration.
A root actor nursery is created which can be used to create and keep
alive underling services (see below).
''' '''
global _services
assert _services is None
# XXX: this may open a root actor as well
async with ( async with (
open_piker_runtime( tractor.open_root_actor(
# passed through to ``open_root_actor``
arbiter_addr=_registry_addr,
name=name,
loglevel=loglevel,
debug_mode=debug_mode,
start_method=start_method,
name=_root_dname,
# TODO: eventually we should be able to avoid # TODO: eventually we should be able to avoid
# having the root have more then permissions to # having the root have more then permissions to
# spawn other specialized daemons I think? # spawn other specialized daemons I think?
enable_modules=_root_modules, enable_modules=_root_modules,
) as _,
loglevel=loglevel,
debug_mode=debug_mode,
registry_addr=registry_addr,
) as (root_actor, reg_addr),
tractor.open_nursery() as actor_nursery,
trio.open_nursery() as service_nursery,
): ):
assert root_actor.accept_addr == reg_addr yield tractor.current_actor()
# assign globally for future daemon/task creation
Services.actor_n = actor_nursery
Services.service_n = service_nursery
Services.debug_mode = debug_mode
try:
yield Services
finally:
# TODO: is this more clever/efficient?
# if 'samplerd' in Services.service_tasks:
# await Services.cancel_service('samplerd')
service_nursery.cancel_scope.cancel()
@acm @acm
@ -368,89 +232,61 @@ async def maybe_open_runtime(
**kwargs, **kwargs,
) -> None: ) -> None:
''' """
Start the ``tractor`` runtime (a root actor) if none exists. Start the ``tractor`` runtime (a root actor) if none exists.
''' """
name = kwargs.pop('name') settings = _tractor_kwargs
settings.update(kwargs)
if not tractor.current_actor(err_on_no_runtime=False): if not tractor.current_actor(err_on_no_runtime=False):
async with open_piker_runtime( async with tractor.open_root_actor(
name,
loglevel=loglevel, loglevel=loglevel,
**kwargs, **settings,
) as (_, addr): ):
yield addr, yield
else: else:
async with open_registry() as addr: yield
yield addr
@acm @acm
async def maybe_open_pikerd( async def maybe_open_pikerd(
loglevel: Optional[str] = None, loglevel: Optional[str] = None,
registry_addr: None | tuple = None,
**kwargs, **kwargs,
) -> tractor._portal.Portal | ClassVar[Services]: ) -> Union[tractor._portal.Portal, Services]:
''' """If no ``pikerd`` daemon-root-actor can be found start it and
If no ``pikerd`` daemon-root-actor can be found start it and
yield up (we should probably figure out returning a portal to self yield up (we should probably figure out returning a portal to self
though). though).
''' """
if loglevel: if loglevel:
get_console_log(loglevel) get_console_log(loglevel)
# subtle, we must have the runtime up here or portal lookup will fail # subtle, we must have the runtime up here or portal lookup will fail
query_name = kwargs.pop('name', f'piker_query_{os.getpid()}') async with maybe_open_runtime(loglevel, **kwargs):
# TODO: if we need to make the query part faster we could not init async with tractor.find_actor(_root_dname) as portal:
# an actor runtime and instead just hit the socket? # assert portal is not None
# from tractor._ipc import _connect_chan, Channel if portal is not None:
# async with _connect_chan(host, port) as chan: yield portal
# async with open_portal(chan) as arb_portal: return
# yield arb_portal
async with (
open_piker_runtime(
name=query_name,
registry_addr=registry_addr,
loglevel=loglevel,
**kwargs,
) as _,
tractor.find_actor(
_root_dname,
arbiter_sockaddr=registry_addr,
) as portal
):
# connect to any existing daemon presuming
# its registry socket was selected.
if (
portal is not None
):
yield portal
return
# presume pikerd role since no daemon could be found at # presume pikerd role since no daemon could be found at
# configured address # configured address
async with open_pikerd( async with open_pikerd(
loglevel=loglevel, loglevel=loglevel,
debug_mode=kwargs.get('debug_mode', False), debug_mode=kwargs.get('debug_mode', False),
registry_addr=registry_addr,
) as service_manager: ) as _:
# in the case where we're starting up the # in the case where we're starting up the
# tractor-piker runtime stack in **this** process # tractor-piker runtime stack in **this** process
# we return no portal to self. # we return no portal to self.
assert service_manager yield None
yield service_manager
# `brokerd` enabled modules # brokerd enabled modules
# NOTE: keeping this list as small as possible is part of our caps-sec
# model and should be treated with utmost care!
_data_mods = [ _data_mods = [
'piker.brokers.core', 'piker.brokers.core',
'piker.brokers.data', 'piker.brokers.data',
@ -460,35 +296,37 @@ _data_mods = [
] ]
class Brokerd:
locks = defaultdict(trio.Lock)
@acm @acm
async def find_service( async def find_service(
service_name: str, service_name: str,
) -> tractor.Portal | None: ) -> Optional[tractor.Portal]:
async with open_registry() as reg_addr: log.info(f'Scanning for service `{service_name}`')
log.info(f'Scanning for service `{service_name}`') # attach to existing daemon by name if possible
# attach to existing daemon by name if possible async with tractor.find_actor(
async with tractor.find_actor( service_name,
service_name, arbiter_sockaddr=_registry_addr,
arbiter_sockaddr=reg_addr, ) as maybe_portal:
) as maybe_portal: yield maybe_portal
yield maybe_portal
async def check_for_service( async def check_for_service(
service_name: str, service_name: str,
) -> None | tuple[str, int]: ) -> bool:
''' '''
Service daemon "liveness" predicate. Service daemon "liveness" predicate.
''' '''
async with open_registry(ensure_exists=False) as reg_addr: async with tractor.query_actor(
async with tractor.query_actor( service_name,
service_name, arbiter_sockaddr=_registry_addr,
arbiter_sockaddr=reg_addr, ) as sockaddr:
) as sockaddr: return sockaddr
return sockaddr
@acm @acm
@ -498,8 +336,6 @@ async def maybe_spawn_daemon(
service_task_target: Callable, service_task_target: Callable,
spawn_args: dict[str, Any], spawn_args: dict[str, Any],
loglevel: Optional[str] = None, loglevel: Optional[str] = None,
singleton: bool = False,
**kwargs, **kwargs,
) -> tractor.Portal: ) -> tractor.Portal:
@ -520,7 +356,7 @@ async def maybe_spawn_daemon(
# serialize access to this section to avoid # serialize access to this section to avoid
# 2 or more tasks racing to create a daemon # 2 or more tasks racing to create a daemon
lock = Services.locks[service_name] lock = Brokerd.locks[service_name]
await lock.acquire() await lock.acquire()
async with find_service(service_name) as portal: async with find_service(service_name) as portal:
@ -531,9 +367,6 @@ async def maybe_spawn_daemon(
log.warning(f"Couldn't find any existing {service_name}") log.warning(f"Couldn't find any existing {service_name}")
# TODO: really shouldn't the actor spawning be part of the service
# starting method `Services.start_service()` ?
# ask root ``pikerd`` daemon to spawn the daemon we need if # ask root ``pikerd`` daemon to spawn the daemon we need if
# pikerd is not live we now become the root of the # pikerd is not live we now become the root of the
# process tree # process tree
@ -544,16 +377,15 @@ async def maybe_spawn_daemon(
) as pikerd_portal: ) as pikerd_portal:
# we are the root and thus are `pikerd`
# so spawn the target service directly by calling
# the provided target routine.
# XXX: this assumes that the target is well formed and will
# do the right things to setup both a sub-actor **and** call
# the ``_Services`` api from above to start the top level
# service task for that actor.
started: bool
if pikerd_portal is None: if pikerd_portal is None:
started = await service_task_target(**spawn_args) # we are the root and thus are `pikerd`
# so spawn the target service directly by calling
# the provided target routine.
# XXX: this assumes that the target is well formed and will
# do the right things to setup both a sub-actor **and** call
# the ``_Services`` api from above to start the top level
# service task for that actor.
await service_task_target(**spawn_args)
else: else:
# tell the remote `pikerd` to start the target, # tell the remote `pikerd` to start the target,
@ -562,14 +394,11 @@ async def maybe_spawn_daemon(
# non-blocking and the target task will persist running # non-blocking and the target task will persist running
# on `pikerd` after the client requesting it's start # on `pikerd` after the client requesting it's start
# disconnects. # disconnects.
started = await pikerd_portal.run( await pikerd_portal.run(
service_task_target, service_task_target,
**spawn_args, **spawn_args,
) )
if started:
log.info(f'Service {service_name} started!')
async with tractor.wait_for_actor(service_name) as portal: async with tractor.wait_for_actor(service_name) as portal:
lock.release() lock.release()
yield portal yield portal
@ -592,6 +421,9 @@ async def spawn_brokerd(
extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {}) extra_tractor_kwargs = getattr(brokermod, '_spawn_kwargs', {})
tractor_kwargs.update(extra_tractor_kwargs) tractor_kwargs.update(extra_tractor_kwargs)
global _services
assert _services
# ask `pikerd` to spawn a new sub-actor and manage it under its # ask `pikerd` to spawn a new sub-actor and manage it under its
# actor nursery # actor nursery
modpath = brokermod.__name__ modpath = brokermod.__name__
@ -604,18 +436,18 @@ async def spawn_brokerd(
subpath = f'{modpath}.{submodname}' subpath = f'{modpath}.{submodname}'
broker_enable.append(subpath) broker_enable.append(subpath)
portal = await Services.actor_n.start_actor( portal = await _services.actor_n.start_actor(
dname, dname,
enable_modules=_data_mods + broker_enable, enable_modules=_data_mods + broker_enable,
loglevel=loglevel, loglevel=loglevel,
debug_mode=Services.debug_mode, debug_mode=_services.debug_mode,
**tractor_kwargs **tractor_kwargs
) )
# non-blocking setup of brokerd service nursery # non-blocking setup of brokerd service nursery
from .data import _setup_persistent_brokerd from .data import _setup_persistent_brokerd
await Services.start_service_task( await _services.start_service_task(
dname, dname,
portal, portal,
_setup_persistent_brokerd, _setup_persistent_brokerd,
@ -661,21 +493,24 @@ async def spawn_emsd(
""" """
log.info('Spawning emsd') log.info('Spawning emsd')
portal = await Services.actor_n.start_actor( global _services
assert _services
portal = await _services.actor_n.start_actor(
'emsd', 'emsd',
enable_modules=[ enable_modules=[
'piker.clearing._ems', 'piker.clearing._ems',
'piker.clearing._client', 'piker.clearing._client',
], ],
loglevel=loglevel, loglevel=loglevel,
debug_mode=Services.debug_mode, # set by pikerd flag debug_mode=_services.debug_mode, # set by pikerd flag
**extra_tractor_kwargs **extra_tractor_kwargs
) )
# non-blocking setup of clearing service # non-blocking setup of clearing service
from .clearing._ems import _setup_persistent_emsd from .clearing._ems import _setup_persistent_emsd
await Services.start_service_task( await _services.start_service_task(
'emsd', 'emsd',
portal, portal,
_setup_persistent_emsd, _setup_persistent_emsd,
@ -702,3 +537,25 @@ async def maybe_open_emsd(
) as portal: ) as portal:
yield portal yield portal
# TODO: ideally we can start the tsdb "on demand" but it's
# probably going to require "rootless" docker, at least if we don't
# want to expect the user to start ``pikerd`` with root perms all the
# time.
# async def maybe_open_marketstored(
# loglevel: Optional[str] = None,
# **kwargs,
# ) -> tractor._portal.Portal: # noqa
# async with maybe_spawn_daemon(
# 'marketstored',
# service_task_target=spawn_emsd,
# spawn_args={'loglevel': loglevel},
# loglevel=loglevel,
# **kwargs,
# ) as portal:
# yield portal

View File

@@ -18,10 +18,7 @@
Profiling wrappers for internal libs.
"""
import os
import sys
import time
from time import perf_counter
from functools import wraps
# NOTE: you can pass a flag to enable this:
@@ -47,193 +44,3 @@ def timeit(fn):
return res
return wrapper
# Modified version of ``pyqtgraph.debug.Profiler`` that
# core seems hesitant to land in:
# https://github.com/pyqtgraph/pyqtgraph/pull/2281
class Profiler(object):
'''
Simple profiler allowing measurement of multiple time intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `PYQTGRAPHPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `PYQTGRAPHPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "pyqtgraph." prefix from the module.
'''
_profilers = os.environ.get("PYQTGRAPHPROFILE", None)
_profilers = _profilers.split(",") if _profilers is not None else []
_depth = 0
# NOTE: without this defined at the class level
# you won't see apprpriately "nested" sub-profiler
# instance calls.
_msgs = []
# set this flag to disable all or individual profilers at runtime
disable = False
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabledProfiler = DisabledProfiler()
def __new__(
cls,
msg=None,
disabled='env',
delayed=True,
ms_threshold: float = 0.0,
):
"""Optionally create a new profiler based on caller's qualname.
``ms_threshold`` can be set to value in ms for which, if the
total measured time of the lifetime of this profiler is **less
than** this value, then no profiling messages will be printed.
Setting ``delayed=False`` disables this feature since messages
are emitted immediately.
"""
if (
disabled is True
or (
disabled == 'env'
and len(cls._profilers) == 0
)
):
return cls._disabledProfiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[-1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if disabled == 'env' and func_qualname not in cls._profilers:
# don't do anything
return cls._disabledProfiler
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._msgs = []
# create an actual profiling object
if cls._depth < 1:
cls._msgs = []
obj._name = msg or func_qualname
obj._delayed = delayed
obj._markCount = 0
obj._finished = False
obj._firstTime = obj._lastTime = perf_counter()
obj._mt = ms_threshold
obj._newMsg("> Entering " + obj._name)
return obj
def __call__(self, msg=None):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._markCount)
self._markCount += 1
newTime = perf_counter()
tot_ms = (newTime - self._firstTime) * 1000
ms = (newTime - self._lastTime) * 1000
self._newMsg(
f" {msg}: {ms:0.4f}, tot:{tot_ms:0.4f}"
)
self._lastTime = newTime
def mark(self, msg=None):
self(msg)
def _newMsg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
tot_ms = (perf_counter() - self._firstTime) * 1000
self._newMsg(
"< Exiting %s, total time: %0.4f ms",
self._name,
tot_ms,
)
if tot_ms < self._mt:
# print(f'{tot_ms} < {self._mt}, clearing')
# NOTE: this list **must** be an instance var to avoid
# deleting common messages during GC I think?
self._msgs.clear()
# else:
# print(f'{tot_ms} > {self._mt}, not clearing')
# XXX: why is this needed?
# don't we **want to show** nested profiler messages?
if self._msgs: # and self._depth < 1:
# if self._msgs:
print("\n".join([m[0] % m[1] for m in self._msgs]))
# clear all entries
self._msgs.clear()
# type(self)._msgs = []
type(self)._depth -= 1

View File

@@ -1,174 +0,0 @@
provider "spec" (aka backends)
==============================
``piker`` abstracts and encapsulates real-time data feeds across a slew
of providers covering many (pretty much any) instrument class.
This doc is a shoddy attempt at specifying what a backend must provide as
a basic api, per functionality-feature set, in order to be supported for
any of real-time and historical data feeds and order control via the
``emsd`` clearing system.
"providers" must offer a top plevel namespace (normally exposed as
a python module) which offers to a certain set of (async) functions
to deliver info through a real-time, normalized data layer.
Generally speaking we break each ``piker.brokers.<backend_name>`` into
a python package containing 3 sub-modules:
- ``.api`` containing lowest level client code used to interact
specifically with the APIs of the exchange, broker or data provider.
- ``.feed`` which provides historical and real-time quote stream data
provider endpoints called by piker's data layer in
``piker.data.feed``.
- ``.broker`` which defines endpoints expected by
``pikerd.clearing._ems`` and which are expected to adhere to the msg
protocol defined in ``piker.clearing._messages``.
Our current set of "production" grade backends includes:
- ``kraken``
- ``ib``
data feeds
----------
real-time quotes and tick streaming:
.. code:: python
async def stream_quotes(
send_chan: trio.abc.SendChannel,
symbols: List[str],
shm: ShmArray,
feed_is_live: trio.Event,
loglevel: str = None, # log level passed in from user config
# startup sync via ``trio``
task_status: TaskStatus[Tuple[Dict, Dict]] = trio.TASK_STATUS_IGNORED,
) -> None:
this routine must eventually deliver real-time quote messages by sending them on
the passed in ``send_chan``; these messages must have a specific format.
there is a very simple but required startup sequence:
message startup sequence:
**************************
at a minimum, and asap, a first quote message should be returned for
each requested symbol in ``symbols``. the message should have a minimum
format:
.. code:: python
quote_msg: dict[str, Any] = {
'symbol': 'xbtusd', # or wtv symbol was requested
# this field is required in the initial first quote only (though
# is recommended in all follow up quotes) but can be
'last': <last clearing price>, # float
# tick stream fields (see below for schema/format)
'ticks': list[dict[str, Any]],
}
further streamed quote messages should be in this same format.
``ticks`` is an optional sequence
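as a purely illustrative sketch (the tick field names mirror the trade
ticks emitted by the ``binance`` backend shown elsewhere in this diff, so
treat the exact values as assumptions), a follow up quote carrying one
trade tick might look like:

.. code:: python

    quote_msg: dict[str, Any] = {
        'symbol': 'xbtusd',
        'last': 16750.5,  # last clearing price
        'ticks': [{
            'type': 'trade',            # tick type, eg. trade/bid/ask
            'price': 16750.5,           # clearing price as a float
            'size': 0.01,               # clearing size/volume
            'broker_ts': 1669999999.0,  # provider side timestamp
        }],
    }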
historical OHLCV sampling
-------------------------
Example endpoint copied from the ``binance`` backend:
.. code:: python
@acm
async def open_history_client(
symbol: str,
) -> tuple[Callable, int]:
# TODO implement history getter for the new storage layer.
async with open_cached_client('binance') as client:
async def get_ohlc(
timeframe: float,
end_dt: datetime | None = None,
start_dt: datetime | None = None,
) -> tuple[
np.ndarray,
datetime, # start
datetime, # end
]:
if timeframe != 60:
raise DataUnavailable('Only 1m bars are supported')
array = await client.bars(
symbol,
start_dt=start_dt,
end_dt=end_dt,
)
times = array['time']
if (
end_dt is None
):
inow = round(time.time())
if (inow - times[-1]) > 60:
await tractor.breakpoint()
start_dt = pendulum.from_timestamp(times[0])
end_dt = pendulum.from_timestamp(times[-1])
return array, start_dt, end_dt
yield get_ohlc, {'erlangs': 3, 'rate': 3}
This `@acm` routine is responsible for setting up an async historical
data query routine for both charting and any local storage requirements.
The returned async func should retrieve, normalize and deliver
a ``tuple[np.ndarray, pendulum.datetime, pendulum.datetime]`` of the
``numpy``-ified data and the start and stop datetimes for the delivered
history "frame". The history backloading routines inside
``piker.data.feed`` expect this interface both for loading history into
``ShmArray`` real-time buffers as well as into any configured
time-series database (tsdb); normally this data is OHLCV sampled price
and volume data but in theory it can be higher resolution
tick/trades/book time series in the future.
Currently the sampling routines for charting and fsp processing expect
a max resolution of 1s (second) OHLCV sampled data.
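as a rough usage sketch (the calling code here is hypothetical, not the
actual ``piker.data.feed`` internals), a backloader would drive the
endpoint something like:

.. code:: python

    async with open_history_client('btcusdt') as (get_ohlc, config):
        # config is the throttle hint dict, eg. {'erlangs': 3, 'rate': 3}
        array, start_dt, end_dt = await get_ohlc(timeframe=60)
        # push ``array`` into shm and/or the tsdb, then request the
        # next (older) frame ending at ``start_dt``..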
OHLCV minimal schema
********************
ohlcv at a minimum is normally pushed to local shared memory (shm)
numpy compatible arrays which are read by both UI components for display
as well as auto-strats and algorithmic trading engines. shm is obviously
used for speed. we also intend to eventually support pure shm tick
streams for ultra low latency processing by external processes/services.
the provider module at a minimum must define a ``numpy`` structured
array dtype ``ohlc_dtype = np.dtype(_ohlc_dtype)`` where the
``_ohlc_dtype`` is normally defined in standard list-tuple syntax as:
.. code:: python
# Broker specific ohlc schema which includes a vwap field
_ohlc_dtype = [
('index', int),
('time', int),
('open', float),
('high', float),
('low', float),
('close', float),
('volume', float),
('count', int),
('bar_wap', float),
]
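a minimal sketch (assuming only ``numpy`` is installed) of turning that
list-tuple schema into a dtype and allocating an empty frame of bars:

.. code:: python

    import numpy as np

    ohlc_dtype = np.dtype(_ohlc_dtype)

    # pre-allocate a zeroed frame of 1000 bars
    frame = np.zeros(1000, dtype=ohlc_dtype)
    frame[0]['close'] = 16750.5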

View File

@@ -20,41 +20,30 @@ Broker clients, daemons and general back end machinery.
from importlib import import_module
from types import ModuleType
# TODO: move to urllib3/requests once supported
import asks
asks.init('trio')
__brokers__ = [
'binance',
'questrade',
'robinhood',
'ib',
'kraken',
# broken but used to work
# 'questrade',
# 'robinhood',
# TODO: we should get on these stat!
# alpaca
# wstrade
# iex
# deribit
# kucoin
# bitso
]
def get_brokermod(brokername: str) -> ModuleType: def get_brokermod(brokername: str) -> ModuleType:
''' """Return the imported broker module by name.
Return the imported broker module by name. """
'''
module = import_module('.' + brokername, 'piker.brokers') module = import_module('.' + brokername, 'piker.brokers')
# we only allow monkeying because it's for internal keying # we only allow monkeying because it's for internal keying
module.name = module.__name__.split('.')[-1] module.name = module.__name__.split('.')[-1]
return module return module
def iter_brokermods(): def iter_brokermods():
''' """Iterate all built-in broker modules.
Iterate all built-in broker modules. """
'''
for name in __brokers__: for name in __brokers__:
yield get_brokermod(name) yield get_brokermod(name)
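As a quick usage sketch of the module above (illustrative only, not part of the diff), loading one backend and iterating the built-ins looks like:

    from piker.brokers import get_brokermod, iter_brokermods

    binance = get_brokermod('binance')
    assert binance.name == 'binance'

    for mod in iter_brokermods():
        print(mod.name)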

View File

@ -33,23 +33,15 @@ import asks
from fuzzywuzzy import process as fuzzy from fuzzywuzzy import process as fuzzy
import numpy as np import numpy as np
import tractor import tractor
from pydantic.dataclasses import dataclass
from pydantic import BaseModel
import wsproto import wsproto
from .._cacheables import open_cached_client from .._cacheables import open_cached_client
from ._util import ( from ._util import resproc, SymbolNotFound
resproc, from ..log import get_logger, get_console_log
SymbolNotFound, from ..data import ShmArray
DataUnavailable, from ..data._web_bs import open_autorecon_ws, NoBsWs
)
from ..log import (
get_logger,
get_console_log,
)
from ..data.types import Struct
from ..data._web_bs import (
open_autorecon_ws,
NoBsWs,
)
log = get_logger(__name__) log = get_logger(__name__)
@ -87,14 +79,12 @@ _show_wap_in_history = False
# https://binance-docs.github.io/apidocs/spot/en/#exchange-information # https://binance-docs.github.io/apidocs/spot/en/#exchange-information
class Pair(Struct, frozen=True): class Pair(BaseModel):
symbol: str symbol: str
status: str status: str
baseAsset: str baseAsset: str
baseAssetPrecision: int baseAssetPrecision: int
cancelReplaceAllowed: bool
allowTrailingStop: bool
quoteAsset: str quoteAsset: str
quotePrecision: int quotePrecision: int
quoteAssetPrecision: int quoteAssetPrecision: int
@ -110,21 +100,18 @@ class Pair(Struct, frozen=True):
isSpotTradingAllowed: bool isSpotTradingAllowed: bool
isMarginTradingAllowed: bool isMarginTradingAllowed: bool
defaultSelfTradePreventionMode: str
allowedSelfTradePreventionModes: list[str]
filters: list[dict[str, Union[str, int, float]]] filters: list[dict[str, Union[str, int, float]]]
permissions: list[str] permissions: list[str]
class OHLC(Struct): @dataclass
''' class OHLC:
Description of the flattened OHLC quote format. """Description of the flattened OHLC quote format.
For schema details see: For schema details see:
https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-streams
''' """
time: int time: int
open: float open: float
@ -147,9 +134,7 @@ class OHLC(Struct):
# convert datetime obj timestamp to unixtime in milliseconds # convert datetime obj timestamp to unixtime in milliseconds
def binance_timestamp( def binance_timestamp(when):
when: datetime
) -> int:
return int((when.timestamp() * 1000) + (when.microsecond / 1000)) return int((when.timestamp() * 1000) + (when.microsecond / 1000))
@ -188,7 +173,7 @@ class Client:
params = {} params = {}
if sym is not None: if sym is not None:
sym = sym.lower() sym = sym.upper()
params = {'symbol': sym} params = {'symbol': sym}
resp = await self._api( resp = await self._api(
@ -245,7 +230,7 @@ class Client:
) -> dict: ) -> dict:
if end_dt is None: if end_dt is None:
end_dt = pendulum.now('UTC').add(minutes=1) end_dt = pendulum.now('UTC')
if start_dt is None: if start_dt is None:
start_dt = end_dt.start_of( start_dt = end_dt.start_of(
@ -275,7 +260,6 @@ class Client:
for i, bar in enumerate(bars): for i, bar in enumerate(bars):
bar = OHLC(*bar) bar = OHLC(*bar)
bar.typecast()
row = [] row = []
for j, (name, ftype) in enumerate(_ohlc_dtype[1:]): for j, (name, ftype) in enumerate(_ohlc_dtype[1:]):
@ -303,7 +287,7 @@ async def get_client() -> Client:
# validation type # validation type
class AggTrade(Struct): class AggTrade(BaseModel):
e: str # Event type e: str # Event type
E: int # Event time E: int # Event time
s: str # Symbol s: str # Symbol
@ -357,9 +341,7 @@ async def stream_messages(ws: NoBsWs) -> AsyncGenerator[NoBsWs, dict]:
elif msg.get('e') == 'aggTrade': elif msg.get('e') == 'aggTrade':
# NOTE: this is purely for a definition, ``msgspec.Struct`` # validate
# does not runtime-validate until you decode/encode.
# see: https://jcristharif.com/msgspec/structs.html#type-validation
msg = AggTrade(**msg) msg = AggTrade(**msg)
# TODO: type out and require this quote format # TODO: type out and require this quote format
@ -370,8 +352,8 @@ async def stream_messages(ws: NoBsWs) -> AsyncGenerator[NoBsWs, dict]:
'brokerd_ts': time.time(), 'brokerd_ts': time.time(),
'ticks': [{ 'ticks': [{
'type': 'trade', 'type': 'trade',
'price': float(msg.p), 'price': msg.p,
'size': float(msg.q), 'size': msg.q,
'broker_ts': msg.T, 'broker_ts': msg.T,
}], }],
} }
@ -402,39 +384,41 @@ async def open_history_client(
async with open_cached_client('binance') as client: async with open_cached_client('binance') as client:
async def get_ohlc( async def get_ohlc(
timeframe: float, end_dt: Optional[datetime] = None,
end_dt: datetime | None = None, start_dt: Optional[datetime] = None,
start_dt: datetime | None = None,
) -> tuple[ ) -> tuple[
np.ndarray, np.ndarray,
datetime, # start datetime, # start
datetime, # end datetime, # end
]: ]:
if timeframe != 60:
raise DataUnavailable('Only 1m bars are supported')
array = await client.bars( array = await client.bars(
symbol, symbol,
start_dt=start_dt, start_dt=start_dt,
end_dt=end_dt, end_dt=end_dt,
) )
times = array['time'] start_dt = pendulum.from_timestamp(array[0]['time'])
if ( end_dt = pendulum.from_timestamp(array[-1]['time'])
end_dt is None
):
inow = round(time.time())
if (inow - times[-1]) > 60:
await tractor.breakpoint()
start_dt = pendulum.from_timestamp(times[0])
end_dt = pendulum.from_timestamp(times[-1])
return array, start_dt, end_dt return array, start_dt, end_dt
yield get_ohlc, {'erlangs': 3, 'rate': 3} yield get_ohlc, {'erlangs': 3, 'rate': 3}
async def backfill_bars(
sym: str,
shm: ShmArray, # type: ignore # noqa
task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,
) -> None:
"""Fill historical bars into shared mem / storage afap.
"""
with trio.CancelScope() as cs:
async with open_cached_client('binance') as client:
bars = await client.bars(symbol=sym)
shm.push(bars)
task_status.started(cs)
async def stream_quotes( async def stream_quotes(
send_chan: trio.abc.SendChannel, send_chan: trio.abc.SendChannel,
@ -464,20 +448,12 @@ async def stream_quotes(
d = cache[sym.upper()] d = cache[sym.upper()]
syminfo = Pair(**d) # validation syminfo = Pair(**d) # validation
si = sym_infos[sym] = syminfo.to_dict() si = sym_infos[sym] = syminfo.dict()
filters = {}
for entry in syminfo.filters:
ftype = entry['filterType']
filters[ftype] = entry
# XXX: after manually inspecting the response format we # XXX: after manually inspecting the response format we
# just directly pick out the info we need # just directly pick out the info we need
si['price_tick_size'] = float( si['price_tick_size'] = float(syminfo.filters[0]['tickSize'])
filters['PRICE_FILTER']['tickSize'] si['lot_tick_size'] = float(syminfo.filters[2]['stepSize'])
)
si['lot_tick_size'] = float(
filters['LOT_SIZE']['stepSize']
)
si['asset_type'] = 'crypto' si['asset_type'] = 'crypto'
symbol = symbols[0] symbol = symbols[0]
@ -519,15 +495,14 @@ async def stream_quotes(
subs.append("{sym}@bookTicker") subs.append("{sym}@bookTicker")
# unsub from all pairs on teardown # unsub from all pairs on teardown
if ws.connected(): await ws.send_msg({
await ws.send_msg({ "method": "UNSUBSCRIBE",
"method": "UNSUBSCRIBE", "params": subs,
"params": subs, "id": uid,
"id": uid, })
})
# XXX: do we need to ack the unsub? # XXX: do we need to ack the unsub?
# await ws.recv_msg() # await ws.recv_msg()
async with open_autorecon_ws( async with open_autorecon_ws(
'wss://stream.binance.com/ws', 'wss://stream.binance.com/ws',

View File

@@ -39,148 +39,6 @@ _config_dir = click.get_app_dir('piker')
_watchlists_data_path = os.path.join(_config_dir, 'watchlists.json')
OK = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def print_ok(s: str, **kwargs):
print(OK + s + ENDC, **kwargs)
def print_error(s: str, **kwargs):
print(FAIL + s + ENDC, **kwargs)
def get_method(client, meth_name: str):
print(f'checking client for method \'{meth_name}\'...', end='', flush=True)
method = getattr(client, meth_name, None)
assert method
print_ok('found!.')
return method
async def run_method(client, meth_name: str, **kwargs):
method = get_method(client, meth_name)
print('running...', end='', flush=True)
result = await method(**kwargs)
print_ok(f'done! result: {type(result)}')
return result
async def run_test(broker_name: str):
brokermod = get_brokermod(broker_name)
total = 0
passed = 0
failed = 0
print(f'getting client...', end='', flush=True)
if not hasattr(brokermod, 'get_client'):
print_error('fail! no \'get_client\' context manager found.')
return
async with brokermod.get_client(is_brokercheck=True) as client:
print_ok(f'done! inside client context.')
# check for methods present on brokermod
method_list = [
'backfill_bars',
'get_client',
'trades_dialogue',
'open_history_client',
'open_symbol_search',
'stream_quotes',
]
for method in method_list:
print(
f'checking brokermod for method \'{method}\'...',
end='', flush=True)
if not hasattr(brokermod, method):
print_error(f'fail! method \'{method}\' not found.')
failed += 1
else:
print_ok('done!')
passed += 1
total += 1
# check for methods present con brokermod.Client and their
# results
# for private methods only check is present
method_list = [
'get_balances',
'get_assets',
'get_trades',
'get_xfers',
'submit_limit',
'submit_cancel',
'search_symbols',
]
for method_name in method_list:
try:
get_method(client, method_name)
passed += 1
except AssertionError:
print_error(f'fail! method \'{method_name}\' not found.')
failed += 1
total += 1
# check for methods present con brokermod.Client and their
# results
syms = await run_method(client, 'symbol_info')
total += 1
if len(syms) == 0:
raise BaseException('Empty Symbol list?')
passed += 1
first_sym = tuple(syms.keys())[0]
method_list = [
('cache_symbols', {}),
('search_symbols', {'pattern': first_sym[:-1]}),
('bars', {'symbol': first_sym})
]
for method_name, method_kwargs in method_list:
try:
await run_method(client, method_name, **method_kwargs)
passed += 1
except AssertionError:
print_error(f'fail! method \'{method_name}\' not found.')
failed += 1
total += 1
print(f'total: {total}, passed: {passed}, failed: {failed}')
@cli.command()
@click.argument('broker', nargs=1, required=True)
@click.pass_obj
def brokercheck(config, broker):
'''
Test broker apis for completeness.
'''
async def bcheck_main():
async with maybe_spawn_brokerd(broker) as portal:
await portal.run(run_test, broker)
await portal.cancel_actor()
trio.run(run_test, broker)
@cli.command()
@click.option('--keys', '-k', multiple=True,
help='Return results only for these keys')
@@ -335,8 +193,6 @@ def contracts(ctx, loglevel, broker, symbol, ids):
brokermod = get_brokermod(broker)
get_console_log(loglevel)
contracts = trio.run(partial(core.contracts, brokermod, symbol))
if not ids:
# just print out expiry dates which can be used with

View File

@ -227,28 +227,26 @@ async def get_cached_feed(
@tractor.stream @tractor.stream
async def start_quote_stream( async def start_quote_stream(
stream: tractor.Context, # marks this as a streaming func ctx: tractor.Context, # marks this as a streaming func
broker: str, broker: str,
symbols: List[Any], symbols: List[Any],
feed_type: str = 'stock', feed_type: str = 'stock',
rate: int = 3, rate: int = 3,
) -> None: ) -> None:
''' """Handle per-broker quote stream subscriptions using a "lazy" pub-sub
Handle per-broker quote stream subscriptions using a "lazy" pub-sub
pattern. pattern.
Spawns new quoter tasks for each broker backend on-demand. Spawns new quoter tasks for each broker backend on-demand.
Since most brokers seems to support batch quote requests we Since most brokers seems to support batch quote requests we
limit to one task per process (for now). limit to one task per process (for now).
"""
'''
# XXX: why do we need this again? # XXX: why do we need this again?
get_console_log(tractor.current_actor().loglevel) get_console_log(tractor.current_actor().loglevel)
# pull global vars from local actor # pull global vars from local actor
symbols = list(symbols) symbols = list(symbols)
log.info( log.info(
f"{stream.chan.uid} subscribed to {broker} for symbols {symbols}") f"{ctx.chan.uid} subscribed to {broker} for symbols {symbols}")
# another actor task may have already created it # another actor task may have already created it
async with get_cached_feed(broker) as feed: async with get_cached_feed(broker) as feed:
@ -292,13 +290,13 @@ async def start_quote_stream(
assert fquote['displayable'] assert fquote['displayable']
payload[sym] = fquote payload[sym] = fquote
await stream.send_yield(payload) await ctx.send_yield(payload)
await stream_poll_requests( await stream_poll_requests(
# ``trionics.msgpub`` required kwargs # ``trionics.msgpub`` required kwargs
task_name=feed_type, task_name=feed_type,
ctx=stream, ctx=ctx,
topics=symbols, topics=symbols,
packetizer=feed.mod.packetizer, packetizer=feed.mod.packetizer,
@ -321,11 +319,9 @@ async def call_client(
class DataFeed: class DataFeed:
''' """Data feed client for streaming symbol data from and making API client calls
Data feed client for streaming symbol data from and making API to a (remote) ``brokerd`` daemon.
client calls to a (remote) ``brokerd`` daemon. """
'''
_allowed = ('stock', 'option') _allowed = ('stock', 'option')
def __init__(self, portal, brokermod): def __init__(self, portal, brokermod):

View File

@@ -1,70 +0,0 @@
``deribit`` backend
-------------------
pretty good liquidity crypto derivatives, uses custom json rpc over ws for
client methods, then `cryptofeed` for data streams.
status
******
- supports option charts
- no order support yet
config
******
In order to get order mode support your ``brokers.toml``
needs to have something like the following:
.. code:: toml
[deribit]
key_id = 'XXXXXXXX'
key_secret = 'Xx_XxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXxXx'
To obtain an api id and secret you need to create an account, which can be a
real market account over at:
- deribit.com (requires KYC for deposit address)
Or a testnet account over at:
- test.deribit.com
For testnet once the account is created here is how you deposit fake crypto to
try it out:
1) Go to Wallet:
.. figure:: assets/0_wallet.png
:align: center
:target: assets/0_wallet.png
:alt: wallet page
2) Then click on the ellipsis menu and select deposit
.. figure:: assets/1_wallet_select_deposit.png
:align: center
:target: assets/1_wallet_select_deposit.png
:alt: wallet deposit page
3) This will take you to the deposit address page
.. figure:: assets/2_gen_deposit_addr.png
:align: center
:target: assets/2_gen_deposit_addr.png
:alt: generate deposit address page
4) After clicking generate you should see the address, copy it and go to the
`coin faucet <https://test.deribit.com/dericoin/BTC/deposit>`_ and send fake
coins to that address.
.. figure:: assets/3_deposit_address.png
:align: center
:target: assets/3_deposit_address.png
:alt: generated address
5) Back in the deposit address page you should see the deposit in your history
.. figure:: assets/4_wallet_deposit_history.png
:align: center
:target: assets/4_wallet_deposit_history.png
:alt: wallet deposit history

View File

@@ -1,65 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Deribit backend.
'''
from piker.log import get_logger
log = get_logger(__name__)
from .api import (
get_client,
)
from .feed import (
open_history_client,
open_symbol_search,
stream_quotes,
backfill_bars
)
# from .broker import (
# trades_dialogue,
# norm_trade_records,
# )
__all__ = [
'get_client',
# 'trades_dialogue',
'open_history_client',
'open_symbol_search',
'stream_quotes',
# 'norm_trade_records',
]
# tractor RPC enable arg
__enable_modules__: list[str] = [
'api',
'feed',
# 'broker',
]
# passed to ``tractor.ActorNursery.start_actor()``
_spawn_kwargs = {
'infect_asyncio': True,
}
# annotation to let backend agnostic code
# know if ``brokerd`` should be spawned with
# ``tractor``'s aio mode.
_infect_asyncio: bool = True

View File

@ -1,672 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Deribit backend.
'''
import json
import time
import asyncio
from contextlib import asynccontextmanager as acm, AsyncExitStack
from functools import partial
from datetime import datetime
from typing import Any, Optional, Iterable, Callable
import pendulum
import asks
import trio
from trio_typing import Nursery, TaskStatus
from fuzzywuzzy import process as fuzzy
import numpy as np
from piker.data.types import Struct
from piker.data._web_bs import (
NoBsWs,
open_autorecon_ws,
open_jsonrpc_session
)
from .._util import resproc
from piker import config
from piker.log import get_logger
from tractor.trionics import (
broadcast_receiver,
BroadcastReceiver,
maybe_open_context
)
from tractor import to_asyncio
from cryptofeed import FeedHandler
from cryptofeed.defines import (
DERIBIT,
L1_BOOK, TRADES,
OPTION, CALL, PUT,
# needed by the (draft) order feed relay further below
FILLS, ORDER_INFO,
)
from cryptofeed.symbols import Symbol
log = get_logger(__name__)
_spawn_kwargs = {
'infect_asyncio': True,
}
_url = 'https://www.deribit.com'
_ws_url = 'wss://www.deribit.com/ws/api/v2'
_testnet_ws_url = 'wss://test.deribit.com/ws/api/v2'
# Broker specific ohlc schema (rest)
_ohlc_dtype = [
('index', int),
('time', int),
('open', float),
('high', float),
('low', float),
('close', float),
('volume', float),
('bar_wap', float), # will be zeroed by sampler if not filled
]
class JSONRPCResult(Struct):
id: int
usIn: int
usOut: int
usDiff: int
testnet: bool
jsonrpc: str = '2.0'
result: Optional[dict] = None
error: Optional[dict] = None
class JSONRPCChannel(Struct):
jsonrpc: str = '2.0'
method: str
params: dict
class KLinesResult(Struct):
close: list[float]
cost: list[float]
high: list[float]
low: list[float]
open: list[float]
status: str
ticks: list[int]
volume: list[float]
class Trade(Struct):
trade_seq: int
trade_id: str
timestamp: int
tick_direction: int
price: float
mark_price: float
iv: float
instrument_name: str
index_price: float
direction: str
amount: float
combo_trade_id: Optional[int] = 0
combo_id: Optional[str] = ''
class LastTradesResult(Struct):
trades: list[Trade]
has_more: bool
# convert datetime obj timestamp to unixtime in milliseconds
def deribit_timestamp(when):
return int((when.timestamp() * 1000) + (when.microsecond / 1000))
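# e.g. sanity check (assuming a tz-aware ``pendulum``/``datetime`` obj):
#   deribit_timestamp(pendulum.datetime(2022, 1, 1, tz='UTC')) -> 1640995200000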
def str_to_cb_sym(name: str) -> Symbol:
base, strike_price, expiry_date, option_type = name.split('-')
quote = base
if option_type == 'put':
option_type = PUT
elif option_type == 'call':
option_type = CALL
else:
raise Exception("Couldn\'t parse option type")
return Symbol(
base, quote,
type=OPTION,
strike_price=strike_price,
option_type=option_type,
expiry_date=expiry_date,
expiry_normalize=False)
def piker_sym_to_cb_sym(name: str) -> Symbol:
base, expiry_date, strike_price, option_type = tuple(
name.upper().split('-'))
quote = base
if option_type == 'P':
option_type = PUT
elif option_type == 'C':
option_type = CALL
else:
raise Exception("Couldn\'t parse option type")
return Symbol(
base, quote,
type=OPTION,
strike_price=strike_price,
option_type=option_type,
expiry_date=expiry_date.upper())
def cb_sym_to_deribit_inst(sym: Symbol):
# cryptofeed normalized
cb_norm = ['F', 'G', 'H', 'J', 'K', 'M', 'N', 'Q', 'U', 'V', 'X', 'Z']
# deribit specific
months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
exp = sym.expiry_date
# YYMDD
# 01234
year, month, day = (
exp[:2], months[cb_norm.index(exp[2:3])], exp[3:])
otype = 'C' if sym.option_type == CALL else 'P'
return f'{sym.base}-{day}{month}{year}-{sym.strike_price}-{otype}'
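# round-trip sketch (hypothetical instrument): the piker-style name
# 'btc-30dec22-20000-c' parsed via ``piker_sym_to_cb_sym()`` above should
# map back here to the deribit instrument 'BTC-30DEC22-20000-C', after
# unpacking the cryptofeed month-code expiry form ('22Z30' -> '30DEC22').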
def get_config() -> dict[str, Any]:
conf, path = config.load()
section = conf.get('deribit')
# TODO: document why we send this; it's basically just the logging params for cryptofeed
conf['log'] = {}
conf['log']['disabled'] = True
if section is None:
log.warning(f'No config section found for deribit in {path}')
return conf
class Client:
def __init__(self, json_rpc: Callable) -> None:
self._pairs: dict[str, Any] = None
config = get_config().get('deribit', {})
if ('key_id' in config) and ('key_secret' in config):
self._key_id = config['key_id']
self._key_secret = config['key_secret']
else:
self._key_id = None
self._key_secret = None
self.json_rpc = json_rpc
@property
def currencies(self):
return ['btc', 'eth', 'sol', 'usd']
async def get_balances(self, kind: str = 'option') -> dict[str, float]:
"""Return the set of positions for this account
by symbol.
"""
balances = {}
for currency in self.currencies:
resp = await self.json_rpc(
'private/get_positions', params={
'currency': currency.upper(),
'kind': kind})
balances[currency] = resp.result
return balances
async def get_assets(self) -> dict[str, float]:
"""Return the set of asset balances for this account
by symbol.
"""
balances = {}
for currency in self.currencies:
resp = await self.json_rpc(
'private/get_account_summary', params={
'currency': currency.upper()})
balances[currency] = resp.result['balance']
return balances
async def submit_limit(
self,
symbol: str,
price: float,
action: str,
size: float
) -> dict:
"""Place an order
"""
params = {
'instrument_name': symbol.upper(),
'amount': size,
'type': 'limit',
'price': price,
}
resp = await self.json_rpc(
f'private/{action}', params)
return resp.result
async def submit_cancel(self, oid: str):
"""Send cancel request for order id
"""
resp = await self.json_rpc(
'private/cancel', {'order_id': oid})
return resp.result
async def symbol_info(
self,
instrument: Optional[str] = None,
currency: str = 'btc', # BTC, ETH, SOL, USDC
kind: str = 'option',
expired: bool = False
) -> dict[str, Any]:
"""Get symbol info for the exchange.
"""
if self._pairs:
return self._pairs
# will retrieve all symbols by default
params = {
'currency': currency.upper(),
'kind': kind,
'expired': str(expired).lower()
}
resp = await self.json_rpc('public/get_instruments', params)
results = resp.result
instruments = {
item['instrument_name'].lower(): item
for item in results
}
if instrument is not None:
return instruments[instrument]
else:
return instruments
async def cache_symbols(
self,
) -> dict:
if not self._pairs:
self._pairs = await self.symbol_info()
return self._pairs
async def search_symbols(
self,
pattern: str,
limit: int = 30,
) -> dict[str, Any]:
data = await self.symbol_info()
matches = fuzzy.extractBests(
pattern,
data,
score_cutoff=35,
limit=limit
)
# repack in dict form
return {item[0]['instrument_name'].lower(): item[0]
for item in matches}
async def bars(
self,
symbol: str,
start_dt: Optional[datetime] = None,
end_dt: Optional[datetime] = None,
limit: int = 1000,
as_np: bool = True,
) -> dict:
instrument = symbol
if end_dt is None:
end_dt = pendulum.now('UTC')
if start_dt is None:
start_dt = end_dt.start_of(
'minute').subtract(minutes=limit)
start_time = deribit_timestamp(start_dt)
end_time = deribit_timestamp(end_dt)
# https://docs.deribit.com/#public-get_tradingview_chart_data
resp = await self.json_rpc(
'public/get_tradingview_chart_data',
params={
'instrument_name': instrument.upper(),
'start_timestamp': start_time,
'end_timestamp': end_time,
'resolution': '1'
})
result = KLinesResult(**resp.result)
new_bars = []
for i in range(len(result.close)):
row = [
(start_time + (i * (60 * 1000))) / 1000.0,  # bar start time in seconds
result.open[i],
result.high[i],
result.low[i],
result.close[i],
result.volume[i],
0,  # bar_wap, zeroed here and filled by the sampler
]
new_bars.append((i,) + tuple(row))
array = np.array(new_bars, dtype=_ohlc_dtype) if as_np else new_bars
return array
async def last_trades(
self,
instrument: str,
count: int = 10
):
resp = await self.json_rpc(
'public/get_last_trades_by_instrument',
params={
'instrument_name': instrument,
'count': count
})
return LastTradesResult(**resp.result)
@acm
async def get_client(
is_brokercheck: bool = False
) -> Client:
async with (
trio.open_nursery() as n,
open_jsonrpc_session(
_testnet_ws_url, dtype=JSONRPCResult) as json_rpc
):
client = Client(json_rpc)
_refresh_token: Optional[str] = None
_access_token: Optional[str] = None
async def _auth_loop(
task_status: TaskStatus = trio.TASK_STATUS_IGNORED
):
"""Background task that adquires a first access token and then will
refresh the access token while the nursery isn't cancelled.
https://docs.deribit.com/?python#authentication-2
"""
renew_time = 10
access_scope = 'trade:read_write'
_expiry_time = time.time()
got_access = False
nonlocal _refresh_token
nonlocal _access_token
while True:
if _expiry_time - time.time() < renew_time:
# if we are close to token expiry time
if _refresh_token is not None:
# if we already have a refresh token we don't need to
# send the secret again
params = {
'grant_type': 'refresh_token',
'refresh_token': _refresh_token,
'scope': access_scope
}
else:
# we don't have refresh token, send secret to initialize
params = {
'grant_type': 'client_credentials',
'client_id': client._key_id,
'client_secret': client._key_secret,
'scope': access_scope
}
resp = await json_rpc('public/auth', params)
result = resp.result
_expiry_time = time.time() + result['expires_in']
_refresh_token = result['refresh_token']
if 'access_token' in result:
_access_token = result['access_token']
if not got_access:
# first time this loop runs we must indicate task is
# started, we have auth
got_access = True
task_status.started()
else:
await trio.sleep(renew_time / 2)
# if we have client creds launch auth loop
if client._key_id is not None:
await n.start(_auth_loop)
await client.cache_symbols()
yield client
n.cancel_scope.cancel()
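# NB: backend code normally enters this via
# ``open_cached_client('deribit')`` (see ``feed.py``) instead of calling
# ``get_client()`` directly, so the client / ws session can be shared.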
@acm
async def open_feed_handler():
fh = FeedHandler(config=get_config())
yield fh
await to_asyncio.run_task(fh.stop_async)
@acm
async def maybe_open_feed_handler() -> trio.abc.ReceiveStream:
async with maybe_open_context(
acm_func=open_feed_handler,
key='feedhandler',
) as (cache_hit, fh):
yield fh
async def aio_price_feed_relay(
fh: FeedHandler,
instrument: Symbol,
from_trio: asyncio.Queue,
to_trio: trio.abc.SendChannel,
) -> None:
async def _trade(data: dict, receipt_timestamp):
to_trio.send_nowait(('trade', {
'symbol': cb_sym_to_deribit_inst(
str_to_cb_sym(data.symbol)).lower(),
'last': data,
'broker_ts': time.time(),
'data': data.to_dict(),
'receipt': receipt_timestamp
}))
async def _l1(data: dict, receipt_timestamp):
to_trio.send_nowait(('l1', {
'symbol': cb_sym_to_deribit_inst(
str_to_cb_sym(data.symbol)).lower(),
'ticks': [
{'type': 'bid',
'price': float(data.bid_price), 'size': float(data.bid_size)},
{'type': 'bsize',
'price': float(data.bid_price), 'size': float(data.bid_size)},
{'type': 'ask',
'price': float(data.ask_price), 'size': float(data.ask_size)},
{'type': 'asize',
'price': float(data.ask_price), 'size': float(data.ask_size)}
]
}))
fh.add_feed(
DERIBIT,
channels=[TRADES, L1_BOOK],
symbols=[piker_sym_to_cb_sym(instrument)],
callbacks={
TRADES: _trade,
L1_BOOK: _l1
})
if not fh.running:
fh.run(
start_loop=False,
install_signal_handlers=False)
# sync with trio
to_trio.send_nowait(None)
await asyncio.sleep(float('inf'))
@acm
async def open_price_feed(
instrument: str
) -> trio.abc.ReceiveStream:
async with maybe_open_feed_handler() as fh:
async with to_asyncio.open_channel_from(
partial(
aio_price_feed_relay,
fh,
instrument
)
) as (first, chan):
yield chan
@acm
async def maybe_open_price_feed(
instrument: str
) -> trio.abc.ReceiveStream:
# TODO: add a predicate to maybe_open_context
async with maybe_open_context(
acm_func=open_price_feed,
kwargs={
'instrument': instrument
},
key=f'{instrument}-price',
) as (cache_hit, feed):
if cache_hit:
yield broadcast_receiver(feed, 10)
else:
yield feed
async def aio_order_feed_relay(
fh: FeedHandler,
instrument: Symbol,
from_trio: asyncio.Queue,
to_trio: trio.abc.SendChannel,
) -> None:
async def _fill(data: dict, receipt_timestamp):
breakpoint()
async def _order_info(data: dict, receipt_timestamp):
breakpoint()
fh.add_feed(
DERIBIT,
channels=[FILLS, ORDER_INFO],
symbols=[instrument.upper()],
callbacks={
FILLS: _fill,
ORDER_INFO: _order_info,
})
if not fh.running:
fh.run(
start_loop=False,
install_signal_handlers=False)
# sync with trio
to_trio.send_nowait(None)
await asyncio.sleep(float('inf'))
@acm
async def open_order_feed(
instrument: list[str]
) -> trio.abc.ReceiveStream:
async with maybe_open_feed_handler() as fh:
async with to_asyncio.open_channel_from(
partial(
aio_order_feed_relay,
fh,
instrument
)
) as (first, chan):
yield chan
@acm
async def maybe_open_order_feed(
instrument: str
) -> trio.abc.ReceiveStream:
# TODO: add a predicate to maybe_open_context
async with maybe_open_context(
acm_func=open_order_feed,
kwargs={
'instrument': instrument,
},
key=f'{instrument}-order',
) as (cache_hit, feed):
if cache_hit:
yield broadcast_receiver(feed, 10)
else:
yield feed

Binary file not shown.

Before

Width:  |  Height:  |  Size: 169 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 59 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 70 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 132 KiB

View File

@ -1,185 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Deribit backend.
'''
from contextlib import asynccontextmanager as acm
from datetime import datetime
from typing import Any, Optional, Callable
import time
import trio
from trio_typing import TaskStatus
import pendulum
from fuzzywuzzy import process as fuzzy
import numpy as np
import tractor
from piker._cacheables import open_cached_client
from piker.log import get_logger, get_console_log
from piker.data import ShmArray
from piker.brokers._util import (
BrokerError,
DataUnavailable,
)
from cryptofeed import FeedHandler
from cryptofeed.defines import (
DERIBIT, L1_BOOK, TRADES, OPTION, CALL, PUT
)
from cryptofeed.symbols import Symbol
from .api import (
Client, Trade,
get_config,
str_to_cb_sym, piker_sym_to_cb_sym, cb_sym_to_deribit_inst,
maybe_open_price_feed
)
_spawn_kwargs = {
'infect_asyncio': True,
}
log = get_logger(__name__)
@acm
async def open_history_client(
instrument: str,
) -> tuple[Callable, int]:
# TODO implement history getter for the new storage layer.
async with open_cached_client('deribit') as client:
async def get_ohlc(
end_dt: Optional[datetime] = None,
start_dt: Optional[datetime] = None,
) -> tuple[
np.ndarray,
datetime, # start
datetime, # end
]:
array = await client.bars(
instrument,
start_dt=start_dt,
end_dt=end_dt,
)
if len(array) == 0:
raise DataUnavailable
start_dt = pendulum.from_timestamp(array[0]['time'])
end_dt = pendulum.from_timestamp(array[-1]['time'])
return array, start_dt, end_dt
yield get_ohlc, {'erlangs': 3, 'rate': 3}
async def stream_quotes(
send_chan: trio.abc.SendChannel,
symbols: list[str],
feed_is_live: trio.Event,
loglevel: str = None,
# startup sync
task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,
) -> None:
# XXX: required to propagate ``tractor`` loglevel to piker logging
get_console_log(loglevel or tractor.current_actor().loglevel)
sym = symbols[0]
async with (
open_cached_client('deribit') as client,
send_chan as send_chan
):
init_msgs = {
# pass back token, and bool, signalling if we're the writer
# and that history has been written
sym: {
'symbol_info': {
'asset_type': 'option',
'price_tick_size': 0.0005
},
'shm_write_opts': {'sum_tick_vml': False},
'fqsn': sym,
},
}
nsym = piker_sym_to_cb_sym(sym)
async with maybe_open_price_feed(sym) as stream:
cache = await client.cache_symbols()
last_trades = (await client.last_trades(
cb_sym_to_deribit_inst(nsym), count=1)).trades
if len(last_trades) == 0:
last_trade = None
async for typ, quote in stream:
if typ == 'trade':
last_trade = Trade(**(quote['data']))
break
else:
last_trade = Trade(**(last_trades[0]))
first_quote = {
'symbol': sym,
'last': last_trade.price,
'brokerd_ts': last_trade.timestamp,
'ticks': [{
'type': 'trade',
'price': last_trade.price,
'size': last_trade.amount,
'broker_ts': last_trade.timestamp
}]
}
task_status.started((init_msgs, first_quote))
feed_is_live.set()
async for typ, quote in stream:
topic = quote['symbol']
await send_chan.send({topic: quote})
@tractor.context
async def open_symbol_search(
ctx: tractor.Context,
) -> Client:
async with open_cached_client('deribit') as client:
# load all symbols locally for fast search
cache = await client.cache_symbols()
await ctx.started()
async with ctx.open_stream() as stream:
async for pattern in stream:
# repack in dict form
await stream.send(
await client.search_symbols(pattern))

View File

@ -1,134 +0,0 @@
``ib`` backend
--------------
more or less the "everything broker" for traditional and international
markets. they are the "go to" provider for automatic retail trading
and we interface to their APIs using the `ib_insync` project.
status
******
current support is *production grade* and both real-time data and order
management should be correct and fast. this backend is used by core devs
for live trading.
currently there is not yet full support for:
- options charting and trading
- paxos based crypto rt feeds and trading
config
******
In order to get order mode support your ``brokers.toml``
needs to have something like the following:
.. code:: toml
[ib]
hosts = [
"127.0.0.1",
]
# TODO: when we eventually spawn gateways in our
# container, we can just dynamically allocate these
# using IBC.
ports = [
4002,
4003,
4006,
4001,
7497,
]
# XXX: for a paper account the flex web query service
# is not supported so you have to manually download
# an XML report and put it in a location that can be
# accessed by the ``brokerd.ib`` backend code for parsing.
flex_token = '1111111111111111'
flex_trades_query_id = '6969696' # live accounts only?
# 3rd party web-api token
# (XXX: not sure if this works yet)
trade_log_token = '111111111111111'
# when clients are being scanned this determines
# which clients are preferred to be used for data feeds
# based on account names which are detected as active
# on each client.
prefer_data_account = [
# this has to be first in order to make data work with dual paper + live
'main',
'algopaper',
]
[ib.accounts]
main = 'U69696969'
algopaper = 'DU9696969'
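The ``[ib.accounts]`` table is just a ``name -> account id`` map (the same
names show up as keys in your ``pps.toml`` and ledger files); a rough sketch
of reading it back, assuming piker's ``config.load()`` helper:

.. code:: python

    from piker import config

    conf, path = config.load()
    accounts = conf['ib']['accounts']
    # -> {'main': 'U69696969', 'algopaper': 'DU9696969'}
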
If everything works correctly you should see any current positions
loaded in the pps pane on chart load and you should also be able to
check your trade records in the file::
<pikerk_conf_dir>/ledgers/trades_ib_algopaper.toml
An example ledger file will have entries written verbatim from the
trade events schema:
.. code:: toml
["0000e1a7.630f5e5a.01.01"]
secType = "FUT"
conId = 515416577
symbol = "MNQ"
lastTradeDateOrContractMonth = "20221216"
strike = 0.0
right = ""
multiplier = "2"
exchange = "GLOBEX"
primaryExchange = ""
currency = "USD"
localSymbol = "MNQZ2"
tradingClass = "MNQ"
includeExpired = false
secIdType = ""
secId = ""
comboLegsDescrip = ""
comboLegs = []
execId = "0000e1a7.630f5e5a.01.01"
time = 1661972086.0
acctNumber = "DU69696969"
side = "BOT"
shares = 1.0
price = 12372.75
permId = 441472655
clientId = 6116
orderId = 985
liquidation = 0
cumQty = 1.0
avgPrice = 12372.75
orderRef = ""
evRule = ""
evMultiplier = 0.0
modelCode = ""
lastLiquidity = 1
broker_time = 1661972086.0
name = "ib"
commission = 0.57
realizedPNL = 243.41
yield_ = 0.0
yieldRedemptionDate = 0
listingExchange = "GLOBEX"
date = "2022-08-31T18:54:46+00:00"
your ``pps.toml`` file will have position entries like,
.. code:: toml
[ib.algopaper."mnq.globex.20221216"]
size = -1.0
ppu = 12423.630576923071
bsuid = 515416577
expiry = "2022-12-16T00:00:00+00:00"
clears = [
{ dt = "2022-08-31T18:54:46+00:00", ppu = 12423.630576923071, accum_size = -19.0, price = 12372.75, size = 1.0, cost = 0.57, tid = "0000e1a7.630f5e5a.01.01" },
]

View File

@ -20,10 +20,15 @@ Interactive Brokers API backend.
Sub-modules within break into the core functionalities: Sub-modules within break into the core functionalities:
- ``broker.py`` part for orders / trading endpoints - ``broker.py`` part for orders / trading endpoints
- ``feed.py`` for real-time data feed endpoints - ``data.py`` for real-time data feed endpoints
- ``api.py`` for the core API machinery which is ``trio``-ized
- ``client.py`` for the core API machinery which is ``trio``-ized
wrapping around ``ib_insync``. wrapping around ``ib_insync``.
- ``report.py`` for the hackery to build manual pp calcs
to avoid ib's absolute bullshit FIFO style position
tracking..
""" """
from .api import ( from .api import (
get_client, get_client,
@ -33,10 +38,7 @@ from .feed import (
open_symbol_search, open_symbol_search,
stream_quotes, stream_quotes,
) )
from .broker import ( from .broker import trades_dialogue
trades_dialogue,
norm_trade_records,
)
__all__ = [ __all__ = [
'get_client', 'get_client',

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,64 +0,0 @@
``kraken`` backend
------------------
though they don't have the most liquidity of all the cexes they sure are
accommodating to those of us who appreciate a little ``xmr``.
status
******
current support is *production grade* and both real-time data and order
management should be correct and fast. this backend is used by core devs
for live trading.
config
******
In order to get order mode support your ``brokers.toml``
needs to have something like the following:
.. code:: toml
[kraken]
accounts.spot = 'spot'
key_descr = "spot"
api_key = "69696969696969696696969696969696969696969696969696969696"
secret = "BOOBSBOOBSBOOBSBOOBSBOOBSSMBZ69696969696969669969696969696"
If everything works correctly you should see any current positions
loaded in the pps pane on chart load and you should also be able to
check your trade records in the file::
<pikerk_conf_dir>/ledgers/trades_kraken_spot.toml
An example ledger file will have entries written verbatim from the
trade events schema:
.. code:: toml
[TFJBKK-SMBZS-VJ4UWS]
ordertxid = "SMBZSA-7CNQU-3HWLNJ"
postxid = "SMBZSE-M7IF5-CFI7LT"
pair = "XXMRZEUR"
time = 1655691993.4133966
type = "buy"
ordertype = "limit"
price = "103.97000000"
cost = "499.99999977"
fee = "0.80000000"
vol = "4.80907954"
margin = "0.00000000"
misc = ""
your ``pps.toml`` file will have position entries like,
.. code:: toml
[kraken.spot."xmreur.kraken"]
size = 4.80907954
ppu = 103.97000000
bsuid = "XXMRZEUR"
clears = [
{ tid = "TFJBKK-SMBZS-VJ4UWS", cost = 0.8, price = 103.97, size = 4.80907954, dt = "2022-05-20T02:26:33.413397+00:00" },
]

View File

@ -1,61 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Kraken backend.
Sub-modules within break into the core functionalities:
- ``broker.py`` part for orders / trading endpoints
- ``feed.py`` for real-time data feed endpoints
- ``api.py`` for the core API machinery which is ``trio``-ized
wrapping around kraken's REST and websocket APIs.
'''
from piker.log import get_logger
log = get_logger(__name__)
from .api import (
get_client,
)
from .feed import (
open_history_client,
open_symbol_search,
stream_quotes,
)
from .broker import (
trades_dialogue,
norm_trade_records,
)
__all__ = [
'get_client',
'trades_dialogue',
'open_history_client',
'open_symbol_search',
'stream_quotes',
'norm_trade_records',
]
# tractor RPC enable arg
__enable_modules__: list[str] = [
'api',
'feed',
'broker',
]

View File

@ -1,536 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Kraken web API wrapping.
'''
from contextlib import asynccontextmanager as acm
from datetime import datetime
import itertools
from typing import (
Any,
Optional,
Union,
)
import time
from bidict import bidict
import pendulum
import asks
from fuzzywuzzy import process as fuzzy
import numpy as np
import urllib.parse
import hashlib
import hmac
import base64
import trio
from piker import config
from piker.brokers._util import (
resproc,
SymbolNotFound,
BrokerError,
DataThrottle,
)
from piker.pp import Transaction
from . import log
# <uri>/<version>/
_url = 'https://api.kraken.com/0'
# Broker specific ohlc schema which includes a vwap field
_ohlc_dtype = [
('index', int),
('time', int),
('open', float),
('high', float),
('low', float),
('close', float),
('volume', float),
('count', int),
('bar_wap', float),
]
# UI components allow this to be declared such that additional
# (historical) fields can be exposed.
ohlc_dtype = np.dtype(_ohlc_dtype)
_show_wap_in_history = True
_symbol_info_translation: dict[str, str] = {
'tick_decimals': 'pair_decimals',
}
def get_config() -> dict[str, Any]:
conf, path = config.load()
section = conf.get('kraken')
if section is None:
log.warning(f'No config section found for kraken in {path}')
return {}
return section
def get_kraken_signature(
urlpath: str,
data: dict[str, Any],
secret: str
) -> str:
postdata = urllib.parse.urlencode(data)
encoded = (str(data['nonce']) + postdata).encode()
message = urlpath.encode() + hashlib.sha256(encoded).digest()
mac = hmac.new(base64.b64decode(secret), message, hashlib.sha512)
sigdigest = base64.b64encode(mac.digest())
return sigdigest.decode()
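# usage sketch (values assumed), mirroring what ``Client._private()``
# does below when POSTing a signed request:
#
#   data = {'nonce': str(int(1000 * time.time()))}
#   sig = get_kraken_signature('/0/private/Balance', data, secret)
#   headers = {'API-Key': api_key, 'API-Sign': sig}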
class InvalidKey(ValueError):
'''
EAPI:Invalid key
This error is returned when the API key used for the call is
either expired or disabled, please review the API key in your
Settings -> API tab of account management or generate a new one
and update your application.
'''
class Client:
# global symbol normalization table
_ntable: dict[str, str] = {}
_atable: bidict[str, str] = bidict()
def __init__(
self,
config: dict[str, str],
name: str = '',
api_key: str = '',
secret: str = ''
) -> None:
self._sesh = asks.Session(connections=4)
self._sesh.base_location = _url
self._sesh.headers.update({
'User-Agent':
'krakenex/2.1.0 (+https://github.com/veox/python3-krakenex)'
})
self.conf: dict[str, str] = config
self._pairs: list[str] = []
self._name = name
self._api_key = api_key
self._secret = secret
@property
def pairs(self) -> dict[str, Any]:
if self._pairs is None:
raise RuntimeError(
"Make sure to run `cache_symbols()` on startup!"
)
# retrieve and cache all symbols
return self._pairs
async def _public(
self,
method: str,
data: dict,
) -> dict[str, Any]:
resp = await self._sesh.post(
path=f'/public/{method}',
json=data,
timeout=float('inf')
)
return resproc(resp, log)
async def _private(
self,
method: str,
data: dict,
uri_path: str
) -> dict[str, Any]:
headers = {
'Content-Type':
'application/x-www-form-urlencoded',
'API-Key':
self._api_key,
'API-Sign':
get_kraken_signature(uri_path, data, self._secret)
}
resp = await self._sesh.post(
path=f'/private/{method}',
data=data,
headers=headers,
timeout=float('inf')
)
return resproc(resp, log)
async def endpoint(
self,
method: str,
data: dict[str, Any]
) -> dict[str, Any]:
uri_path = f'/0/private/{method}'
data['nonce'] = str(int(1000*time.time()))
return await self._private(method, data, uri_path)
async def get_balances(
self,
) -> dict[str, float]:
'''
Return the set of asset balances for this account
by symbol.
'''
resp = await self.endpoint(
'Balance',
{},
)
by_bsuid = resp['result']
return {
self._atable[sym].lower(): float(bal)
for sym, bal in by_bsuid.items()
}
async def get_assets(self) -> dict[str, dict]:
resp = await self._public('Assets', {})
return resp['result']
async def cache_assets(self) -> None:
assets = self.assets = await self.get_assets()
for bsuid, info in assets.items():
self._atable[bsuid] = info['altname']
async def get_trades(
self,
fetch_limit: int = 10,
) -> dict[str, Any]:
'''
Get the trades (aka cleared orders) history from the rest endpoint:
https://docs.kraken.com/rest/#operation/getTradeHistory
'''
ofs = 0
trades_by_id: dict[str, Any] = {}
for i in itertools.count():
if i >= fetch_limit:
break
# increment 'ofs' pagination offset
ofs = i*50
resp = await self.endpoint(
'TradesHistory',
{'ofs': ofs},
)
by_id = resp['result']['trades']
trades_by_id.update(by_id)
# we can get up to 50 results per query
if (
len(by_id) < 50
):
err = resp.get('error')
if err:
raise BrokerError(err)
# we received less than the max (50) results so this
# must be the end of the trade history; grab the
# reported total count and stop paginating.
count = resp['result']['count']
break
# sanity check on update
assert count == len(trades_by_id.values())
return trades_by_id
async def get_xfers(
self,
asset: str,
src_asset: str = '',
) -> dict[str, Transaction]:
'''
Get asset balance transfer transactions.
Currently only withdrawals are supported.
'''
xfers: list[dict] = (await self.endpoint(
'WithdrawStatus',
{'asset': asset},
))['result']
# eg. resp schema:
# 'result': [{'method': 'Bitcoin', 'aclass': 'currency', 'asset':
# 'XXBT', 'refid': 'AGBJRMB-JHD2M4-NDI3NR', 'txid':
# 'b95d66d3bb6fd76cbccb93f7639f99a505cb20752c62ea0acc093a0e46547c44',
# 'info': 'bc1qc8enqjekwppmw3g80p56z5ns7ze3wraqk5rl9z',
# 'amount': '0.00300726', 'fee': '0.00001000', 'time':
# 1658347714, 'status': 'Success'}]}
trans: dict[str, Transaction] = {}
for entry in xfers:
# look up the normalized name
asset = self._atable[entry['asset']].lower()
# XXX: this is in the asset units (likely) so it isn't
# quite the same as a commissions cost necessarily..)
cost = float(entry['fee'])
tran = Transaction(
fqsn=asset + '.kraken',
tid=entry['txid'],
dt=pendulum.from_timestamp(entry['time']),
bsuid=f'{asset}{src_asset}',
size=-1*(
float(entry['amount'])
+
cost
),
# since this will be treated as a "sell" it
# shouldn't be needed to compute the be price.
price='NaN',
# XXX: see note above
cost=0,
)
trans[tran.tid] = tran
return trans
async def submit_limit(
self,
symbol: str,
price: float,
action: str,
size: float,
reqid: str = None,
validate: bool = False  # set True to test the call without a real submission
) -> dict:
'''
Place an order and return integer request id provided by client.
'''
# Build common data dict for common keys from both endpoints
data = {
"pair": symbol,
"price": str(price),
"validate": validate
}
if reqid is None:
# Build order data for kraken api
data |= {
"ordertype": "limit",
"type": action,
"volume": str(size),
}
return await self.endpoint('AddOrder', data)
else:
# Edit order data for kraken api
data["txid"] = reqid
return await self.endpoint('EditOrder', data)
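# usage sketch (symbol / sizes assumed): a fresh limit submission vs. an
# in-place edit of an existing order via its kraken ``txid``:
#
#   await client.submit_limit('XBTUSD', price=20e3, action='buy', size=0.1)
#   await client.submit_limit('XBTUSD', price=20.1e3, action='buy', size=0.1, reqid=txid)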
async def submit_cancel(
self,
reqid: str,
) -> dict:
'''
Send cancel request for order id ``reqid``.
'''
# txid is a transaction id given by kraken
return await self.endpoint('CancelOrder', {"txid": reqid})
async def symbol_info(
self,
pair: Optional[str] = None,
) -> dict[str, dict[str, str]]:
if pair is not None:
pairs = {'pair': pair}
else:
pairs = None # get all pairs
resp = await self._public('AssetPairs', pairs)
err = resp['error']
if err:
symbolname = pairs['pair'] if pair else None
raise SymbolNotFound(f'{symbolname}.kraken')
pairs = resp['result']
if pair is not None:
_, data = next(iter(pairs.items()))
return data
else:
return pairs
async def cache_symbols(
self,
) -> dict:
if not self._pairs:
self._pairs = await self.symbol_info()
ntable = {}
for restapikey, info in self._pairs.items():
ntable[restapikey] = ntable[info['wsname']] = info['altname']
self._ntable.update(ntable)
return self._pairs
async def search_symbols(
self,
pattern: str,
limit: int = None,
) -> dict[str, Any]:
if self._pairs is not None:
data = self._pairs
else:
data = await self.symbol_info()
matches = fuzzy.extractBests(
pattern,
data,
score_cutoff=50,
)
# repack in dict form
return {item[0]['altname']: item[0] for item in matches}
async def bars(
self,
symbol: str = 'XBTUSD',
# UTC 2017-07-02 12:53:20
since: Optional[Union[int, datetime]] = None,
count: int = 720, # <- max allowed per query
as_np: bool = True,
) -> dict:
if since is None:
since = pendulum.now('UTC').start_of('minute').subtract(
minutes=count).timestamp()
elif isinstance(since, int):
since = pendulum.from_timestamp(since).timestamp()
else: # presumably a pendulum datetime
since = since.timestamp()
# UTC 2017-07-02 12:53:20 is oldest seconds value
since = str(max(1499000000, int(since)))
json = await self._public(
'OHLC',
data={
'pair': symbol,
'since': since,
},
)
try:
res = json['result']
res.pop('last')
bars = next(iter(res.values()))
new_bars = []
first = bars[0]
last_nz_vwap = first[-3]
if last_nz_vwap == 0:
# use close if vwap is zero
last_nz_vwap = first[-4]
# convert all fields to native types
for i, bar in enumerate(bars):
# normalize weird zero-ed vwap values..cmon kraken..
# indicates vwap didn't change since last bar
vwap = float(bar.pop(-3))
if vwap != 0:
last_nz_vwap = vwap
if vwap == 0:
vwap = last_nz_vwap
# re-insert vwap as the last of the fields
bar.append(vwap)
new_bars.append(
(i,) + tuple(
ftype(bar[j]) for j, (name, ftype) in enumerate(
_ohlc_dtype[1:]
)
)
)
array = np.array(new_bars, dtype=_ohlc_dtype) if as_np else bars
return array
except KeyError:
errmsg = json['error'][0]
if 'not found' in errmsg:
raise SymbolNotFound(errmsg + f': {symbol}')
elif 'Too many requests' in errmsg:
raise DataThrottle(f'{symbol}')
else:
raise BrokerError(errmsg)
@classmethod
def normalize_symbol(
cls,
ticker: str
) -> str:
'''
Normalize symbol names to a 3x3 pair from the global
definition map which we build out from the data retrieved from
the 'AssetPairs' endpoint, see methods above.
'''
ticker = cls._ntable[ticker]
return ticker.lower()
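# e.g. (assuming the stock 'AssetPairs' mapping): both the rest-api key
# 'XXBTZUSD' and the ws name 'XBT/USD' normalize to 'xbtusd'.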
@acm
async def get_client() -> Client:
conf = get_config()
if conf:
client = Client(
conf,
name=conf['key_descr'],
api_key=conf['api_key'],
secret=conf['secret']
)
else:
client = Client({})
# at startup, load all symbols, and asset info in
# batch requests.
async with trio.open_nursery() as nurse:
nurse.start_soon(client.cache_assets)
await client.cache_symbols()
yield client

File diff suppressed because it is too large Load Diff

View File

@ -1,500 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Real-time and historical data feed endpoints.
'''
from contextlib import asynccontextmanager as acm
from datetime import datetime
from typing import (
Any,
Optional,
Callable,
)
import time
from async_generator import aclosing
from fuzzywuzzy import process as fuzzy
import numpy as np
import pendulum
from trio_typing import TaskStatus
import tractor
import trio
from piker._cacheables import open_cached_client
from piker.brokers._util import (
BrokerError,
DataThrottle,
DataUnavailable,
)
from piker.log import get_console_log
from piker.data import ShmArray
from piker.data.types import Struct
from piker.data._web_bs import open_autorecon_ws, NoBsWs
from . import log
from .api import (
Client,
)
# https://www.kraken.com/features/api#get-tradable-pairs
class Pair(Struct):
altname: str # alternate pair name
wsname: str # WebSocket pair name (if available)
aclass_base: str # asset class of base component
base: str # asset id of base component
aclass_quote: str # asset class of quote component
quote: str # asset id of quote component
lot: str # volume lot size
cost_decimals: int
costmin: float
pair_decimals: int # scaling decimal places for pair
lot_decimals: int # scaling decimal places for volume
# amount to multiply lot volume by to get currency volume
lot_multiplier: float
# array of leverage amounts available when buying
leverage_buy: list[int]
# array of leverage amounts available when selling
leverage_sell: list[int]
# fee schedule array in [volume, percent fee] tuples
fees: list[tuple[int, float]]
# maker fee schedule array in [volume, percent fee] tuples (if on
# maker/taker)
fees_maker: list[tuple[int, float]]
fee_volume_currency: str # volume discount currency
margin_call: str # margin call level
margin_stop: str # stop-out/liquidation margin level
ordermin: float # minimum order volume for pair
tick_size: float # min price step size
status: str
short_position_limit: float
long_position_limit: float
class OHLC(Struct):
'''
Description of the flattened OHLC quote format.
For schema details see:
https://docs.kraken.com/websockets/#message-ohlc
'''
chan_id: int # internal kraken id
chan_name: str # eg. ohlc-1 (name-interval)
pair: str # fx pair
time: float # Begin time of interval, in seconds since epoch
etime: float # End time of interval, in seconds since epoch
open: float # Open price of interval
high: float # High price within interval
low: float # Low price within interval
close: float # Close price of interval
vwap: float # Volume weighted average price within interval
volume: float # Accumulated volume **within interval**
count: int # Number of trades within interval
# (sampled) generated tick data
ticks: list[Any] = []
async def stream_messages(
ws: NoBsWs,
):
'''
Message stream parser and heartbeat handler.
Deliver ws subscription messages as well as handle heartbeat logic
though a single async generator.
'''
too_slow_count = last_hb = 0
while True:
with trio.move_on_after(5) as cs:
msg = await ws.recv_msg()
# trigger reconnection if heartbeat is laggy
if cs.cancelled_caught:
too_slow_count += 1
if too_slow_count > 20:
log.warning(
"Heartbeat is too slow, resetting ws connection")
await ws._connect()
too_slow_count = 0
continue
match msg:
case {'event': 'heartbeat'}:
now = time.time()
delay = now - last_hb
last_hb = now
# XXX: why tf is this not printing without --tl flag?
log.debug(f"Heartbeat after {delay}")
# print(f"Heartbeat after {delay}")
continue
case _:
# passthrough sub msgs
yield msg
async def process_data_feed_msgs(
ws: NoBsWs,
):
'''
Parse and pack data feed messages.
'''
async for msg in stream_messages(ws):
match msg:
case {
'errorMessage': errmsg
}:
raise BrokerError(errmsg)
case {
'event': 'subscriptionStatus',
} as sub:
log.info(
'WS subscription is active:\n'
f'{sub}'
)
continue
case [
chan_id,
*payload_array,
chan_name,
pair
]:
if 'ohlc' in chan_name:
ohlc = OHLC(
chan_id,
chan_name,
pair,
*payload_array[0]
)
ohlc.typecast()
yield 'ohlc', ohlc
elif 'spread' in chan_name:
bid, ask, ts, bsize, asize = map(
float, payload_array[0])
# TODO: really makes you think IB has a horrible API...
quote = {
'symbol': pair.replace('/', ''),
'ticks': [
{'type': 'bid', 'price': bid, 'size': bsize},
{'type': 'bsize', 'price': bid, 'size': bsize},
{'type': 'ask', 'price': ask, 'size': asize},
{'type': 'asize', 'price': ask, 'size': asize},
],
}
yield 'l1', quote
# elif 'book' in msg[-2]:
# chan_id, *payload_array, chan_name, pair = msg
# print(msg)
case _:
print(f'UNHANDLED MSG: {msg}')
# yield msg
def normalize(
ohlc: OHLC,
) -> dict:
quote = ohlc.to_dict()
quote['broker_ts'] = quote['time']
quote['brokerd_ts'] = time.time()
quote['symbol'] = quote['pair'] = quote['pair'].replace('/', '')
quote['last'] = quote['close']
quote['bar_wap'] = ohlc.vwap
# seriously eh? what's with this non-symmetry everywhere
# in subscription systems...
# XXX: piker style is always lowercases symbols.
topic = quote['pair'].replace('/', '').lower()
# print(quote)
return topic, quote
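# e.g. (assumed pair): an ohlc update for 'XBT/USD' gets repacked under the
# lowercased topic 'xbtusd' with 'last', 'bar_wap' and broker/brokerd
# timestamps attached for the sampler.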
@acm
async def open_history_client(
symbol: str,
) -> tuple[Callable, int]:
# TODO implement history getter for the new storage layer.
async with open_cached_client('kraken') as client:
# lol, kraken won't send any more than the "last"
# 720 1m bars.. so we have to just ignore further
# requests of this type..
queries: int = 0
async def get_ohlc(
timeframe: float,
end_dt: Optional[datetime] = None,
start_dt: Optional[datetime] = None,
) -> tuple[
np.ndarray,
datetime, # start
datetime, # end
]:
nonlocal queries
if (
queries > 0
or timeframe != 60
):
raise DataUnavailable(
'Only a single query for 1m bars supported')
count = 0
while count <= 3:
try:
array = await client.bars(
symbol,
since=end_dt,
)
count += 1
queries += 1
break
except DataThrottle:
log.warning(f'kraken OHLC throttle for {symbol}')
await trio.sleep(1)
start_dt = pendulum.from_timestamp(array[0]['time'])
end_dt = pendulum.from_timestamp(array[-1]['time'])
return array, start_dt, end_dt
yield get_ohlc, {'erlangs': 1, 'rate': 1}
async def stream_quotes(
send_chan: trio.abc.SendChannel,
symbols: list[str],
feed_is_live: trio.Event,
loglevel: str = None,
# backend specific
sub_type: str = 'ohlc',
# startup sync
task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,
) -> None:
'''
Subscribe for ohlc stream of quotes for ``pairs``.
``pairs`` must be formatted <crypto_symbol>/<fiat_symbol>.
'''
# XXX: required to propagate ``tractor`` loglevel to piker logging
get_console_log(loglevel or tractor.current_actor().loglevel)
ws_pairs = {}
sym_infos = {}
async with open_cached_client('kraken') as client, send_chan as send_chan:
# keep client cached for real-time section
for sym in symbols:
# transform to upper since piker style is always lower
sym = sym.upper()
sym_info = await client.symbol_info(sym)
try:
si = Pair(**sym_info) # validation
except TypeError:
fields_diff = set(sym_info) - set(Pair.__struct_fields__)
raise TypeError(
f'Missing msg fields {fields_diff}'
)
syminfo = si.to_dict()
syminfo['price_tick_size'] = 1 / 10**si.pair_decimals
syminfo['lot_tick_size'] = 1 / 10**si.lot_decimals
syminfo['asset_type'] = 'crypto'
sym_infos[sym] = syminfo
ws_pairs[sym] = si.wsname
symbol = symbols[0].lower()
init_msgs = {
# pass back token, and bool, signalling if we're the writer
# and that history has been written
symbol: {
'symbol_info': sym_infos[sym],
'shm_write_opts': {'sum_tick_vml': False},
'fqsn': sym,
},
}
@acm
async def subscribe(ws: NoBsWs):
# XXX: setup subs
# https://docs.kraken.com/websockets/#message-subscribe
# specific logic for this in kraken's sync client:
# https://github.com/krakenfx/kraken-wsclient-py/blob/master/kraken_wsclient_py/kraken_wsclient_py.py#L188
ohlc_sub = {
'event': 'subscribe',
'pair': list(ws_pairs.values()),
'subscription': {
'name': 'ohlc',
'interval': 1,
},
}
# TODO: we want to eventually allow unsubs which should
# be completely fine to request from a separate task
# since internally the ws methods appear to be FIFO
# locked.
await ws.send_msg(ohlc_sub)
# trade data (aka L1)
l1_sub = {
'event': 'subscribe',
'pair': list(ws_pairs.values()),
'subscription': {
'name': 'spread',
# 'depth': 10}
},
}
# pull a first quote and deliver
await ws.send_msg(l1_sub)
yield
# unsub from all pairs on teardown
if ws.connected():
await ws.send_msg({
'pair': list(ws_pairs.values()),
'event': 'unsubscribe',
'subscription': ['ohlc', 'spread'],
})
# XXX: do we need to ack the unsub?
# await ws.recv_msg()
# see the tips on reconnection logic:
# https://support.kraken.com/hc/en-us/articles/360044504011-WebSocket-API-unexpected-disconnections-from-market-data-feeds
ws: NoBsWs
async with (
open_autorecon_ws(
'wss://ws.kraken.com/',
fixture=subscribe,
) as ws,
aclosing(process_data_feed_msgs(ws)) as msg_gen,
):
# pull a first quote and deliver
typ, ohlc_last = await anext(msg_gen)
topic, quote = normalize(ohlc_last)
task_status.started((init_msgs, quote))
# lol, only "closes" when they're margin squeezing clients ;P
feed_is_live.set()
# keep start of last interval for volume tracking
last_interval_start = ohlc_last.etime
# start streaming
async for typ, ohlc in msg_gen:
if typ == 'ohlc':
# TODO: can get rid of all this by using
# ``trades`` subscription...
# generate tick values to match time & sales pane:
# https://trade.kraken.com/charts/KRAKEN:BTC-USD?period=1m
volume = ohlc.volume
# new OHLC sample interval
if ohlc.etime > last_interval_start:
last_interval_start = ohlc.etime
tick_volume = volume
else:
# this is the tick volume *within the interval*
tick_volume = volume - ohlc_last.volume
ohlc_last = ohlc
last = ohlc.close
if tick_volume:
ohlc.ticks.append({
'type': 'trade',
'price': last,
'size': tick_volume,
})
topic, quote = normalize(ohlc)
elif typ == 'l1':
quote = ohlc
topic = quote['symbol'].lower()
await send_chan.send({topic: quote})
@tractor.context
async def open_symbol_search(
ctx: tractor.Context,
) -> Client:
async with open_cached_client('kraken') as client:
# load all symbols locally for fast search
cache = await client.cache_symbols()
await ctx.started(cache)
async with ctx.open_stream() as stream:
async for pattern in stream:
matches = fuzzy.extractBests(
pattern,
cache,
score_cutoff=50,
)
# repack in dict form
await stream.send(
{item[0]['altname']: item[0]
for item in matches}
)

View File

@ -18,9 +18,3 @@
Market machinery for order executions, book, management. Market machinery for order executions, book, management.
""" """
from ._client import open_ems
__all__ = [
'open_ems',
]

View File

@ -22,10 +22,54 @@ from enum import Enum
from typing import Optional from typing import Optional
from bidict import bidict from bidict import bidict
from pydantic import BaseModel, validator
from ..data._source import Symbol from ..data._source import Symbol
from ..data.types import Struct from ._messages import BrokerdPosition, Status
from ..pp import Position
class Position(BaseModel):
'''
Basic pp (personal position) model with attached fills history.
This type should be IPC wire ready?
'''
symbol: Symbol
# last size and avg entry price
size: float
avg_price: float # TODO: contextual pricing
# ordered record of known constituent trade messages
fills: list[Status] = []
def update_from_msg(
self,
msg: BrokerdPosition,
) -> None:
# XXX: better place to do this?
symbol = self.symbol
lot_size_digits = symbol.lot_size_digits
avg_price, size = (
round(msg['avg_price'], ndigits=symbol.tick_size_digits),
round(msg['size'], ndigits=lot_size_digits),
)
self.avg_price = avg_price
self.size = size
@property
def dsize(self) -> float:
'''
The "dollar" size of the pp, normally in trading (fiat) unit
terms.
'''
return self.avg_price * self.size
_size_units = bidict({ _size_units = bidict({
@ -40,9 +84,34 @@ SizeUnit = Enum(
) )
class Allocator(Struct): class Allocator(BaseModel):
class Config:
validate_assignment = True
copy_on_model_validation = False
arbitrary_types_allowed = True
# required to get the account validator lookup working?
extra = 'allow'
underscore_attrs_are_private = False
symbol: Symbol symbol: Symbol
account: Optional[str] = 'paper'
# TODO: for enums this clearly doesn't fucking work, you can't set
# a default at startup by passing in a `dict` but yet you can set
# that value through assignment..for wtv cucked reason.. honestly, pure
# unintuitive garbage.
size_unit: str = 'currency'
_size_units: dict[str, Optional[str]] = _size_units
@validator('size_unit', pre=True)
def maybe_lookup_key(cls, v):
# apply the corresponding enum key for the text "description" value
if v not in _size_units:
return _size_units.inverse[v]
assert v in _size_units
return v
# TODO: if we ever want ot support non-uniform entry-slot-proportion # TODO: if we ever want ot support non-uniform entry-slot-proportion
# "sizes" # "sizes"
@ -51,28 +120,6 @@ class Allocator(Struct):
units_limit: float units_limit: float
currency_limit: float currency_limit: float
slots: int slots: int
account: Optional[str] = 'paper'
_size_units: bidict[str, Optional[str]] = _size_units
# TODO: for enums this clearly doesn't fucking work, you can't set
# a default at startup by passing in a `dict` but yet you can set
# that value through assignment..for wtv cucked reason.. honestly, pure
# unintuitive garbage.
_size_unit: str = 'currency'
@property
def size_unit(self) -> str:
return self._size_unit
@size_unit.setter
def size_unit(self, v: str) -> Optional[str]:
if v not in _size_units:
v = _size_units.inverse[v]
assert v in _size_units
self._size_unit = v
return v
def step_sizes( def step_sizes(
self, self,
@ -93,13 +140,10 @@ class Allocator(Struct):
else: else:
return self.units_limit return self.units_limit
def limit_info(self) -> tuple[str, float]:
return self.size_unit, self.limit()
def next_order_info( def next_order_info(
self, self,
# we only need a startup size for exit calcs, we can then # we only need a startup size for exit calcs, we can the
# determine how large slots should be if the initial pp size was # determine how large slots should be if the initial pp size was
# larger then the current live one, and the live one is smaller # larger then the current live one, and the live one is smaller
# then the initial config settings. # then the initial config settings.
@ -129,7 +173,7 @@ class Allocator(Struct):
l_sub_pp = self.units_limit - abs_live_size l_sub_pp = self.units_limit - abs_live_size
elif size_unit == 'currency': elif size_unit == 'currency':
live_cost_basis = abs_live_size * live_pp.ppu live_cost_basis = abs_live_size * live_pp.avg_price
slot_size = currency_per_slot / price slot_size = currency_per_slot / price
l_sub_pp = (self.currency_limit - live_cost_basis) / price l_sub_pp = (self.currency_limit - live_cost_basis) / price
@ -140,14 +184,12 @@ class Allocator(Struct):
# an entry (adding-to or starting a pp) # an entry (adding-to or starting a pp)
if ( if (
action == 'buy' and live_size > 0 or
action == 'sell' and live_size < 0 or
live_size == 0 live_size == 0
or (action == 'buy' and live_size > 0)
or action == 'sell' and live_size < 0
): ):
order_size = min(
slot_size, order_size = min(slot_size, l_sub_pp)
max(l_sub_pp, 0),
)
# an exit (removing-from or going to net-zero pp) # an exit (removing-from or going to net-zero pp)
else: else:
@ -163,7 +205,7 @@ class Allocator(Struct):
if size_unit == 'currency': if size_unit == 'currency':
# compute the "projected" limit's worth of units at the # compute the "projected" limit's worth of units at the
# current pp (weighted) price: # current pp (weighted) price:
slot_size = currency_per_slot / live_pp.ppu slot_size = currency_per_slot / live_pp.avg_price
else: else:
slot_size = u_per_slot slot_size = u_per_slot
@ -202,12 +244,7 @@ class Allocator(Struct):
if order_size < slot_size: if order_size < slot_size:
# compute a fractional slots size to display # compute a fractional slots size to display
slots_used = self.slots_used( slots_used = self.slots_used(
Position( Position(symbol=sym, size=order_size, avg_price=price)
symbol=sym,
size=order_size,
ppu=price,
bsuid=sym,
)
) )
return { return {
@ -234,8 +271,8 @@ class Allocator(Struct):
abs_pp_size = abs(pp.size) abs_pp_size = abs(pp.size)
if self.size_unit == 'currency': if self.size_unit == 'currency':
# live_currency_size = size or (abs_pp_size * pp.ppu) # live_currency_size = size or (abs_pp_size * pp.avg_price)
live_currency_size = abs_pp_size * pp.ppu live_currency_size = abs_pp_size * pp.avg_price
prop = live_currency_size / self.currency_limit prop = live_currency_size / self.currency_limit
else: else:
@ -247,6 +284,14 @@ class Allocator(Struct):
return round(prop * self.slots) return round(prop * self.slots)
_derivs = (
'future',
'continuous_future',
'option',
'futures_option',
)
def mk_allocator( def mk_allocator(
symbol: Symbol, symbol: Symbol,
@ -255,7 +300,7 @@ def mk_allocator(
# default allocation settings # default allocation settings
defaults: dict[str, float] = { defaults: dict[str, float] = {
'account': None, # select paper by default 'account': None, # select paper by default
# 'size_unit': 'currency', 'size_unit': 'currency',
'units_limit': 400, 'units_limit': 400,
'currency_limit': 5e3, 'currency_limit': 5e3,
'slots': 4, 'slots': 4,
@ -273,9 +318,42 @@ def mk_allocator(
'currency_limit': 6e3, 'currency_limit': 6e3,
'slots': 6, 'slots': 6,
} }
defaults.update(user_def) defaults.update(user_def)
return Allocator( alloc = Allocator(
symbol=symbol, symbol=symbol,
**defaults, **defaults,
) )
asset_type = symbol.type_key
# specific configs by asset class / type
if asset_type in _derivs:
# since it's harder to know how currency "applies" in this case
# given leverage properties
alloc.size_unit = '# units'
# set units limit to slots size thus making the next
# entry step 1.0
alloc.units_limit = alloc.slots
# if the current position is already greater then the limit
# settings, increase the limit to the current position
if alloc.size_unit == 'currency':
startup_size = startup_pp.size * startup_pp.avg_price
if startup_size > alloc.currency_limit:
alloc.currency_limit = round(startup_size, ndigits=2)
else:
startup_size = abs(startup_pp.size)
if startup_size > alloc.units_limit:
alloc.units_limit = startup_size
if asset_type in _derivs:
alloc.slots = alloc.units_limit
return alloc

View File

@ -18,32 +18,26 @@
Orders and execution client API. Orders and execution client API.
""" """
from __future__ import annotations
from contextlib import asynccontextmanager as acm from contextlib import asynccontextmanager as acm
from typing import Dict
from pprint import pformat from pprint import pformat
from typing import TYPE_CHECKING from dataclasses import dataclass, field
import trio import trio
import tractor import tractor
from tractor.trionics import broadcast_receiver from tractor.trionics import broadcast_receiver
from ..log import get_logger from ..log import get_logger
from ..data.types import Struct from ._ems import _emsd_main
from .._daemon import maybe_open_emsd from .._daemon import maybe_open_emsd
from ._messages import Order, Cancel from ._messages import Order, Cancel
from ..brokers import get_brokermod
if TYPE_CHECKING:
from ._messages import (
BrokerdPosition,
Status,
)
log = get_logger(__name__) log = get_logger(__name__)
class OrderBook(Struct): @dataclass
class OrderBook:
'''EMS-client-side order book ctl and tracking. '''EMS-client-side order book ctl and tracking.
A style similar to "model-view" is used here where this api is A style similar to "model-view" is used here where this api is
@ -58,18 +52,20 @@ class OrderBook(Struct):
# mem channels used to relay order requests to the EMS daemon # mem channels used to relay order requests to the EMS daemon
_to_ems: trio.abc.SendChannel _to_ems: trio.abc.SendChannel
_from_order_book: trio.abc.ReceiveChannel _from_order_book: trio.abc.ReceiveChannel
_sent_orders: dict[str, Order] = {}
_sent_orders: Dict[str, Order] = field(default_factory=dict)
_ready_to_receive: trio.Event = trio.Event()
def send( def send(
self, self,
msg: Order | dict, msg: Order,
) -> dict: ) -> dict:
self._sent_orders[msg.oid] = msg self._sent_orders[msg.oid] = msg
self._to_ems.send_nowait(msg) self._to_ems.send_nowait(msg.dict())
return msg return msg
def send_update( def update(
self, self,
uuid: str, uuid: str,
@ -77,8 +73,9 @@ class OrderBook(Struct):
) -> dict: ) -> dict:
cmd = self._sent_orders[uuid] cmd = self._sent_orders[uuid]
msg = cmd.copy(update=data) msg = cmd.dict()
self._sent_orders[uuid] = msg msg.update(data)
self._sent_orders[uuid] = Order(**msg)
self._to_ems.send_nowait(msg) self._to_ems.send_nowait(msg)
return cmd return cmd
@ -86,18 +83,12 @@ class OrderBook(Struct):
"""Cancel an order (or alert) in the EMS. """Cancel an order (or alert) in the EMS.
""" """
cmd = self._sent_orders.get(uuid) cmd = self._sent_orders[uuid]
if not cmd:
log.error(
f'Unknown order {uuid}!?\n'
f'Maybe there is a stale entry or line?\n'
f'You should report this as a bug!'
)
msg = Cancel( msg = Cancel(
oid=uuid, oid=uuid,
symbol=cmd.symbol, symbol=cmd.symbol,
) )
self._to_ems.send_nowait(msg) self._to_ems.send_nowait(msg.dict())
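For orientation, a hedged usage sketch of the client-side ``OrderBook`` calls shown above (the ``order`` msg is assumed to have been built elsewhere, eg. by the order mode UI):

book = get_orders()      # module-level singleton accessor (see below)
book.send(order)         # stage a new dark/live/alert order with the EMS
book.cancel(order.oid)   # request cancellation of the same dialog id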
_orders: OrderBook = None _orders: OrderBook = None
@ -158,36 +149,21 @@ async def relay_order_cmds_from_sync_code(
book = get_orders() book = get_orders()
async with book._from_order_book.subscribe() as orders_stream: async with book._from_order_book.subscribe() as orders_stream:
async for cmd in orders_stream: async for cmd in orders_stream:
sym = cmd.symbol if cmd['symbol'] == symbol_key:
msg = pformat(cmd) log.info(f'Send order cmd:\n{pformat(cmd)}')
if sym == symbol_key:
log.info(f'Send order cmd:\n{msg}')
# send msg over IPC / wire # send msg over IPC / wire
await to_ems_stream.send(cmd) await to_ems_stream.send(cmd)
else:
log.warning(
f'Ignoring unmatched order cmd for {sym} != {symbol_key}:'
f'\n{msg}'
)
@acm @acm
async def open_ems( async def open_ems(
fqsn: str, fqsn: str,
mode: str = 'live',
loglevel: str = 'error',
) -> tuple[ ) -> (
OrderBook, OrderBook,
tractor.MsgStream, tractor.MsgStream,
dict[ dict,
# brokername, acctid ):
tuple[str, str],
list[BrokerdPosition],
],
list[str],
dict[str, Status],
]:
''' '''
Spawn an EMS daemon and begin sending orders and receiving Spawn an EMS daemon and begin sending orders and receiving
alerts. alerts.
@ -230,36 +206,18 @@ async def open_ems(
async with maybe_open_emsd(broker) as portal: async with maybe_open_emsd(broker) as portal:
mod = get_brokermod(broker)
if (
not getattr(mod, 'trades_dialogue', None)
or mode == 'paper'
):
mode = 'paper'
from ._ems import _emsd_main
async with ( async with (
# connect to emsd # connect to emsd
portal.open_context( portal.open_context(
_emsd_main, _emsd_main,
fqsn=fqsn, fqsn=fqsn,
exec_mode=mode,
loglevel=loglevel,
) as ( ) as (ctx, (positions, accounts)),
ctx,
(
positions,
accounts,
dialogs,
)
),
# open 2-way trade command stream # open 2-way trade command stream
ctx.open_stream() as trades_stream, ctx.open_stream() as trades_stream,
): ):
# start sync code order msg delivery task
async with trio.open_nursery() as n: async with trio.open_nursery() as n:
n.start_soon( n.start_soon(
relay_order_cmds_from_sync_code, relay_order_cmds_from_sync_code,
@ -267,10 +225,4 @@ async def open_ems(
trades_stream trades_stream
) )
yield ( yield book, trades_stream, positions, accounts
book,
trades_stream,
positions,
accounts,
dialogs,
)
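And a hedged sketch of consuming the new 5-tuple yielded by ``open_ems()`` (the fqsn and loop body are illustrative only):

async with open_ems(
    'mnq.globex.ib',      # hypothetical fqsn
    mode='paper',
) as (
    book,                 # OrderBook: send()/send_update()/cancel()
    trades_stream,        # tractor.MsgStream of EMS status msgs
    positions,            # dict[(broker, acctid), list[BrokerdPosition]]
    accounts,             # list[str]
    dialogs,              # dict[str, Status] of active order flows
):
    async for msg in trades_stream:
        ...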
File diff suppressed because it is too large
View File
@ -1,5 +1,5 @@
# piker: trading gear for hackers # piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers) # Copyright (C) Tyler Goodlet (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by # it under the terms of the GNU Affero General Public License as published by
@ -15,162 +15,108 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>. # along with this program. If not, see <https://www.gnu.org/licenses/>.
""" """
Clearing sub-system messages and protocols. Clearing system messaging types and protocols.
""" """
# from collections import ( from typing import Optional, Union
# ChainMap,
# deque,
# )
from typing import (
Optional,
Literal,
)
from msgspec import field # TODO: try out just encoding/send direction for now?
# import msgspec
from pydantic import BaseModel
from ..data._source import Symbol from ..data._source import Symbol
from ..data.types import Struct
# TODO: a composite for tracking msg flow on 2-legged
# dialogs.
# class Dialog(ChainMap):
# '''
# Msg collection abstraction to easily track the state changes of
# a msg flow in one high level, query-able and immutable construct.
# The main use case is to query data from a (long-running)
# msg-transaction-sequence
# '''
# def update(
# self,
# msg,
# ) -> None:
# self.maps.insert(0, msg.to_dict())
# def flatten(self) -> dict:
# return dict(self)
# TODO: ``msgspec`` stuff worth paying attention to:
# - schema evolution:
# https://jcristharif.com/msgspec/usage.html#schema-evolution
# - for eg. ``BrokerdStatus``, instead just have separate messages?
# - use literals for a common msg determined by diff keys?
# - https://jcristharif.com/msgspec/usage.html#literal
# --------------
# Client -> emsd # Client -> emsd
# --------------
class Order(Struct):
# TODO: ideally we can combine these 2 fields into
# 1 and just use the size polarity to determine a buy/sell.
# i would like to see this become more like
# https://jcristharif.com/msgspec/usage.html#literal
# action: Literal[
# 'live',
# 'dark',
# 'alert',
# ]
action: Literal[
'buy',
'sell',
'alert',
]
# determines whether the create execution
# will be submitted to the ems or directly to
# the backend broker
exec_mode: Literal[
'dark',
'live',
# 'paper', no right?
]
    # internal ``emsd`` unique "order id"
oid: str # uuid4
symbol: str | Symbol
account: str # should we set a default as '' ?
price: float
size: float # -ve is "sell", +ve is "buy"
brokers: Optional[list[str]] = []
class Cancel(Struct): class Cancel(BaseModel):
''' '''Cancel msg for removing a dark (ems triggered) or
Cancel msg for removing a dark (ems triggered) or
broker-submitted (live) trigger/order. broker-submitted (live) trigger/order.
''' '''
action: str = 'cancel'
oid: str # uuid4 oid: str # uuid4
symbol: str symbol: str
action: str = 'cancel'
# -------------- class Order(BaseModel):
action: str # {'buy', 'sell', 'alert'}
    # internal ``emsd`` unique "order id"
oid: str # uuid4
symbol: Union[str, Symbol]
account: str # should we set a default as '' ?
price: float
size: float
brokers: list[str]
# Assigned once initial ack is received
# ack_time_ns: Optional[int] = None
# determines whether the create execution
# will be submitted to the ems or directly to
# the backend broker
exec_mode: str # {'dark', 'live', 'paper'}
class Config:
# just for pre-loading a ``Symbol`` when used
# in the order mode staging process
arbitrary_types_allowed = True
# don't copy this model instance when used in
# a recursive model
copy_on_model_validation = False
# Client <- emsd # Client <- emsd
# --------------
# update msgs from ems which relay state change info # update msgs from ems which relay state change info
# from the active clearing engine. # from the active clearing engine.
class Status(Struct):
time_ns: int class Status(BaseModel):
oid: str # uuid4 ems-order dialog id
resp: Literal[
'pending', # acked by broker but not yet open
'open',
'dark_open', # dark/algo triggered order is open in ems clearing loop
'triggered', # above triggered order sent to brokerd, or an alert closed
'closed', # fully cleared all size/units
'fill', # partial execution
'canceled',
'error',
]
name: str = 'status' name: str = 'status'
oid: str # uuid4
time_ns: int
# {
# 'dark_submitted',
# 'dark_cancelled',
# 'dark_triggered',
# 'broker_submitted',
# 'broker_cancelled',
# 'broker_executed',
# 'broker_filled',
# 'broker_errored',
# 'alert_submitted',
# 'alert_triggered',
# }
resp: str # "response", see above
# symbol: str
# trigger info
trigger_price: Optional[float] = None
# price: float
# broker: Optional[str] = None
# this maps normally to the ``BrokerdOrder.reqid`` below, an id # this maps normally to the ``BrokerdOrder.reqid`` below, an id
# normally allocated internally by the backend broker routing system # normally allocated internally by the backend broker routing system
reqid: Optional[int | str] = None broker_reqid: Optional[Union[int, str]] = None
# the (last) source order/request msg if provided # for relaying backend msg data "through" the ems layer
# (eg. the Order/Cancel which causes this msg) and
# acts as a back-reference to the corresponding
# request message which was the source of this msg.
req: Order | None = None
# XXX: better design/name here?
# flag that can be set to indicate a message for an order
# event that wasn't originated by piker's emsd (eg. some external
    # trading system which does its own order control but that you
# might want to "track" using piker UIs/systems).
src: Optional[str] = None
# set when a cancel request msg was set for this order flow dialog
# but the brokerd dialog isn't yet in a cancelled state.
cancel_called: bool = False
# for relaying a boxed brokerd-dialog-side msg data "through" the
# ems layer to clients.
brokerd_msg: dict = {} brokerd_msg: dict = {}
# ---------------
# emsd -> brokerd # emsd -> brokerd
# ---------------
# requests *sent* from ems to respective backend broker daemon # requests *sent* from ems to respective backend broker daemon
class BrokerdCancel(Struct): class BrokerdCancel(BaseModel):
action: str = 'cancel'
oid: str # piker emsd order id oid: str # piker emsd order id
time_ns: int time_ns: int
@ -181,39 +127,34 @@ class BrokerdCancel(Struct):
# for setting a unique order id then this value will be relayed back # for setting a unique order id then this value will be relayed back
# on the emsd order request stream as the ``BrokerdOrderAck.reqid`` # on the emsd order request stream as the ``BrokerdOrderAck.reqid``
# field # field
reqid: Optional[int | str] = None reqid: Optional[Union[int, str]] = None
action: str = 'cancel'
class BrokerdOrder(Struct): class BrokerdOrder(BaseModel):
action: str # {buy, sell}
oid: str oid: str
account: str account: str
time_ns: int time_ns: int
symbol: str # fqsn
price: float
size: float
# TODO: if we instead rely on a +ve/-ve size to determine
# the action we more or less don't need this field right?
action: str = '' # {buy, sell}
# "broker request id": broker specific/internal order id if this is # "broker request id": broker specific/internal order id if this is
# None, creates a new order otherwise if the id is valid the backend # None, creates a new order otherwise if the id is valid the backend
# api must modify the existing matching order. If the broker allows # api must modify the existing matching order. If the broker allows
# for setting a unique order id then this value will be relayed back # for setting a unique order id then this value will be relayed back
# on the emsd order request stream as the ``BrokerdOrderAck.reqid`` # on the emsd order request stream as the ``BrokerdOrderAck.reqid``
# field # field
reqid: Optional[int | str] = None reqid: Optional[Union[int, str]] = None
symbol: str # symbol.<providername> ?
price: float
size: float
# ---------------
# emsd <- brokerd # emsd <- brokerd
# ---------------
# requests *received* to ems from broker backend # requests *received* to ems from broker backend
class BrokerdOrderAck(Struct):
class BrokerdOrderAck(BaseModel):
''' '''
    Immediate response to a brokerd order request providing the broker Immediate response to a brokerd order request providing the broker
specific unique order id so that the EMS can associate this specific unique order id so that the EMS can associate this
@ -221,93 +162,102 @@ class BrokerdOrderAck(Struct):
``.oid`` (which is a uuid4). ``.oid`` (which is a uuid4).
''' '''
name: str = 'ack'
# defined and provided by backend # defined and provided by backend
reqid: int | str reqid: Union[int, str]
# emsd id originally sent in matching request msg # emsd id originally sent in matching request msg
oid: str oid: str
account: str = '' account: str = ''
name: str = 'ack'
class BrokerdStatus(Struct): class BrokerdStatus(BaseModel):
reqid: int | str
time_ns: int
status: Literal[
'open',
'canceled',
'fill',
'pending',
'error',
]
account: str
name: str = 'status' name: str = 'status'
reqid: Union[int, str]
time_ns: int
# XXX: should be best effort set for every update
account: str = ''
# {
# 'submitted',
# 'cancelled',
# 'filled',
# }
status: str
filled: float = 0.0 filled: float = 0.0
reason: str = '' reason: str = ''
remaining: float = 0.0 remaining: float = 0.0
# external: bool = False # XXX: better design/name here?
# flag that can be set to indicate a message for an order
# event that wasn't originated by piker's emsd (eg. some external
    # trading system which does its own order control but that you
# might want to "track" using piker UIs/systems).
external: bool = False
# XXX: not required schema as of yet # XXX: not required schema as of yet
broker_details: dict = field(default_factory=lambda: { broker_details: dict = {
'name': '', 'name': '',
}) }
class BrokerdFill(Struct): class BrokerdFill(BaseModel):
''' '''
A single message indicating a "fill-details" event from the broker A single message indicating a "fill-details" event from the broker
    if available. if available.
''' '''
name: str = 'fill'
reqid: Union[int, str]
time_ns: int
    # order execution related
action: str
size: float
price: float
    broker_details: dict = {}  # meta-data (eg. commissions etc.)
# brokerd timestamp required for order mode arrow placement on x-axis # brokerd timestamp required for order mode arrow placement on x-axis
# TODO: maybe int if we force ns? # TODO: maybe int if we force ns?
# we need to normalize this somehow since backends will use their # we need to normalize this somehow since backends will use their
# own format and likely across many disparate epoch clocks... # own format and likely across many disparate epoch clocks...
broker_time: float broker_time: float
reqid: int | str
time_ns: int
    # order execution related # order execution related
size: float
price: float
name: str = 'fill'
action: Optional[str] = None
    broker_details: dict = {}  # meta-data (eg. commissions etc.)
class BrokerdError(Struct): class BrokerdError(BaseModel):
''' '''
Optional error type that can be relayed to emsd for error handling. Optional error type that can be relayed to emsd for error handling.
This is still a TODO thing since we're not sure how to employ it yet. This is still a TODO thing since we're not sure how to employ it yet.
''' '''
name: str = 'error'
oid: str oid: str
symbol: str
reason: str
# if no brokerd order request was actually submitted (eg. we errored # if no brokerd order request was actually submitted (eg. we errored
    # at the ``pikerd`` layer) then there will be no ``reqid`` allocated. # at the ``pikerd`` layer) then there will be no ``reqid`` allocated.
reqid: Optional[int | str] = None reqid: Optional[Union[int, str]] = None
name: str = 'error' symbol: str
reason: str
broker_details: dict = {} broker_details: dict = {}
class BrokerdPosition(Struct): class BrokerdPosition(BaseModel):
'''Position update event from brokerd. '''Position update event from brokerd.
''' '''
name: str = 'position'
broker: str broker: str
account: str account: str
symbol: str symbol: str
currency: str
size: float size: float
avg_price: float avg_price: float
currency: str = ''
name: str = 'position'
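A hedged sketch of building the new ``msgspec``-``Struct`` based msgs and the ``.copy(update=...)`` pattern the client-side book uses (field values illustrative only):

order = Order(
    action='buy',
    exec_mode='dark',
    oid='<uuid4>',            # hypothetical dialog id
    symbol='xbtusdt.kraken',
    account='paper',
    price=20_000.0,
    size=0.1,
)
# functional-style modify, as done in OrderBook.send_update()
repriced = order.copy(update={'price': 19_500.0})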
View File
@ -18,71 +18,54 @@
Fake trading for forward testing. Fake trading for forward testing.
""" """
from collections import defaultdict
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from datetime import datetime from datetime import datetime
from operator import itemgetter from operator import itemgetter
import itertools
import time import time
from typing import ( from typing import Tuple, Optional, Callable
Any,
Optional,
Callable,
)
import uuid import uuid
from bidict import bidict from bidict import bidict
import pendulum
import trio import trio
import tractor import tractor
from dataclasses import dataclass
from .. import data from .. import data
from ..data._source import Symbol
from ..data.types import Struct
from ..pp import (
Position,
Transaction,
)
from ..data._normalize import iterticks from ..data._normalize import iterticks
from ..data._source import unpack_fqsn from ..data._source import unpack_fqsn
from ..log import get_logger from ..log import get_logger
from ._messages import ( from ._messages import (
BrokerdCancel, BrokerdCancel, BrokerdOrder, BrokerdOrderAck, BrokerdStatus,
BrokerdOrder, BrokerdFill, BrokerdPosition, BrokerdError
BrokerdOrderAck,
BrokerdStatus,
BrokerdFill,
BrokerdPosition,
BrokerdError,
) )
log = get_logger(__name__) log = get_logger(__name__)
class PaperBoi(Struct): @dataclass
''' class PaperBoi:
Emulates a broker order client providing approximately the same API """
and delivering an order-event response stream but with methods for Emulates a broker order client providing the same API and
delivering an order-event response stream but with methods for
triggering desired events based on forward testing engine triggering desired events based on forward testing engine
requirements (eg open, closed, fill msgs). requirements.
''' """
broker: str broker: str
ems_trades_stream: tractor.MsgStream ems_trades_stream: tractor.MsgStream
# map of paper "live" orders which be used # map of paper "live" orders which be used
# to simulate fills based on paper engine settings # to simulate fills based on paper engine settings
_buys: defaultdict[str, bidict] _buys: bidict
_sells: defaultdict[str, bidict] _sells: bidict
_reqids: bidict _reqids: bidict
_positions: dict[str, Position] _positions: dict[str, BrokerdPosition]
_trade_ledger: dict[str, Any]
# init edge case L1 spread # init edge case L1 spread
last_ask: tuple[float, float] = (float('inf'), 0) # price, size last_ask: Tuple[float, float] = (float('inf'), 0) # price, size
last_bid: tuple[float, float] = (0, 0) last_bid: Tuple[float, float] = (0, 0)
async def submit_limit( async def submit_limit(
self, self,
@ -92,24 +75,27 @@ class PaperBoi(Struct):
action: str, action: str,
size: float, size: float,
reqid: Optional[str], reqid: Optional[str],
) -> int: ) -> int:
''' """Place an order and return integer request id provided by client.
Place an order and return integer request id provided by client.
"""
is_modify: bool = False
if reqid is None:
reqid = str(uuid.uuid4())
else:
# order is already existing, this is a modify
(oid, symbol, action, old_price) = self._reqids[reqid]
assert old_price != price
is_modify = True
# register order internally
self._reqids[reqid] = (oid, symbol, action, price)
'''
if action == 'alert': if action == 'alert':
# bypass all fill simulation # bypass all fill simulation
return reqid return reqid
entry = self._reqids.get(reqid)
if entry:
# order is already existing, this is a modify
(oid, symbol, action, old_price) = entry
else:
# register order internally
self._reqids[reqid] = (oid, symbol, action, price)
# TODO: net latency model # TODO: net latency model
        # we checkpoint here quickly particularly # we checkpoint here quickly particularly
# for dark orders since we want the dark_executed # for dark orders since we want the dark_executed
@ -121,18 +107,15 @@ class PaperBoi(Struct):
size = -size size = -size
msg = BrokerdStatus( msg = BrokerdStatus(
status='open', status='submitted',
# account=f'paper_{self.broker}',
account='paper',
reqid=reqid, reqid=reqid,
broker=self.broker,
time_ns=time.time_ns(), time_ns=time.time_ns(),
filled=0.0, filled=0.0,
reason='paper_trigger', reason='paper_trigger',
remaining=size, remaining=size,
broker_details={'name': 'paperboi'},
) )
await self.ems_trades_stream.send(msg) await self.ems_trades_stream.send(msg.dict())
# if we're already a clearing price simulate an immediate fill # if we're already a clearing price simulate an immediate fill
if ( if (
@ -140,28 +123,28 @@ class PaperBoi(Struct):
) or ( ) or (
action == 'sell' and (clear_price := self.last_bid[0]) >= price action == 'sell' and (clear_price := self.last_bid[0]) >= price
): ):
await self.fake_fill( await self.fake_fill(symbol, clear_price, size, action, reqid, oid)
symbol,
clear_price,
size,
action,
reqid,
oid,
)
        # register this submission as a paper live order
else: else:
            # set the simulated order in the respective table for lookup # register this submission as a paper live order
# and trigger by the simulated clearing task normally
# running ``simulate_fills()``. # submit order to book simulation fill loop
if action == 'buy': if action == 'buy':
orders = self._buys orders = self._buys
elif action == 'sell': elif action == 'sell':
orders = self._sells orders = self._sells
# {symbol -> bidict[oid, (<price data>)]} # set the simulated order in the respective table for lookup
orders[symbol][oid] = (price, size, reqid, action) # and trigger by the simulated clearing task normally
# running ``simulate_fills()``.
if is_modify:
# remove any existing order for the old price
orders[symbol].pop((oid, old_price))
# buys/sells: (symbol -> (price -> order))
orders.setdefault(symbol, {})[(oid, price)] = (size, reqid, action)
return reqid return reqid
@ -174,26 +157,26 @@ class PaperBoi(Struct):
oid, symbol, action, price = self._reqids[reqid] oid, symbol, action, price = self._reqids[reqid]
if action == 'buy': if action == 'buy':
self._buys[symbol].pop(oid, None) self._buys[symbol].pop((oid, price))
elif action == 'sell': elif action == 'sell':
self._sells[symbol].pop(oid, None) self._sells[symbol].pop((oid, price))
# TODO: net latency model # TODO: net latency model
await trio.sleep(0.05) await trio.sleep(0.05)
msg = BrokerdStatus( msg = BrokerdStatus(
status='canceled', status='cancelled',
account='paper', oid=oid,
reqid=reqid, reqid=reqid,
broker=self.broker,
time_ns=time.time_ns(), time_ns=time.time_ns(),
broker_details={'name': 'paperboi'},
) )
await self.ems_trades_stream.send(msg) await self.ems_trades_stream.send(msg.dict())
async def fake_fill( async def fake_fill(
self, self,
fqsn: str, symbol: str,
price: float, price: float,
size: float, size: float,
action: str, # one of {'buy', 'sell'} action: str, # one of {'buy', 'sell'}
@ -207,21 +190,21 @@ class PaperBoi(Struct):
remaining: float = 0, remaining: float = 0,
) -> None: ) -> None:
''' """Pretend to fill a broker order @ price and size.
Pretend to fill a broker order @ price and size.
''' """
# TODO: net latency model # TODO: net latency model
await trio.sleep(0.05) await trio.sleep(0.05)
fill_time_ns = time.time_ns()
fill_time_s = time.time()
fill_msg = BrokerdFill( msg = BrokerdFill(
reqid=reqid, reqid=reqid,
time_ns=fill_time_ns, time_ns=time.time_ns(),
action=action, action=action,
size=size, size=size,
price=price, price=price,
broker_time=datetime.now().timestamp(), broker_time=datetime.now().timestamp(),
broker_details={ broker_details={
'paper_info': { 'paper_info': {
@ -231,67 +214,79 @@ class PaperBoi(Struct):
'name': self.broker + '_paper', 'name': self.broker + '_paper',
}, },
) )
log.info(f'Fake filling order:\n{fill_msg}') await self.ems_trades_stream.send(msg.dict())
await self.ems_trades_stream.send(fill_msg)
self._trade_ledger.update(fill_msg.to_dict())
if order_complete: if order_complete:
msg = BrokerdStatus( msg = BrokerdStatus(
reqid=reqid, reqid=reqid,
time_ns=time.time_ns(), time_ns=time.time_ns(),
# account=f'paper_{self.broker}',
account='paper', status='filled',
status='closed',
filled=size, filled=size,
remaining=0 if order_complete else remaining, remaining=0 if order_complete else remaining,
action=action,
size=size,
price=price,
broker_details={
'paper_info': {
'oid': oid,
},
'name': self.broker,
},
) )
await self.ems_trades_stream.send(msg) await self.ems_trades_stream.send(msg.dict())
# lookup any existing position # lookup any existing position
key = fqsn.rstrip(f'.{self.broker}') token = f'{symbol}.{self.broker}'
pp = self._positions.setdefault( pp_msg = self._positions.setdefault(
fqsn, token,
Position( BrokerdPosition(
Symbol( broker=self.broker,
key=key, account='paper',
broker_info={self.broker: {}}, symbol=symbol,
), # TODO: we need to look up the asset currency from
size=size, # broker info. i guess for crypto this can be
ppu=price, # inferred from the pair?
bsuid=key, currency='',
size=0.0,
avg_price=0,
) )
) )
t = Transaction(
fqsn=fqsn,
tid=oid,
size=size,
price=price,
cost=0, # TODO: cost model
dt=pendulum.from_timestamp(fill_time_s),
bsuid=key,
)
pp.add_clear(t)
pp_msg = BrokerdPosition( # "avg position price" calcs
broker=self.broker, # TODO: eventually it'd be nice to have a small set of routines
account='paper', # to do this stuff from a sequence of cleared orders to enable
symbol=fqsn, # so called "contextual positions".
# TODO: we need to look up the asset currency from new_size = size + pp_msg.size
# broker info. i guess for crypto this can be
# inferred from the pair?
currency='',
size=pp.size,
avg_price=pp.ppu,
)
await self.ems_trades_stream.send(pp_msg) # old size minus the new size gives us size differential with
# +ve -> increase in pp size
# -ve -> decrease in pp size
size_diff = abs(new_size) - abs(pp_msg.size)
if new_size == 0:
pp_msg.avg_price = 0
elif size_diff > 0:
# only update the "average position price" when the position
# size increases not when it decreases (i.e. the position is
# being made smaller)
pp_msg.avg_price = (
abs(size) * price + pp_msg.avg_price * abs(pp_msg.size)
) / abs(new_size)
pp_msg.size = new_size
await self.ems_trades_stream.send(pp_msg.dict())
async def simulate_fills( async def simulate_fills(
quote_stream: tractor.MsgStream, # noqa quote_stream: 'tractor.ReceiveStream', # noqa
client: PaperBoi, client: PaperBoi,
) -> None: ) -> None:
# TODO: more machinery to better simulate real-world market things: # TODO: more machinery to better simulate real-world market things:
@ -311,116 +306,61 @@ async def simulate_fills(
# this stream may eventually contain multiple symbols # this stream may eventually contain multiple symbols
async for quotes in quote_stream: async for quotes in quote_stream:
for sym, quote in quotes.items(): for sym, quote in quotes.items():
for tick in iterticks( for tick in iterticks(
quote, quote,
# dark order price filter(s) # dark order price filter(s)
types=('ask', 'bid', 'trade', 'last') types=('ask', 'bid', 'trade', 'last')
): ):
tick_price = tick['price'] # print(tick)
tick_price = tick.get('price')
ttype = tick['type']
buys: bidict[str, tuple] = client._buys[sym] if ttype in ('ask',):
iter_buys = reversed(sorted(
buys.values(),
key=itemgetter(0),
))
def buy_on_ask(our_price): client.last_ask = (
return tick_price <= our_price tick_price,
tick.get('size', client.last_ask[1]),
)
sells: bidict[str, tuple] = client._sells[sym] orders = client._buys.get(sym, {})
iter_sells = sorted(
sells.values(),
key=itemgetter(0)
)
def sell_on_bid(our_price): book_sequence = reversed(
return tick_price >= our_price sorted(orders.keys(), key=itemgetter(1)))
match tick: def pred(our_price):
return tick_price < our_price
# on an ask queue tick, only clear buy entries elif ttype in ('bid',):
case {
'price': tick_price,
'type': 'ask',
}:
client.last_ask = (
tick_price,
tick.get('size', client.last_ask[1]),
)
iter_entries = zip( client.last_bid = (
iter_buys, tick_price,
itertools.repeat(buy_on_ask) tick.get('size', client.last_bid[1]),
) )
# on a bid queue tick, only clear sell entries orders = client._sells.get(sym, {})
case { book_sequence = sorted(orders.keys(), key=itemgetter(1))
'price': tick_price,
'type': 'bid',
}:
client.last_bid = (
tick_price,
tick.get('size', client.last_bid[1]),
)
iter_entries = zip( def pred(our_price):
iter_sells, return tick_price > our_price
itertools.repeat(sell_on_bid)
)
# TODO: fix this block, though it definitely elif ttype in ('trade', 'last'):
# costs a lot more CPU-wise # TODO: simulate actual book queues and our orders
# - doesn't seem like clears are happening still on # place in it, might require full L2 data?
# "resting" limit orders? continue
case {
'price': tick_price,
'type': ('trade' | 'last'),
}:
# in the clearing price / last price case we
# want to iterate both sides of our book for
# clears since we don't know which direction the
# price is going to move (especially with HFT)
# and thus we simply interleave both sides (buys
# and sells) until one side clears and then
# break until the next tick?
def interleave():
for pair in zip(
iter_buys,
iter_sells,
):
for order_info, pred in zip(
pair,
itertools.cycle([buy_on_ask, sell_on_bid]),
):
yield order_info, pred
iter_entries = interleave() # iterate book prices descending
for oid, our_price in book_sequence:
if pred(our_price):
                    # NOTE: all other (non-clearable) tick event types # retrieve order info
                    # - we don't want to spin the simulated clear loop (size, reqid, action) = orders.pop((oid, our_price))
                    # below unnecessarily and further don't want to pop
# simulated live orders prematurely.
case _:
continue
# iterate all potentially clearable book prices
# in FIFO order per side.
for order_info, pred in iter_entries:
(our_price, size, reqid, action) = order_info
# print(order_info)
clearable = pred(our_price)
if clearable:
                        # pop and retrieve order info
oid = {
'buy': buys,
'sell': sells
}[action].inverse.pop(order_info)
# clearing price would have filled entirely # clearing price would have filled entirely
await client.fake_fill( await client.fake_fill(
fqsn=sym, symbol=sym,
# todo slippage to determine fill price # todo slippage to determine fill price
price=tick_price, price=tick_price,
size=size, size=size,
@ -428,6 +368,9 @@ async def simulate_fills(
reqid=reqid, reqid=reqid,
oid=oid, oid=oid,
) )
else:
# prices are iterated in sorted order so we're done
break
async def handle_order_requests( async def handle_order_requests(
@ -437,81 +380,66 @@ async def handle_order_requests(
) -> None: ) -> None:
request_msg: dict # order_request: dict
async for request_msg in ems_order_stream: async for request_msg in ems_order_stream:
match request_msg:
case {'action': ('buy' | 'sell')}:
order = BrokerdOrder(**request_msg)
account = order.account
# error on bad inputs action = request_msg['action']
reason = None
if account != 'paper':
reason = f'No account found:`{account}` (paper only)?'
elif order.size == 0: if action in {'buy', 'sell'}:
reason = 'Invalid size: 0'
if reason: account = request_msg['account']
log.error(reason) if account != 'paper':
await ems_order_stream.send(BrokerdError( log.error(
oid=order.oid, 'This is a paper account, only a `paper` selection is valid'
symbol=order.symbol,
reason=reason,
))
continue
reqid = order.reqid or str(uuid.uuid4())
# deliver ack that order has been submitted to broker routing
await ems_order_stream.send(
BrokerdOrderAck(
oid=order.oid,
reqid=reqid,
)
) )
await ems_order_stream.send(BrokerdError(
oid=request_msg['oid'],
symbol=request_msg['symbol'],
reason=f'Paper only. No account found: `{account}` ?',
).dict())
continue
# call our client api to submit the order # validate
reqid = await client.submit_limit( order = BrokerdOrder(**request_msg)
# call our client api to submit the order
reqid = await client.submit_limit(
oid=order.oid,
symbol=order.symbol,
price=order.price,
action=order.action,
size=order.size,
# XXX: by default 0 tells ``ib_insync`` methods that
# there is no existing order so ask the client to create
# a new one (which it seems to do by allocating an int
# counter - collision prone..)
reqid=order.reqid,
)
# deliver ack that order has been submitted to broker routing
await ems_order_stream.send(
BrokerdOrderAck(
# ems order request id
oid=order.oid, oid=order.oid,
symbol=f'{order.symbol}.{client.broker}',
price=order.price, # broker specific request id
action=order.action,
size=order.size,
# XXX: by default 0 tells ``ib_insync`` methods that
# there is no existing order so ask the client to create
# a new one (which it seems to do by allocating an int
# counter - collision prone..)
reqid=reqid, reqid=reqid,
)
log.info(f'Submitted paper LIMIT {reqid}:\n{order}')
case {'action': 'cancel'}: ).dict()
msg = BrokerdCancel(**request_msg) )
await client.submit_cancel(
reqid=msg.reqid
)
case _: elif action == 'cancel':
log.error(f'Unknown order command: {request_msg}') msg = BrokerdCancel(**request_msg)
await client.submit_cancel(
reqid=msg.reqid
)
_reqids: bidict[str, tuple] = {} else:
_buys: defaultdict[ log.error(f'Unknown order command: {request_msg}')
str, # symbol
bidict[
str, # oid
tuple[float, float, str, str], # order info
]
] = defaultdict(bidict)
_sells: defaultdict[
str, # symbol
bidict[
str, # oid
tuple[float, float, str, str], # order info
]
] = defaultdict(bidict)
_positions: dict[str, Position] = {}
@tractor.context @tractor.context
@ -523,62 +451,42 @@ async def trades_dialogue(
loglevel: str = None, loglevel: str = None,
) -> None: ) -> None:
tractor.log.get_console_log(loglevel) tractor.log.get_console_log(loglevel)
async with ( async with (
data.open_feed( data.open_feed(
[fqsn], [fqsn],
loglevel=loglevel, loglevel=loglevel,
) as feed, ) as feed,
): ):
pp_msgs: list[BrokerdPosition] = []
pos: Position
token: str # f'{symbol}.{self.broker}'
for token, pos in _positions.items():
pp_msgs.append(BrokerdPosition(
broker=broker,
account='paper',
symbol=pos.symbol.front_fqsn(),
size=pos.size,
avg_price=pos.ppu,
))
# TODO: load paper positions per broker from .toml config file # TODO: load paper positions per broker from .toml config file
# and pass as symbol to position data mapping: ``dict[str, dict]`` # and pass as symbol to position data mapping: ``dict[str, dict]``
await ctx.started(( # await ctx.started(all_positions)
pp_msgs, await ctx.started(({}, {'paper',}))
['paper'],
))
async with ( async with (
ctx.open_stream() as ems_stream, ctx.open_stream() as ems_stream,
trio.open_nursery() as n, trio.open_nursery() as n,
): ):
client = PaperBoi( client = PaperBoi(
broker, broker,
ems_stream, ems_stream,
_buys=_buys, _buys={},
_sells=_sells, _sells={},
_reqids=_reqids, _reqids={},
# TODO: load paper positions from ``positions.toml`` # TODO: load paper positions from ``positions.toml``
_positions=_positions, _positions={},
                # TODO: load positions from ledger file
_trade_ledger={},
) )
n.start_soon( n.start_soon(handle_order_requests, client, ems_stream)
handle_order_requests,
client,
ems_stream,
)
# paper engine simulator clearing task # paper engine simulator clearing task
await simulate_fills(feed.streams[broker], client) await simulate_fills(feed.stream, client)
@asynccontextmanager @asynccontextmanager
@ -603,17 +511,17 @@ async def open_paperboi(
    # (we likely don't need more than one proc for basic # (we likely don't need more than one proc for basic
# simulated order clearing) # simulated order clearing)
if portal is None: if portal is None:
log.info('Starting new paper-engine actor')
portal = await tn.start_actor( portal = await tn.start_actor(
service_name, service_name,
enable_modules=[__name__] enable_modules=[__name__]
) )
async with portal.open_context( async with portal.open_context(
trades_dialogue, trades_dialogue,
broker=broker, broker=broker,
fqsn=fqsn, fqsn=fqsn,
loglevel=loglevel, loglevel=loglevel,
) as (ctx, first): ) as (ctx, first):
yield ctx, first yield ctx, first
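For reference, a small sketch of the new paper-engine order tables (symbol -> ``bidict[oid, (price, size, reqid, action)]``) and the reverse lookup used when a simulated clear pops an entry; values are illustrative:

from collections import defaultdict
from bidict import bidict

_buys = defaultdict(bidict)
_buys['xbtusdt.kraken']['<oid>'] = (20_000.0, 0.1, '<reqid>', 'buy')

# on a clear, the order-info tuple is popped via the inverse mapping
oid = _buys['xbtusdt.kraken'].inverse.pop((20_000.0, 0.1, '<reqid>', 'buy'))
assert oid == '<oid>'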
View File
@ -27,35 +27,25 @@ import tractor
from ..log import get_console_log, get_logger, colorize_json from ..log import get_console_log, get_logger, colorize_json
from ..brokers import get_brokermod from ..brokers import get_brokermod
from .._daemon import ( from .._daemon import _tractor_kwargs
_default_registry_host,
_default_registry_port,
)
from .. import config from .. import config
log = get_logger('cli') log = get_logger('cli')
DEFAULT_BROKER = 'questrade'
@click.command() @click.command()
@click.option('--loglevel', '-l', default='warning', help='Logging level') @click.option('--loglevel', '-l', default='warning', help='Logging level')
@click.option('--tl', is_flag=True, help='Enable tractor logging') @click.option('--tl', is_flag=True, help='Enable tractor logging')
@click.option('--pdb', is_flag=True, help='Enable tractor debug mode') @click.option('--pdb', is_flag=True, help='Enable tractor debug mode')
@click.option('--host', '-h', default=None, help='Host addr to bind') @click.option('--host', '-h', default='127.0.0.1', help='Host address to bind')
@click.option('--port', '-p', default=None, help='Port number to bind')
@click.option( @click.option(
'--tsdb', '--tsdb',
is_flag=True, is_flag=True,
help='Enable local ``marketstore`` instance' help='Enable local ``marketstore`` instance'
) )
def pikerd( def pikerd(loglevel, host, tl, pdb, tsdb):
loglevel: str,
host: str,
port: int,
tl: bool,
pdb: bool,
tsdb: bool,
):
''' '''
Spawn the piker broker-daemon. Spawn the piker broker-daemon.
@ -72,21 +62,12 @@ def pikerd(
"\n" "\n"
)) ))
reg_addr: None | tuple[str, int] = None
if host or port:
reg_addr = (
host or _default_registry_host,
int(port) or _default_registry_port,
)
async def main(): async def main():
async with ( async with (
open_pikerd( open_pikerd(
loglevel=loglevel, loglevel=loglevel,
debug_mode=pdb, debug_mode=pdb,
registry_addr=reg_addr,
), # normally delivers a ``Services`` handle ), # normally delivers a ``Services`` handle
trio.open_nursery() as n, trio.open_nursery() as n,
): ):
@ -102,9 +83,9 @@ def pikerd(
) )
log.info( log.info(
f'`marketstored` up!\n' f'`marketstore` up!\n'
f'pid: {pid}\n' f'`marketstored` pid: {pid}\n'
f'container id: {cid[:12]}\n' f'docker container id: {cid}\n'
f'config: {pformat(config)}' f'config: {pformat(config)}'
) )
@ -116,46 +97,25 @@ def pikerd(
@click.group(context_settings=config._context_defaults) @click.group(context_settings=config._context_defaults)
@click.option( @click.option(
'--brokers', '-b', '--brokers', '-b',
default=None, default=[DEFAULT_BROKER],
multiple=True, multiple=True,
help='Broker backend to use' help='Broker backend to use'
) )
@click.option('--loglevel', '-l', default='warning', help='Logging level') @click.option('--loglevel', '-l', default='warning', help='Logging level')
@click.option('--tl', is_flag=True, help='Enable tractor logging') @click.option('--tl', is_flag=True, help='Enable tractor logging')
@click.option('--configdir', '-c', help='Configuration directory') @click.option('--configdir', '-c', help='Configuration directory')
@click.option('--host', '-h', default=None, help='Host addr to bind')
@click.option('--port', '-p', default=None, help='Port number to bind')
@click.pass_context @click.pass_context
def cli( def cli(ctx, brokers, loglevel, tl, configdir):
ctx: click.Context,
brokers: list[str],
loglevel: str,
tl: bool,
configdir: str,
host: str,
port: int,
) -> None:
if configdir is not None: if configdir is not None:
assert os.path.isdir(configdir), f"`{configdir}` is not a valid path" assert os.path.isdir(configdir), f"`{configdir}` is not a valid path"
config._override_config_dir(configdir) config._override_config_dir(configdir)
ctx.ensure_object(dict) ctx.ensure_object(dict)
if not brokers: if len(brokers) == 1:
# (try to) load all (supposedly) supported data/broker backends brokermods = [get_brokermod(brokers[0])]
from piker.brokers import __brokers__ else:
brokers = __brokers__ brokermods = [get_brokermod(broker) for broker in brokers]
brokermods = [get_brokermod(broker) for broker in brokers]
assert brokermods
reg_addr: None | tuple[str, int] = None
if host or port:
reg_addr = (
host or _default_registry_host,
int(port) or _default_registry_port,
)
ctx.obj.update({ ctx.obj.update({
'brokers': brokers, 'brokers': brokers,
@ -165,7 +125,6 @@ def cli(
'log': get_console_log(loglevel), 'log': get_console_log(loglevel),
'confdir': config._config_dir, 'confdir': config._config_dir,
'wl_path': config._watchlists_data_path, 'wl_path': config._watchlists_data_path,
'registry_addr': reg_addr,
}) })
# allow enabling same loglevel in ``tractor`` machinery # allow enabling same loglevel in ``tractor`` machinery
@ -175,40 +134,29 @@ def cli(
@cli.command() @cli.command()
@click.option('--tl', is_flag=True, help='Enable tractor logging') @click.option('--tl', is_flag=True, help='Enable tractor logging')
@click.argument('ports', nargs=-1, required=False) @click.argument('names', nargs=-1, required=False)
@click.pass_obj @click.pass_obj
def services(config, tl, ports): def services(config, tl, names):
from .._daemon import (
open_piker_runtime,
_default_registry_port,
_default_registry_host,
)
host = _default_registry_host
if not ports:
ports = [_default_registry_port]
async def list_services(): async def list_services():
nonlocal host
async with ( async with tractor.get_arbiter(
open_piker_runtime( *_tractor_kwargs['arbiter_addr']
name='service_query', ) as portal:
loglevel=config['loglevel'] if tl else None,
),
tractor.get_arbiter(
host=host,
port=ports[0]
) as portal
):
registry = await portal.run_from_ns('self', 'get_registry') registry = await portal.run_from_ns('self', 'get_registry')
json_d = {} json_d = {}
for key, socket in registry.items(): for key, socket in registry.items():
# name, uuid = uid
host, port = socket host, port = socket
json_d[key] = f'{host}:{port}' json_d[key] = f'{host}:{port}'
click.echo(f"{colorize_json(json_d)}") click.echo(f"{colorize_json(json_d)}")
trio.run(list_services) tractor.run(
list_services,
name='service_query',
loglevel=config['loglevel'] if tl else None,
arbiter_addr=_tractor_kwargs['arbiter_addr'],
)
def _load_clis() -> None: def _load_clis() -> None:
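A minimal sketch of the registry-address fallback added to both ``pikerd`` and the ``cli`` group above (the real default host/port live in ``piker._daemon``; the values here are placeholders):

def resolve_reg_addr(
    host: str | None,
    port: int | None,
    default_host: str = '127.0.0.1',   # placeholder default
    default_port: int = 6116,          # placeholder default
) -> tuple[str, int] | None:
    # only override the registry addr when either flag was passed
    if host or port:
        return (host or default_host, int(port) if port else default_port)
    return None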
View File
@ -21,7 +21,6 @@ Broker configuration mgmt.
import platform import platform
import sys import sys
import os import os
from os import path
from os.path import dirname from os.path import dirname
import shutil import shutil
from typing import Optional from typing import Optional
@ -112,7 +111,6 @@ if _parent_user:
_conf_names: set[str] = { _conf_names: set[str] = {
'brokers', 'brokers',
'pps',
'trades', 'trades',
'watchlists', 'watchlists',
} }
@ -149,21 +147,19 @@ def get_conf_path(
conf_name: str = 'brokers', conf_name: str = 'brokers',
) -> str: ) -> str:
''' """Return the default config path normally under
Return the top-level default config path normally under ``~/.config/piker`` on linux.
``~/.config/piker`` on linux for a given ``conf_name``, the config
name.
Contains files such as: Contains files such as:
- brokers.toml - brokers.toml
- pp.toml
- watchlists.toml - watchlists.toml
- trades.toml
# maybe coming soon ;) # maybe coming soon ;)
- signals.toml - signals.toml
- strats.toml - strats.toml
''' """
assert conf_name in _conf_names assert conf_name in _conf_names
fn = _conf_fn_w_ext(conf_name) fn = _conf_fn_w_ext(conf_name)
return os.path.join( return os.path.join(
@ -177,7 +173,7 @@ def repodir():
Return the abspath to the repo directory. Return the abspath to the repo directory.
''' '''
dirpath = path.abspath( dirpath = os.path.abspath(
# we're 3 levels down in **this** module file # we're 3 levels down in **this** module file
dirname(dirname(os.path.realpath(__file__))) dirname(dirname(os.path.realpath(__file__)))
) )
@ -186,9 +182,7 @@ def repodir():
def load( def load(
conf_name: str = 'brokers', conf_name: str = 'brokers',
path: str = None, path: str = None
**tomlkws,
) -> (dict, str): ) -> (dict, str):
''' '''
@ -196,10 +190,6 @@ def load(
''' '''
path = path or get_conf_path(conf_name) path = path or get_conf_path(conf_name)
if not os.path.isdir(_config_dir):
os.mkdir(_config_dir)
if not os.path.isfile(path): if not os.path.isfile(path):
fn = _conf_fn_w_ext(conf_name) fn = _conf_fn_w_ext(conf_name)
@ -212,11 +202,8 @@ def load(
# if one exists. # if one exists.
if os.path.isfile(template): if os.path.isfile(template):
shutil.copyfile(template, path) shutil.copyfile(template, path)
else:
with open(path, 'r'):
pass # touch it
config = toml.load(path, **tomlkws) config = toml.load(path)
log.debug(f"Read config file {path}") log.debug(f"Read config file {path}")
return config, path return config, path
@ -225,7 +212,6 @@ def write(
config: dict, # toml config as dict config: dict, # toml config as dict
name: str = 'brokers', name: str = 'brokers',
path: str = None, path: str = None,
**toml_kwargs,
) -> None: ) -> None:
'''' ''''
@ -249,14 +235,11 @@ def write(
f"{path}" f"{path}"
) )
with open(path, 'w') as cf: with open(path, 'w') as cf:
return toml.dump( return toml.dump(config, cf)
config,
cf,
**toml_kwargs,
)
def load_accounts( def load_accounts(
providers: Optional[list[str]] = None providers: Optional[list[str]] = None
) -> bidict[str, Optional[str]]: ) -> bidict[str, Optional[str]]:
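A hedged example of the new ``**tomlkws``/``**toml_kwargs`` passthrough on ``load()``/``write()``, which simply forwards keyword options to ``toml.load()``/``toml.dump()`` (the ``pps`` section name comes from the ``_conf_names`` addition above):

import toml

conf, path = load('pps', decoder=toml.TomlDecoder())  # kwargs forwarded to toml.load()
conf.setdefault('paper', {})
write(conf, name='pps', encoder=toml.TomlEncoder())    # forwarded to toml.dump()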
View File
@ -22,12 +22,6 @@ and storing data from your brokers as well as
sharing live streams over a network. sharing live streams over a network.
""" """
import tractor
import trio
from ..log import (
get_console_log,
)
from ._normalize import iterticks from ._normalize import iterticks
from ._sharedmem import ( from ._sharedmem import (
maybe_open_shm_array, maybe_open_shm_array,
@ -38,6 +32,7 @@ from ._sharedmem import (
) )
from .feed import ( from .feed import (
open_feed, open_feed,
_setup_persistent_brokerd,
) )
@ -49,40 +44,5 @@ __all__ = [
'attach_shm_array', 'attach_shm_array',
'open_shm_array', 'open_shm_array',
'get_shm_token', 'get_shm_token',
'_setup_persistent_brokerd',
] ]
@tractor.context
async def _setup_persistent_brokerd(
ctx: tractor.Context,
brokername: str,
) -> None:
'''
Allocate a actor-wide service nursery in ``brokerd``
such that feeds can be run in the background persistently by
the broker backend as needed.
'''
get_console_log(tractor.current_actor().loglevel)
from .feed import (
_bus,
get_feed_bus,
)
global _bus
assert not _bus
async with trio.open_nursery() as service_nursery:
# assign a nursery to the feeds bus for spawning
# background tasks from clients
get_feed_bus(brokername, service_nursery)
# unblock caller
await ctx.started()
# we pin this task to keep the feeds manager active until the
# parent actor decides to tear it down
await trio.sleep_forever()
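A hedged sketch of how a supervising actor might spawn and enter the persistent ``brokerd`` endpoint moved here (the actual wiring lives in ``piker._daemon``; the actor name and module list are assumptions):

import tractor
import trio

async def spawn_brokerd_feed_bus(brokername: str) -> None:
    async with tractor.open_nursery() as an:
        portal = await an.start_actor(
            f'brokerd.{brokername}',          # hypothetical daemon name
            enable_modules=['piker.data'],
        )
        async with portal.open_context(
            _setup_persistent_brokerd,
            brokername=brokername,
        ) as (ctx, first):
            assert first is None   # endpoint just signals readiness
            await trio.sleep_forever()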
View File
@ -37,13 +37,8 @@ from docker.models.containers import Container as DockerContainer
from docker.errors import ( from docker.errors import (
DockerException, DockerException,
APIError, APIError,
# ContainerError,
)
import requests
from requests.exceptions import (
ConnectionError,
ReadTimeout,
) )
from requests.exceptions import ConnectionError, ReadTimeout
from ..log import get_logger, get_console_log from ..log import get_logger, get_console_log
from .. import config from .. import config
@ -55,8 +50,8 @@ class DockerNotStarted(Exception):
'Prolly you dint start da daemon bruh' 'Prolly you dint start da daemon bruh'
class ApplicationLogError(Exception): class ContainerError(RuntimeError):
'App in container reported an error in logs' 'Error reported via app-container logging level'
@acm @acm
@ -101,9 +96,9 @@ async def open_docker(
# not perms? # not perms?
raise raise
# finally: finally:
# if client: if client:
# client.close() client.close()
class Container: class Container:
@ -161,7 +156,7 @@ class Container:
# print(f'level: {level}') # print(f'level: {level}')
if level in ('error', 'fatal'): if level in ('error', 'fatal'):
raise ApplicationLogError(msg) raise ContainerError(msg)
if patt in msg: if patt in msg:
return True return True
@ -190,29 +185,12 @@ class Container:
if 'is not running' in err.explanation: if 'is not running' in err.explanation:
return False return False
def hard_kill(self, start: float) -> None:
delay = time.time() - start
# get out the big guns, bc apparently marketstore
# doesn't actually know how to terminate gracefully
# :eyeroll:...
log.error(
f'SIGKILL-ing: {self.cntr.id} after {delay}s\n'
)
self.try_signal('SIGKILL')
self.cntr.wait(
timeout=3,
condition='not-running',
)
async def cancel( async def cancel(
self, self,
stop_msg: str, stop_msg: str,
hard_kill: bool = False,
) -> None: ) -> None:
cid = self.cntr.id cid = self.cntr.id
# first try a graceful cancel # first try a graceful cancel
log.cancel( log.cancel(
f'SIGINT cancelling container: {cid}\n' f'SIGINT cancelling container: {cid}\n'
@ -221,25 +199,15 @@ class Container:
self.try_signal('SIGINT') self.try_signal('SIGINT')
start = time.time() start = time.time()
for _ in range(6): for _ in range(30):
with trio.move_on_after(0.5) as cs: with trio.move_on_after(0.5) as cs:
log.cancel('polling for CNTR logs...') cs.shield = True
await self.process_logs_until(stop_msg)
try: # if we aren't cancelled on above checkpoint then we
await self.process_logs_until(stop_msg) # assume we read the expected stop msg and terminated.
except ApplicationLogError: break
hard_kill = True
else:
# if we aren't cancelled on above checkpoint then we
# assume we read the expected stop msg and
# terminated.
break
if cs.cancelled_caught:
# on timeout just try a hard kill after
# a quick container sync-wait.
hard_kill = True
try: try:
log.info(f'Polling for container shutdown:\n{cid}') log.info(f'Polling for container shutdown:\n{cid}')
@ -250,7 +218,6 @@ class Container:
condition='not-running', condition='not-running',
) )
# graceful exit if we didn't time out
break break
except ( except (
@ -262,23 +229,25 @@ class Container:
except ( except (
docker.errors.APIError, docker.errors.APIError,
ConnectionError, ConnectionError,
requests.exceptions.ConnectionError,
trio.Cancelled,
): ):
log.exception('Docker connection failure') log.exception('Docker connection failure')
self.hard_kill(start) break
raise
except trio.Cancelled:
log.exception('trio cancelled...')
self.hard_kill(start)
else: else:
hard_kill = True delay = time.time() - start
log.error(
f'Failed to kill container {cid} after {delay}s\n'
'sending SIGKILL..'
)
# get out the big guns, bc apparently marketstore
# doesn't actually know how to terminate gracefully
# :eyeroll:...
self.try_signal('SIGKILL')
self.cntr.wait(
timeout=3,
condition='not-running',
)
if hard_kill: log.cancel(f'Container stopped: {cid}')
self.hard_kill(start)
else:
log.cancel(f'Container stopped: {cid}')
@tractor.context @tractor.context
@ -320,13 +289,15 @@ async def open_ahabd(
)) ))
try: try:
# TODO: we might eventually want a proxy-style msg-prot here # TODO: we might eventually want a proxy-style msg-prot here
# to allow remote control of containers without needing # to allow remote control of containers without needing
# callers to have root perms? # callers to have root perms?
await trio.sleep_forever() await trio.sleep_forever()
finally: finally:
await cntr.cancel(stop_msg) with trio.CancelScope(shield=True):
await cntr.cancel(stop_msg)
async def start_ahab( async def start_ahab(
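A minimal trio sketch of the poll-then-escalate shutdown pattern used by ``Container.cancel()`` above (names are illustrative, not the piker API):

import trio

async def poll_until(predicate, attempts: int = 6, timeout: float = 0.5) -> bool:
    # retry a (possibly blocking) check a few times, each bounded by a timeout
    for _ in range(attempts):
        with trio.move_on_after(timeout):
            if await predicate():
                return True
    # caller escalates, eg. by sending SIGKILL to the container
    return False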
View File
@ -1,827 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Pre-(path)-graphics formatted x/y nd/1d rendering subsystem.
"""
from __future__ import annotations
from typing import (
Optional,
TYPE_CHECKING,
)
import msgspec
from msgspec import field
import numpy as np
from numpy.lib import recfunctions as rfn
from ._sharedmem import (
ShmArray,
)
from ._pathops import (
path_arrays_from_ohlc,
)
if TYPE_CHECKING:
from ._dataviz import (
Viz,
)
from .._profile import Profiler
class IncrementalFormatter(msgspec.Struct):
'''
Incrementally updating, pre-path-graphics tracking, formatter.
Allows tracking source data state in an updateable pre-graphics
``np.ndarray`` format (in local process memory) as well as
incrementally rendering from that format **to** 1d x/y for path
generation using ``pg.functions.arrayToQPath()``.
'''
shm: ShmArray
viz: Viz
# the value to be multiplied any any index into the x/y_1d arrays
# given the input index is based on the original source data array.
flat_index_ratio: float = 1
@property
def index_field(self) -> 'str':
'''
Value (``str``) used to look up the "index series" from the
underlying source ``numpy`` struct-array; delegate directly to
the managing ``Viz``.
'''
return self.viz.index_field
# Incrementally updated xy ndarray formatted data, a pre-1d
# format which is updated and cached independently of the final
# pre-graphics-path 1d format.
x_nd: Optional[np.ndarray] = None
y_nd: Optional[np.ndarray] = None
@property
def xy_nd(self) -> tuple[np.ndarray, np.ndarray]:
return (
self.x_nd[self.xy_slice],
self.y_nd[self.xy_slice],
)
@property
def xy_slice(self) -> slice:
return slice(
self.xy_nd_start,
self.xy_nd_stop,
)
# indexes which slice into the above arrays (which are allocated
# based on source data shm input size) and allow retrieving
# incrementally updated data.
xy_nd_start: int | None = None
xy_nd_stop: int | None = None
# TODO: eventually incrementally update 1d-pre-graphics path data?
x_1d: np.ndarray | None = None
y_1d: np.ndarray | None = None
# incremental view-change state(s) tracking
_last_vr: tuple[float, float] | None = None
_last_ivdr: tuple[float, float] | None = None
@property
def index_step_size(self) -> float:
'''
Readonly value computed on first ``.diff()`` call.
'''
return self.viz.index_step()
def diff(
self,
new_read: tuple[np.ndarray],
) -> tuple[
np.ndarray,
np.ndarray,
]:
# TODO:
# - can the renderer just call ``Viz.read()`` directly? unpack
# latest source data read
# - eventually maybe we can implement some kind of
# transform on the ``QPainterPath`` that will more or less
# detect the diff in "elements" terms? update diff state since
# we've now rendered paths.
(
xfirst,
xlast,
array,
ivl,
ivr,
in_view,
) = new_read
index = array['index']
# if the first index in the read array is 0 then
        # it means the source buffer has been completely backfilled to
# available space.
src_start = index[0]
src_stop = index[-1] + 1
# these are the "formatted output data" indices
# for the pre-graphics arrays.
nd_start = self.xy_nd_start
nd_stop = self.xy_nd_stop
if (
nd_start is None
):
assert nd_stop is None
# setup to do a prepend of all existing src history
nd_start = self.xy_nd_start = src_stop
# set us in a zero-to-append state
nd_stop = self.xy_nd_stop = src_stop
# compute the length diffs between the first/last index entry in
# the input data and the last indexes we have on record from the
# last time we updated the curve index.
prepend_length = int(nd_start - src_start)
append_length = int(src_stop - nd_stop)
# blah blah blah
# do diffing for prepend, append and last entry
return (
slice(src_start, nd_start),
prepend_length,
append_length,
slice(nd_stop, src_stop),
)
def _track_inview_range(
self,
view_range: tuple[int, int],
) -> bool:
# if a view range is passed, plan to draw the
        # source output that's "in view" of the chart.
vl, vr = view_range
zoom_or_append = False
last_vr = self._last_vr
# incremental in-view data update.
if last_vr:
lvl, lvr = last_vr # relative slice indices
# TODO: detecting more specifically the interaction changes
# last_ivr = self._last_ivdr or (vl, vr)
# al, ar = last_ivr # abs slice indices
# left_change = abs(x_iv[0] - al) >= 1
# right_change = abs(x_iv[-1] - ar) >= 1
# likely a zoom/pan view change or data append update
if (
(vr - lvr) > 2
or vl < lvl
# append / prepend update
# we had an append update where the view range
# didn't change but the data-viewed (shifted)
# underneath, so we need to redraw.
# or left_change and right_change and last_vr == view_range
# not (left_change and right_change) and ivr
# (
# or abs(x_iv[ivr] - livr) > 1
):
zoom_or_append = True
self._last_vr = view_range
return zoom_or_append
def format_to_1d(
self,
new_read: tuple,
array_key: str,
profiler: Profiler,
slice_to_inview: bool = True,
) -> tuple[
np.ndarray,
np.ndarray,
]:
shm = self.shm
(
_,
_,
array,
ivl,
ivr,
in_view,
) = new_read
(
pre_slice,
prepend_len,
append_len,
post_slice,
) = self.diff(new_read)
# we first need to allocate xy data arrays
# from the source data.
if self.y_nd is None:
self.xy_nd_start = shm._first.value
self.xy_nd_stop = shm._last.value
self.x_nd, self.y_nd = self.allocate_xy_nd(
shm,
array_key,
)
profiler('allocated xy history')
# once allocated we do incremental pre/append
# updates from the diff with the source buffer.
else:
if prepend_len:
self.incr_update_xy_nd(
shm,
array_key,
# this is the pre-sliced, "normally expected"
# new data that an updater would normally be
# expected to process, however in some cases (like
# step curves) the updater routine may want to do
# the source history-data reading itself, so we pass
# both here.
shm._array[pre_slice],
pre_slice,
prepend_len,
self.xy_nd_start,
self.xy_nd_stop,
is_append=False,
)
self.xy_nd_start -= prepend_len
profiler(f'prepended xy history: {prepend_len}')
if append_len:
self.incr_update_xy_nd(
shm,
array_key,
shm._array[post_slice],
post_slice,
append_len,
self.xy_nd_start,
self.xy_nd_stop,
is_append=True,
)
self.xy_nd_stop += append_len
profiler(f'appended xy history: {append_len}')
# sanity
# slice_ln = post_slice.stop - post_slice.start
# assert append_len == slice_ln
view_changed: bool = False
view_range: tuple[int, int] = (ivl, ivr)
if slice_to_inview:
view_changed = self._track_inview_range(view_range)
array = in_view
profiler(f'{self.viz.name} view range slice {view_range}')
# TODO: we need to check if the last-datum-in-view is true and
# if so only slice to the 2nd last datum.
# hist = array[:slice_to_head]
# XXX: WOA WTF TRACTOR DEBUGGING BUGGG
# assert 0
# xy-path data transform: convert source data to a format
# able to be passed to a `QPainterPath` rendering routine.
if not len(array):
# XXX: this might be why the profiler only has exits?
return
# TODO: hist here should be the pre-sliced
# x/y_data in the case where allocate_xy is
# defined?
x_1d, y_1d, connect = self.format_xy_nd_to_1d(
array,
array_key,
view_range,
)
# cache/save last 1d outputs for use by other
# readers (eg. `Viz.draw_last_datum()` in the
# only-draw-last-uppx case).
self.x_1d = x_1d
self.y_1d = y_1d
# app_tres = None
# if append_len:
# appended = array[-append_len-1:slice_to_head]
# app_tres = self.format_xy_nd_to_1d(
# appended,
# array_key,
# (
# view_range[1] - append_len + slice_to_head,
# view_range[1]
# ),
# )
# # assert (len(appended) - 1) == append_len
# # assert len(appended) == append_len
# print(
# f'{self.viz.name} APPEND LEN: {append_len}\n'
# f'{self.viz.name} APPENDED: {appended}\n'
# f'{self.viz.name} app_tres: {app_tres}\n'
# )
# update the last "in view data range"
if len(x_1d):
self._last_ivdr = x_1d[0], x_1d[-1]
profiler('.format_to_1d()')
return (
x_1d,
y_1d,
connect,
prepend_len,
append_len,
view_changed,
# app_tres,
)
###############################
# Sub-type override interface #
###############################
x_offset: np.ndarray = np.array([0])
# optional pre-graphics xy formatted data which
# is incrementally updated in sync with the source data.
# XXX: was ``.allocate_xy()``
def allocate_xy_nd(
self,
src_shm: ShmArray,
data_field: str,
) -> tuple[
np.ndarray, # x
np.ndarray  # y
]:
'''
Convert the structured-array ``src_shm`` format to
an equivalently shaped (and field-less) ``np.ndarray``.
Eg. a 4 field x N struct-array => (N, 4)
'''
y_nd = src_shm._array[data_field].copy()
x_nd = (
src_shm._array[self.index_field].copy()
+
self.x_offset
)
return x_nd, y_nd
# XXX: was ``.update_xy()``
def incr_update_xy_nd(
self,
src_shm: ShmArray,
data_field: str,
new_from_src: np.ndarray, # portion of source that was updated
read_slc: slice,
ln: int, # len of updated
nd_start: int,
nd_stop: int,
is_append: bool,
) -> None:
# write pushed data to flattened copy
y_nd_new = new_from_src[data_field]
self.y_nd[read_slc] = y_nd_new
x_nd_new = self.x_nd[read_slc]
x_nd_new[:] = (
new_from_src[self.index_field]
+
self.x_offset
)
# x_nd = self.x_nd[self.xy_slice]
# y_nd = self.y_nd[self.xy_slice]
# name = self.viz.name
# if 'trade_rate' == name:
# s = 4
# print(
# f'{name.upper()}:\n'
# 'NEW_FROM_SRC:\n'
# f'new_from_src: {new_from_src}\n\n'
# f'PRE self.x_nd:'
# f'\n{list(x_nd[-s:])}\n'
# f'PRE self.y_nd:\n'
# f'{list(y_nd[-s:])}\n\n'
# f'TO WRITE:\n'
# f'x_nd_new:\n'
# f'{x_nd_new[0]}\n'
# f'y_nd_new:\n'
# f'{y_nd_new}\n'
# )
# XXX: was ``.format_xy()``
def format_xy_nd_to_1d(
self,
array: np.ndarray,
array_key: str,
vr: tuple[int, int],
) -> tuple[
np.ndarray, # 1d x
np.ndarray, # 1d y
np.ndarray | str, # connection array/style
]:
'''
Default xy-nd array to 1d pre-graphics-path render routine.
Return single field column data verbatim
'''
# NOTE: we don't include the very last datum which is filled in
# normally by another graphics object.
x_1d = array[self.index_field][:-1]
y_1d = array[array_key][:-1]
# name = self.viz.name
# if 'trade_rate' == name:
# s = 4
# x_nd = list(self.x_nd[self.xy_slice][-s:-1])
# y_nd = list(self.y_nd[self.xy_slice][-s:-1])
# print(
# f'{name}:\n'
# f'XY data:\n'
# f'x: {x_nd}\n'
# f'y: {y_nd}\n\n'
# f'x_1d: {list(x_1d[-s:])}\n'
# f'y_1d: {list(y_1d[-s:])}\n\n'
# )
return (
x_1d,
y_1d,
# 1d connection array or style-key to
# ``pg.functions.arrayToQPath()``
'all',
)
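# NOTE (added annotation, not in the original module): for a plain
# single-field source like a 'close' column, the default hook pair
# above boils down to:
#   x_nd = src['index'].copy() + 0        # allocate_xy_nd()
#   y_nd = src['close'].copy()
#   x_1d, y_1d = x_nd[:-1], y_nd[:-1]     # format_xy_nd_to_1d()
# ie. verbatim column copies with the last datum dropped and an
# 'all' connection style.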
class OHLCBarsFmtr(IncrementalFormatter):
x_offset: np.ndarray = np.array([
-0.5,
0,
0,
0.5,
])
fields: list[str] = field(
default_factory=lambda: ['open', 'high', 'low', 'close']
)
flat_index_ratio: float = 4
def allocate_xy_nd(
self,
ohlc_shm: ShmArray,
data_field: str,
) -> tuple[
np.ndarray, # x
np.ndarray  # y
]:
'''
Convert an input struct-array holding OHLC samples into a pair of
flattened x, y arrays with the same size (datums wise) as the source
data.
'''
y_nd = ohlc_shm.ustruct(self.fields)
# generate a flat-interpolated x-domain
x_nd = (
np.broadcast_to(
ohlc_shm._array[self.index_field][:, None],
(
ohlc_shm._array.size,
# 4, # only ohlc
y_nd.shape[1],
),
)
+
self.x_offset
)
assert y_nd.any()
# write pushed data to flattened copy
return (
x_nd,
y_nd,
)
def incr_update_xy_nd(
self,
src_shm: ShmArray,
data_field: str,
new_from_src: np.ndarray, # portion of source that was updated
read_slc: slice,
ln: int, # len of updated
nd_start: int,
nd_stop: int,
is_append: bool,
) -> None:
# write newly pushed data to flattened copy
# a struct-arr is always passed in.
new_y_nd = rfn.structured_to_unstructured(
new_from_src[self.fields]
)
self.y_nd[read_slc] = new_y_nd
# generate same-valued-per-row x support based on y shape
x_nd_new = self.x_nd[read_slc]
x_nd_new[:] = np.broadcast_to(
new_from_src[self.index_field][:, None],
new_y_nd.shape,
) + self.x_offset
# TODO: can we drop this frame and just use the above?
def format_xy_nd_to_1d(
self,
array: np.ndarray,
array_key: str,
vr: tuple[int, int],
start: int = 0, # XXX: do we need this?
# 0.5 is no overlap between arms, 1.0 is full overlap
w: float = 0.16,
) -> tuple[
np.ndarray,
np.ndarray,
np.ndarray,
]:
'''
More or less direct proxy to the ``numba``-fied
``path_arrays_from_ohlc()`` (above) but with closed in kwargs
for line spacing.
'''
x, y, c = path_arrays_from_ohlc(
array[:-1],
start,
bar_w=self.index_step_size,
bar_gap=w * self.index_step_size,
# XXX: don't ask, due to a ``numba`` bug..
use_time_index=(self.index_field == 'time'),
)
return x, y, c
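# NOTE (illustrative annotation, not in the original source): the
# x-domain broadcast in ``allocate_xy_nd()`` above repeats each index
# once per OHLC field and nudges it by ``x_offset``, eg. for index 10:
#   np.broadcast_to([[10]], (1, 4)) + [-0.5, 0, 0, 0.5]
#   -> [[9.5, 10., 10., 10.5]]
# so each bar gets a left (open) arm, the hi/lo wick at center and a
# right (close) arm in x.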
class OHLCBarsAsCurveFmtr(OHLCBarsFmtr):
def format_xy_nd_to_1d(
self,
array: np.ndarray,
array_key: str,
vr: tuple[int, int],
) -> tuple[
np.ndarray,
np.ndarray,
str,
]:
# TODO: in the case of an existing ``.update_xy()``
# should we be passing in array as an xy arrays tuple?
# 2 more datum-indexes to capture zero at end
x_flat = self.x_nd[self.xy_nd_start:self.xy_nd_stop-1]
y_flat = self.y_nd[self.xy_nd_start:self.xy_nd_stop-1]
# slice to view
ivl, ivr = vr
x_iv_flat = x_flat[ivl:ivr]
y_iv_flat = y_flat[ivl:ivr]
# reshape to 1d for graphics rendering
y_iv = y_iv_flat.reshape(-1)
x_iv = x_iv_flat.reshape(-1)
return x_iv, y_iv, 'all'
class StepCurveFmtr(IncrementalFormatter):
x_offset: np.ndarray = np.array([
0,
1,
])
def allocate_xy_nd(
self,
shm: ShmArray,
data_field: str,
) -> tuple[
np.ndarray, # x
np.ndarray  # y
]:
'''
Convert an input 1d shm array to a "step array" format
for use by path graphics generation.
'''
i = shm._array[self.index_field].copy()
out = shm._array[data_field].copy()
x_out = (
np.broadcast_to(
i[:, None],
(i.size, 2),
)
+
self.x_offset
)
# fill out Nx2 array to hold each step's left + right vertices.
y_out = np.empty(
x_out.shape,
dtype=out.dtype,
)
# fill in (current) values from source shm buffer
y_out[:] = out[:, np.newaxis]
# TODO: pretty sure we can drop this?
# start y at origin level
# y_out[0, 0] = 0
# y_out[self.xy_nd_start] = 0
return x_out, y_out
def incr_update_xy_nd(
self,
src_shm: ShmArray,
array_key: str,
new_from_src: np.ndarray, # portion of source that was updated
read_slc: slice,
ln: int, # len of updated
nd_start: int,
nd_stop: int,
is_append: bool,
) -> tuple[
np.ndarray,
slice,
]:
# NOTE: for a step curve we slice from one datum prior
# to the current "update slice" to get the previous
# "level".
#
# why this is needed:
# - the current new append slice will often have a zero
#   value in the latest datum-step (at least for zero-on-new
#   cases like vlm) as per configuration of the FSP engine.
# - we need to look back one datum to get the last level which
#   will be used to terminate/complete the last step's x-width,
#   paired with the last x-index.
#
# XXX: this means WE CAN'T USE the append slice since we need to
# "look backward" one step to get the needed back-to-zero level
# and the update data in ``new_from_src`` will only contain the
# latest new data.
back_1 = slice(
read_slc.start - 1,
read_slc.stop,
)
to_write = src_shm._array[back_1]
y_nd_new = self.y_nd[back_1]
y_nd_new[:] = to_write[array_key][:, None]
x_nd_new = self.x_nd[read_slc]
x_nd_new[:] = (
new_from_src[self.index_field][:, None]
+
self.x_offset
)
# XXX: uncomment for debugging
# x_nd = self.x_nd[self.xy_slice]
# y_nd = self.y_nd[self.xy_slice]
# name = self.viz.name
# if 'dolla_vlm' in name:
# s = 4
# print(
# f'{name}:\n'
# 'NEW_FROM_SRC:\n'
# f'new_from_src: {new_from_src}\n\n'
# f'PRE self.x_nd:'
# f'\n{x_nd[-s:]}\n'
# f'PRE self.y_nd:\n'
# f'{y_nd[-s:]}\n\n'
# f'TO WRITE:\n'
# f'x_nd_new:\n'
# f'{x_nd_new}\n'
# f'y_nd_new:\n'
# f'{y_nd_new}\n'
# )
def format_xy_nd_to_1d(
self,
array: np.ndarray,
array_key: str,
vr: tuple[int, int],
) -> tuple[
np.ndarray,
np.ndarray,
str,
]:
last_t, last = array[-1][[self.index_field, array_key]]
start = self.xy_nd_start
stop = self.xy_nd_stop
x_step = self.x_nd[start:stop]
y_step = self.y_nd[start:stop]
# slice out in-view data
ivl, ivr = vr
# NOTE: add an extra step to get the vertical-line-down-to-zero
# adjacent to the last-datum graphic (filled rect).
x_step_iv = x_step[ivl:ivr+1]
y_step_iv = y_step[ivl:ivr+1]
# flatten to 1d
x_1d = x_step_iv.reshape(x_step_iv.size)
y_1d = y_step_iv.reshape(y_step_iv.size)
# debugging
# if y_1d.any():
# s = 6
# print(
# f'x_step_iv:\n{x_step_iv[-s:]}\n'
# f'y_step_iv:\n{y_step_iv[-s:]}\n\n'
# f'x_1d:\n{x_1d[-s:]}\n'
# f'y_1d:\n{y_1d[-s:]}\n'
# )
return x_1d, y_1d, 'all'
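As a quick aside, here is a minimal standalone sketch (array values invented, not from the source) of the Nx2 "step array" built by ``StepCurveFmtr.allocate_xy_nd()`` and flattened by ``format_xy_nd_to_1d()``:

import numpy as np

i = np.array([0, 1, 2])            # index column
y = np.array([5.0, 7.0, 6.0])      # 1d source values

x_out = np.broadcast_to(i[:, None], (i.size, 2)) + np.array([0, 1])
y_out = np.repeat(y[:, None], 2, axis=1)

x_1d = x_out.reshape(-1)           # [0, 1, 1, 2, 2, 3]
y_1d = y_out.reshape(-1)           # [5, 5, 7, 7, 6, 6]

Each datum contributes a left and right vertex at the same level, which is what renders as the square step line.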

View File

@ -56,7 +56,7 @@ def iterticks(
sig = ( sig = (
time, time,
tick['price'], tick['price'],
tick.get('size') tick['size']
) )
if ttype == 'dark_trade': if ttype == 'dark_trade':

View File

@ -1,452 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Super fast ``QPainterPath`` generation related operator routines.
"""
from math import (
ceil,
floor,
)
import numpy as np
from numpy.lib import recfunctions as rfn
from numba import (
# types,
njit,
float64,
int64,
# optional,
)
# TODO: for ``numba`` typing..
# from ._source import numba_ohlc_dtype
from ._m4 import ds_m4
from .._profile import (
Profiler,
pg_profile_enabled,
ms_slower_then,
)
def xy_downsample(
x,
y,
uppx,
x_spacer: float = 0.5,
) -> tuple[
np.ndarray,
np.ndarray,
float,
float,
]:
'''
Downsample 1D (flat ``numpy.ndarray``) arrays using M4 given an input
``uppx`` (units-per-pixel) and add space between discrete datums.
'''
# downsample whenever more than 1 pixel per datum can be shown.
# always refresh data bounds until we get diffing
# working properly, see above..
m4_out = ds_m4(
x,
y,
uppx,
)
if m4_out is not None:
bins, x, y, ymn, ymx = m4_out
# flatten output to 1d arrays suitable for path-graphics generation.
x = np.broadcast_to(x[:, None], y.shape)
x = (x + np.array(
[-x_spacer, 0, 0, x_spacer]
)).flatten()
y = y.flatten()
return x, y, ymn, ymx
# XXX: we accept a None output for the case where the input range
# to ``ds_m4()`` is bad (-ve) and we want to catch and debug
# that (seemingly super rare) circumstance..
return None
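# NOTE (hedged usage sketch, not part of the original module): a
# typical call site passes the chart's current units-per-pixel, eg.
#   out = xy_downsample(x, y, uppx=16)
#   if out is not None:
#       x_ds, y_ds, ymn, ymx = out
# where x/y are equal-length 1d float arrays and ymn/ymx give the
# y-range of the downsampled frame.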
@njit(
# NOTE: need to construct this manually for readonly
# arrays, see https://github.com/numba/numba/issues/4511
# (
# types.Array(
# numba_ohlc_dtype,
# 1,
# 'C',
# readonly=True,
# ),
# int64,
# types.unicode_type,
# optional(float64),
# ),
nogil=True
)
def path_arrays_from_ohlc(
data: np.ndarray,
start: int64,
bar_w: float64,
bar_gap: float64 = 0.16,
use_time_index: bool = True,
# XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622
# index_field: str,
) -> tuple[
np.ndarray,
np.ndarray,
np.ndarray,
]:
'''
Generate an array of line objects from input ohlc data.
'''
size = int(data.shape[0] * 6)
# XXX: see this for why the dtype might have to be defined outside
# the routine.
# https://github.com/numba/numba/issues/4098#issuecomment-493914533
x = np.zeros(
shape=size,
dtype=float64,
)
y, c = x.copy(), x.copy()
half_w: float = bar_w/2
# TODO: report bug for assert @
# /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991
for i, q in enumerate(data[start:], start):
open = q['open']
high = q['high']
low = q['low']
close = q['close']
if use_time_index:
index = float64(q['time'])
else:
index = float64(q['index'])
# XXX: ``numba`` issue: https://github.com/numba/numba/issues/8622
# index = float64(q[index_field])
# AND this (probably)
# open, high, low, close, index = q[
# ['open', 'high', 'low', 'close', 'index']]
istart = i * 6
istop = istart + 6
# x,y detail the 6 points which connect all vertices of an ohlc bar
mid: float = index + half_w
x[istart:istop] = (
index + bar_gap,
mid,
mid,
mid,
mid,
index + bar_w - bar_gap,
)
y[istart:istop] = (
open,
open,
low,
high,
close,
close,
)
# specifies that the first edge is never connected to the
# prior bars last edge thus providing a small "gap"/"space"
# between bars determined by ``bar_gap``.
c[istart:istop] = (1, 1, 1, 1, 1, 0)
return x, y, c
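# NOTE (worked example, not from the original source): for a single
# bar at index 0 with open=10, high=12, low=9, close=11, bar_w=1.0
# and bar_gap=0.16 (so mid = 0.5) the loop above emits:
#   x = (0.16, 0.5, 0.5, 0.5, 0.5, 0.84)
#   y = (10.0, 10.0, 9.0, 12.0, 11.0, 11.0)
#   c = (1, 1, 1, 1, 1, 0)
# ie. open-arm, body/wick points, close-arm, with the final point
# left unconnected to the next bar.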
def hl2mxmn(
ohlc: np.ndarray,
index_field: str = 'index',
) -> np.ndarray:
'''
Convert an OHLC struct-array containing 'high'/'low' columns
to a "joined" max/min 1-d array.
'''
index = ohlc[index_field]
hls = ohlc[[
'low',
'high',
]]
mxmn = np.empty(2*hls.size, dtype=np.float64)
x = np.empty(2*hls.size, dtype=np.float64)
trace_hl(hls, mxmn, x, index[0])
x = x + index[0]
return mxmn, x
@njit(
# TODO: the type annots..
# float64[:](float64[:],),
)
def trace_hl(
hl: 'np.ndarray',
out: np.ndarray,
x: np.ndarray,
start: int,
# the "offset" values in the x-domain which
# place the 2 output points around each ``int``
# master index.
margin: float = 0.43,
) -> None:
'''
"Trace" the outline of the high-low values of an ohlc sequence
as a line such that the maximum deviation (aka dispersion) between
bars is preserved.
This routine is expected to modify input arrays in-place.
'''
last_l = hl['low'][0]
last_h = hl['high'][0]
for i in range(hl.size):
row = hl[i]
l, h = row['low'], row['high']
up_diff = h - last_l
down_diff = last_h - l
if up_diff > down_diff:
out[2*i + 1] = h
out[2*i] = last_l
else:
out[2*i + 1] = l
out[2*i] = last_h
last_l = l
last_h = h
x[2*i] = int(i) - margin
x[2*i + 1] = int(i) + margin
return out
def ohlc_flatten(
ohlc: np.ndarray,
use_mxmn: bool = True,
index_field: str = 'index',
) -> tuple[np.ndarray, np.ndarray]:
'''
Convert an OHLCV struct-array into a flat ready-for-line-plotting
1-d array that is 4 times the size with x-domain values distributed
evenly (by 0.5 steps) over each index.
'''
index = ohlc[index_field]
if use_mxmn:
# traces a line optimally over highs to lows
# using numba. NOTE: pretty sure this is faster
# and looks about the same as the below output.
flat, x = hl2mxmn(ohlc)
else:
flat = rfn.structured_to_unstructured(
ohlc[['open', 'high', 'low', 'close']]
).flatten()
x = np.linspace(
start=index[0] - 0.5,
stop=index[-1] + 0.5,
num=len(flat),
)
return x, flat
def slice_from_time(
arr: np.ndarray,
start_t: float,
stop_t: float,
step: int | None = None,
) -> slice:
'''
Calculate array indices mapped from a time range and return them in
a slice.
Given an input array with an epoch `'time'` series entry, calculate
the indices which span the time range and return them in a slice. Presume
each `'time'` step increment is uniform; when the time stamp
series contains gaps (ie. the uniform presumption is untrue) use
``np.searchsorted()`` binary search to look up the appropriate
index.
'''
profiler = Profiler(
msg='slice_from_time()',
disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then,
)
times = arr['time']
t_first = floor(times[0])
t_last = ceil(times[-1])
# the greatest index we can return which slices to the
# end of the input array.
read_i_max = arr.shape[0]
# TODO: require this is always passed in?
if step is None:
step = round(t_last - times[-2])
if step == 0:
step = 1
# compute (presumed) uniform-time-step index offsets
i_start_t = floor(start_t)
read_i_start = floor(((i_start_t - t_first) // step)) - 1
i_stop_t = ceil(stop_t)
# XXX: edge case -> always set stop index to last in array whenever
# the input stop time is detected to be greater than the equiv time
# stamp at that last entry.
if i_stop_t >= t_last:
read_i_stop = read_i_max
else:
read_i_stop = ceil((i_stop_t - t_first) // step) + 1
# always clip outputs to array support
# for read start:
# - never allow a start < the 0 index
# - never allow an end index > the read array len
read_i_start = min(
max(0, read_i_start),
read_i_max - 1,
)
read_i_stop = max(
0,
min(read_i_stop, read_i_max),
)
# check for a larger-than-latest calculated index for the given start
# time, in which case we do a binary search for the correct index.
# NOTE: this is usually the result of a time series with time gaps
# where it is expected that each index step maps to a uniform step
# in the time stamp series.
t_iv_start = times[read_i_start]
if (
t_iv_start > i_start_t
):
# do a binary search for the best index mapping to ``start_t``
# given we measured an overshoot using the uniform-time-step
# calculation from above.
# TODO: once we start caching these per source-array,
# we can just overwrite ``read_i_start`` directly.
new_read_i_start = np.searchsorted(
times,
i_start_t,
side='left',
)
# TODO: minimize binary search work as much as possible:
# - cache these remap values which compensate for gaps in the
# uniform time step basis where we calc a later start
# index for the given input ``start_t``.
# - can we shorten the input search sequence by heuristic?
# up_to_arith_start = index[:read_i_start]
if (
new_read_i_start <= read_i_start
):
# t_diff = t_iv_start - start_t
# print(
# f"WE'RE CUTTING OUT TIME - STEP:{step}\n"
# f'start_t:{start_t} -> 0index start_t:{t_iv_start}\n'
# f'diff: {t_diff}\n'
# f'REMAPPED START i: {read_i_start} -> {new_read_i_start}\n'
# )
read_i_start = new_read_i_start - 1
t_iv_stop = times[read_i_stop - 1]
if (
t_iv_stop > i_stop_t
):
# t_diff = stop_t - t_iv_stop
# print(
# f"WE'RE CUTTING OUT TIME - STEP:{step}\n"
# f'calced iv stop:{t_iv_stop} -> stop_t:{stop_t}\n'
# f'diff: {t_diff}\n'
# # f'SHOULD REMAP STOP: {read_i_start} -> {new_read_i_start}\n'
# )
new_read_i_stop = np.searchsorted(
times[read_i_start:],
# times,
i_stop_t,
side='left',
)
if (
new_read_i_stop <= read_i_stop
):
read_i_stop = read_i_start + new_read_i_stop + 1
# sanity checks for range size
# samples = (i_stop_t - i_start_t) // step
# index_diff = read_i_stop - read_i_start + 1
# if index_diff > (samples + 3):
# breakpoint()
# read-relative indexes: gives a slice where `shm.array[read_slc]`
# will be the data spanning the input time range `start_t` ->
# `stop_t`
read_slc = slice(
int(read_i_start),
int(read_i_stop),
)
profiler(
'slicing complete'
# f'{start_t} -> {abs_slc.start} | {read_slc.start}\n'
# f'{stop_t} -> {abs_slc.stop} | {read_slc.stop}\n'
)
# NOTE: if caller needs absolute buffer indices they can
# slice the buffer abs index like so:
# index = arr['index']
# abs_indx = index[read_slc]
# abs_slc = slice(
# int(abs_indx[0]),
# int(abs_indx[-1]),
# )
return read_slc
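A short usage sketch (the array below is synthetic, not from the source) of ``slice_from_time()`` for a uniformly 1s-stepped series:

import numpy as np

arr = np.zeros(100, dtype=[('time', float), ('close', float)])
arr['time'] = 1_000_000 + np.arange(100)   # 1s-step epoch stamps

read_slc = slice_from_time(
    arr,
    start_t=1_000_010,
    stop_t=1_000_020,
    step=1,
)
in_view = arr[read_slc]   # rows spanning (roughly) the requested range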

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
# piker: trading gear for hackers # piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers) # Copyright (C) Tyler Goodlet (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify # This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by # it under the terms of the GNU Affero General Public License as published by
@ -27,14 +27,13 @@ from multiprocessing.shared_memory import SharedMemory, _USE_POSIX
if _USE_POSIX: if _USE_POSIX:
from _posixshmem import shm_unlink from _posixshmem import shm_unlink
# import msgspec
import numpy as np
from numpy.lib import recfunctions as rfn
import tractor import tractor
import numpy as np
from pydantic import BaseModel
from numpy.lib import recfunctions as rfn
from ..log import get_logger from ..log import get_logger
from ._source import base_iohlc_dtype from ._source import base_iohlc_dtype
from .types import Struct
log = get_logger(__name__) log = get_logger(__name__)
@ -50,11 +49,7 @@ _rt_buffer_start = int((_days_worth - 1) * _secs_in_day)
def cuckoff_mantracker(): def cuckoff_mantracker():
'''
Disable all ``multiprocessing`` "resource tracking" machinery since
it's an absolute multi-threaded mess of non-SC madness.
'''
from multiprocessing import resource_tracker as mantracker from multiprocessing import resource_tracker as mantracker
# Tell the "resource tracker" thing to fuck off. # Tell the "resource tracker" thing to fuck off.
@ -112,39 +107,36 @@ class SharedInt:
log.warning(f'Shm for {name} already unlinked?') log.warning(f'Shm for {name} already unlinked?')
class _Token(Struct, frozen=True): class _Token(BaseModel):
''' '''
Internal representation of a shared memory "token" Internal representation of a shared memory "token"
which can be used to key a system wide post shm entry. which can be used to key a system wide post shm entry.
''' '''
class Config:
frozen = True
shm_name: str # this serves as a "key" value shm_name: str # this serves as a "key" value
shm_first_index_name: str shm_first_index_name: str
shm_last_index_name: str shm_last_index_name: str
dtype_descr: tuple dtype_descr: tuple
size: int # in struct-array index / row terms
@property @property
def dtype(self) -> np.dtype: def dtype(self) -> np.dtype:
return np.dtype(list(map(tuple, self.dtype_descr))).descr return np.dtype(list(map(tuple, self.dtype_descr))).descr
def as_msg(self): def as_msg(self):
return self.to_dict() return self.dict()
@classmethod @classmethod
def from_msg(cls, msg: dict) -> _Token: def from_msg(cls, msg: dict) -> _Token:
if isinstance(msg, _Token): if isinstance(msg, _Token):
return msg return msg
# TODO: native struct decoding
# return _token_dec.decode(msg)
msg['dtype_descr'] = tuple(map(tuple, msg['dtype_descr'])) msg['dtype_descr'] = tuple(map(tuple, msg['dtype_descr']))
return _Token(**msg) return _Token(**msg)
# _token_dec = msgspec.msgpack.Decoder(_Token)
# TODO: this api? # TODO: this api?
# _known_tokens = tractor.ActorVar('_shm_tokens', {}) # _known_tokens = tractor.ActorVar('_shm_tokens', {})
# _known_tokens = tractor.ContextStack('_known_tokens', ) # _known_tokens = tractor.ContextStack('_known_tokens', )
@ -163,7 +155,6 @@ def get_shm_token(key: str) -> _Token:
def _make_token( def _make_token(
key: str, key: str,
size: int,
dtype: Optional[np.dtype] = None, dtype: Optional[np.dtype] = None,
) -> _Token: ) -> _Token:
''' '''
@ -176,8 +167,7 @@ def _make_token(
shm_name=key, shm_name=key,
shm_first_index_name=key + "_first", shm_first_index_name=key + "_first",
shm_last_index_name=key + "_last", shm_last_index_name=key + "_last",
dtype_descr=tuple(np.dtype(dtype).descr), dtype_descr=np.dtype(dtype).descr
size=size,
) )
@ -229,7 +219,6 @@ class ShmArray:
shm_first_index_name=self._first._shm.name, shm_first_index_name=self._first._shm.name,
shm_last_index_name=self._last._shm.name, shm_last_index_name=self._last._shm.name,
dtype_descr=tuple(self._array.dtype.descr), dtype_descr=tuple(self._array.dtype.descr),
size=self._len,
) )
@property @property
@ -444,7 +433,7 @@ class ShmArray:
def open_shm_array( def open_shm_array(
key: Optional[str] = None, key: Optional[str] = None,
size: int = _default_size, # see above size: int = _default_size,
dtype: Optional[np.dtype] = None, dtype: Optional[np.dtype] = None,
readonly: bool = False, readonly: bool = False,
@ -475,8 +464,7 @@ def open_shm_array(
token = _make_token( token = _make_token(
key=key, key=key,
size=size, dtype=dtype
dtype=dtype,
) )
# create single entry arrays for storing an first and last indices # create single entry arrays for storing an first and last indices
@ -528,15 +516,15 @@ def open_shm_array(
# "unlink" created shm on process teardown by # "unlink" created shm on process teardown by
# pushing teardown calls onto actor context stack # pushing teardown calls onto actor context stack
stack = tractor.current_actor().lifetime_stack tractor._actor._lifetime_stack.callback(shmarr.close)
stack.callback(shmarr.close) tractor._actor._lifetime_stack.callback(shmarr.destroy)
stack.callback(shmarr.destroy)
return shmarr return shmarr
def attach_shm_array( def attach_shm_array(
token: tuple[str, str, tuple[str, str]], token: tuple[str, str, tuple[str, str]],
size: int = _default_size,
readonly: bool = True, readonly: bool = True,
) -> ShmArray: ) -> ShmArray:
@ -575,7 +563,7 @@ def attach_shm_array(
raise _err raise _err
shmarr = np.ndarray( shmarr = np.ndarray(
(token.size,), (size,),
dtype=token.dtype, dtype=token.dtype,
buffer=shm.buf buffer=shm.buf
) )
@ -614,8 +602,8 @@ def attach_shm_array(
if key not in _known_tokens: if key not in _known_tokens:
_known_tokens[key] = token _known_tokens[key] = token
# "close" attached shm on actor teardown # "close" attached shm on process teardown
tractor.current_actor().lifetime_stack.callback(sha.close) tractor._actor._lifetime_stack.callback(sha.close)
return sha return sha
@ -643,7 +631,6 @@ def maybe_open_shm_array(
use ``attach_shm_array``. use ``attach_shm_array``.
''' '''
size = kwargs.pop('size', _default_size)
try: try:
# see if we already know this key # see if we already know this key
token = _known_tokens[key] token = _known_tokens[key]
@ -651,11 +638,7 @@ def maybe_open_shm_array(
except KeyError: except KeyError:
log.warning(f"Could not find {key} in shms cache") log.warning(f"Could not find {key} in shms cache")
if dtype: if dtype:
token = _make_token( token = _make_token(key, dtype)
key,
size=size,
dtype=dtype,
)
try: try:
return attach_shm_array(token=token, **kwargs), False return attach_shm_array(token=token, **kwargs), False
except FileNotFoundError: except FileNotFoundError:

View File

@ -23,8 +23,7 @@ import decimal
from bidict import bidict from bidict import bidict
import numpy as np import numpy as np
from pydantic import BaseModel
from .types import Struct
# from numba import from_dtype # from numba import from_dtype
@ -127,7 +126,7 @@ def unpack_fqsn(fqsn: str) -> tuple[str, str, str]:
) )
class Symbol(Struct): class Symbol(BaseModel):
''' '''
I guess this is some kinda container thing for dealing with I guess this is some kinda container thing for dealing with
all the different meta-data formats from brokers? all the different meta-data formats from brokers?
@ -153,7 +152,9 @@ class Symbol(Struct):
info: dict[str, Any], info: dict[str, Any],
suffix: str = '', suffix: str = '',
) -> Symbol: # XXX: like wtf..
# ) -> 'Symbol':
) -> None:
tick_size = info.get('price_tick_size', 0.01) tick_size = info.get('price_tick_size', 0.01)
lot_tick_size = info.get('lot_tick_size', 0.0) lot_tick_size = info.get('lot_tick_size', 0.0)
@ -174,7 +175,9 @@ class Symbol(Struct):
fqsn: str, fqsn: str,
info: dict[str, Any], info: dict[str, Any],
) -> Symbol: # XXX: like wtf..
# ) -> 'Symbol':
) -> None:
broker, key, suffix = unpack_fqsn(fqsn) broker, key, suffix = unpack_fqsn(fqsn)
return cls.from_broker_info( return cls.from_broker_info(
broker, broker,
@ -218,10 +221,6 @@ class Symbol(Struct):
else: else:
return (key, broker) return (key, broker)
@property
def fqsn(self) -> str:
return '.'.join(self.tokens()).lower()
def front_fqsn(self) -> str: def front_fqsn(self) -> str:
''' '''
fqsn = "fully qualified symbol name" fqsn = "fully qualified symbol name"
@ -241,7 +240,7 @@ class Symbol(Struct):
''' '''
tokens = self.tokens() tokens = self.tokens()
fqsn = '.'.join(map(str.lower, tokens)) fqsn = '.'.join(tokens)
return fqsn return fqsn
def iterfqsns(self) -> list[str]: def iterfqsns(self) -> list[str]:

View File

@ -18,24 +18,13 @@
ToOlS fOr CoPInG wITh "tHE wEB" protocols. ToOlS fOr CoPInG wITh "tHE wEB" protocols.
""" """
from contextlib import ( from contextlib import asynccontextmanager, AsyncExitStack
asynccontextmanager,
AsyncExitStack,
)
from itertools import count
from types import ModuleType from types import ModuleType
from typing import ( from typing import Any, Callable, AsyncGenerator
Any,
Optional,
Callable,
AsyncGenerator,
Iterable,
)
import json import json
import trio import trio
import trio_websocket import trio_websocket
from wsproto.utilities import LocalProtocolError
from trio_websocket._impl import ( from trio_websocket._impl import (
ConnectionClosed, ConnectionClosed,
DisconnectionTimeout, DisconnectionTimeout,
@ -46,53 +35,43 @@ from trio_websocket._impl import (
from ..log import get_logger from ..log import get_logger
from .types import Struct
log = get_logger(__name__) log = get_logger(__name__)
class NoBsWs: class NoBsWs:
''' """Make ``trio_websocket`` sockets stay up no matter the bs.
Make ``trio_websocket`` sockets stay up no matter the bs.
You can provide a ``fixture`` async-context-manager which will be """
enter/exitted around each reconnect operation.
'''
recon_errors = ( recon_errors = (
ConnectionClosed, ConnectionClosed,
DisconnectionTimeout, DisconnectionTimeout,
ConnectionRejected, ConnectionRejected,
HandshakeError, HandshakeError,
ConnectionTimeout, ConnectionTimeout,
LocalProtocolError,
) )
def __init__( def __init__(
self, self,
url: str, url: str,
token: str,
stack: AsyncExitStack, stack: AsyncExitStack,
fixture: Optional[Callable] = None, fixture: Callable,
serializer: ModuleType = json serializer: ModuleType = json,
): ):
self.url = url self.url = url
self.token = token
self.fixture = fixture self.fixture = fixture
self._stack = stack self._stack = stack
self._ws: 'WebSocketConnection' = None # noqa self._ws: 'WebSocketConnection' = None # noqa
# TODO: is there some method we can call
# on the underlying `._ws` to get this?
self._connected: bool = False
async def _connect( async def _connect(
self, self,
tries: int = 1000, tries: int = 1000,
) -> None: ) -> None:
self._connected = False
while True: while True:
try: try:
await self._stack.aclose() await self._stack.aclose()
except self.recon_errors: except (DisconnectionTimeout, RuntimeError):
await trio.sleep(0.5) await trio.sleep(0.5)
else: else:
break break
@ -103,18 +82,19 @@ class NoBsWs:
self._ws = await self._stack.enter_async_context( self._ws = await self._stack.enter_async_context(
trio_websocket.open_websocket_url(self.url) trio_websocket.open_websocket_url(self.url)
) )
# rerun user code fixture
if self.fixture is not None: if self.token == '':
# rerun user code fixture
ret = await self._stack.enter_async_context( ret = await self._stack.enter_async_context(
self.fixture(self) self.fixture(self)
) )
else:
ret = await self._stack.enter_async_context(
self.fixture(self, self.token)
)
assert ret is None assert ret is None
log.info(f'Connection success: {self.url}') log.info(f'Connection success: {self.url}')
self._connected = True
return self._ws return self._ws
except self.recon_errors as err: except self.recon_errors as err:
@ -124,15 +104,11 @@ class NoBsWs:
f'{type(err)}...retry attempt {i}' f'{type(err)}...retry attempt {i}'
) )
await trio.sleep(0.5) await trio.sleep(0.5)
self._connected = False
continue continue
else: else:
log.exception('ws connection fail...') log.exception('ws connection fail...')
raise last_err raise last_err
def connected(self) -> bool:
return self._connected
async def send_msg( async def send_msg(
self, self,
data: Any, data: Any,
@ -152,26 +128,21 @@ class NoBsWs:
except self.recon_errors: except self.recon_errors:
await self._connect() await self._connect()
def __aiter__(self):
return self
async def __anext__(self):
return await self.recv_msg()
@asynccontextmanager @asynccontextmanager
async def open_autorecon_ws( async def open_autorecon_ws(
url: str, url: str,
# TODO: proper type cannot smh # TODO: proper type annot smh
fixture: Optional[Callable] = None, fixture: Callable,
# used for authenticated websockets
token: str = '',
) -> AsyncGenerator[tuple[...], NoBsWs]: ) -> AsyncGenerator[tuple[...], NoBsWs]:
"""Apparently we can QoS for all sorts of reasons..so catch em. """Apparently we can QoS for all sorts of reasons..so catch em.
""" """
async with AsyncExitStack() as stack: async with AsyncExitStack() as stack:
ws = NoBsWs(url, stack, fixture=fixture) ws = NoBsWs(url, token, stack, fixture=fixture)
await ws._connect() await ws._connect()
try: try:
@ -179,114 +150,3 @@ async def open_autorecon_ws(
finally: finally:
await stack.aclose() await stack.aclose()
'''
JSONRPC response-request style machinery for transparent multiplexing of msgs
over a NoBsWs.
'''
class JSONRPCResult(Struct):
id: int
jsonrpc: str = '2.0'
result: Optional[dict] = None
error: Optional[dict] = None
@asynccontextmanager
async def open_jsonrpc_session(
url: str,
start_id: int = 0,
response_type: type = JSONRPCResult,
request_type: Optional[type] = None,
request_hook: Optional[Callable] = None,
error_hook: Optional[Callable] = None,
) -> Callable[[str, dict], dict]:
async with (
trio.open_nursery() as n,
open_autorecon_ws(url) as ws
):
rpc_id: Iterable = count(start_id)
rpc_results: dict[int, dict] = {}
async def json_rpc(method: str, params: dict) -> dict:
'''
perform a json rpc call and wait for the result, raising an
exception if an error field is present on the response
'''
msg = {
'jsonrpc': '2.0',
'id': next(rpc_id),
'method': method,
'params': params
}
_id = msg['id']
rpc_results[_id] = {
'result': None,
'event': trio.Event()
}
await ws.send_msg(msg)
await rpc_results[_id]['event'].wait()
ret = rpc_results[_id]['result']
del rpc_results[_id]
if ret.error is not None:
raise Exception(json.dumps(ret.error, indent=4))
return ret
async def recv_task():
'''
receives every ws message and stores it in its corresponding
result field, then sets the event to wake up the original sender
tasks. also receives responses to requests originating from
the server side.
'''
async for msg in ws:
match msg:
case {
'result': _,
'id': mid,
} if res_entry := rpc_results.get(mid):
res_entry['result'] = response_type(**msg)
res_entry['event'].set()
case {
'result': _,
'id': mid,
} if not rpc_results.get(mid):
log.warning(
f'Unexpected ws msg: {json.dumps(msg, indent=4)}'
)
case {
'method': _,
'params': _,
}:
log.debug(f'Received\n{msg}')
if request_hook:
await request_hook(request_type(**msg))
case {
'error': error
}:
log.warning(f'Received\n{error}')
if error_hook:
await error_hook(response_type(**msg))
case _:
log.warning(f'Unhandled JSON-RPC msg!?\n{msg}')
n.start_soon(recv_task)
yield json_rpc
n.cancel_scope.cancel()
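For context, a hedged usage sketch of the JSON-RPC helper above; the endpoint URL and method name here are placeholders, not anything this module ships with:

import trio

async def main():
    async with open_jsonrpc_session(
        'wss://example.com/api/v2',          # hypothetical endpoint
    ) as json_rpc:
        # request/response matched by the auto-incrementing msg id
        resp = await json_rpc('public/get_time', {})
        print(resp.result)

trio.run(main)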

File diff suppressed because it is too large

View File

@ -1,210 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
abstractions for organizing, managing and generally operating-on
real-time data processing data-structures.
"Streams, flumes, cascades and flows.."
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
)
import tractor
import pendulum
import numpy as np
from .types import Struct
from ._source import (
Symbol,
)
from ._sharedmem import (
attach_shm_array,
ShmArray,
_Token,
)
# from .._profile import (
# Profiler,
# pg_profile_enabled,
# )
if TYPE_CHECKING:
# from pyqtgraph import PlotItem
from .feed import Feed
# TODO: ideas for further abstractions as per
# https://github.com/pikers/piker/issues/216 and
# https://github.com/pikers/piker/issues/270:
# - a ``Cascade`` would be the minimal "connection" of 2 ``Flumes``
# as per circuit parlance:
# https://en.wikipedia.org/wiki/Two-port_network#Cascade_connection
# - could cover the combination of our `FspAdmin` and the
# backend `.fsp._engine` related machinery to "connect" one flume
# to another?
# - a (financial signal) ``Flow`` would be a "collection" of such
#   minimal cascades. Some engineering-based jargon concepts:
# - https://en.wikipedia.org/wiki/Signal_chain
# - https://en.wikipedia.org/wiki/Daisy_chain_(electrical_engineering)
# - https://en.wikipedia.org/wiki/Audio_signal_flow
# - https://en.wikipedia.org/wiki/Digital_signal_processing#Implementation
# - https://en.wikipedia.org/wiki/Dataflow_programming
# - https://en.wikipedia.org/wiki/Signal_programming
# - https://en.wikipedia.org/wiki/Incremental_computing
class Flume(Struct):
'''
Composite reference type which points to all the addressing handles
and other meta-data necessary for the read, measure and management
of a set of real-time updated data flows.
Can be thought of as a "flow descriptor" or "flow frame" which
describes the high level properties of a set of data flows that can
be used seamlessly across process-memory boundaries.
Each instance's sub-components normally includes:
- a msg oriented quote stream provided via an IPC transport
- history and real-time shm buffers which are both real-time
updated and backfilled.
- associated startup indexing information related to both buffer
real-time-append and historical prepend addresses.
- low level APIs to read and measure the updated data and manage
queuing properties.
'''
symbol: Symbol
first_quote: dict
_rt_shm_token: _Token
# optional since some data flows won't have a "downsampled" history
# buffer/stream (eg. FSPs).
_hist_shm_token: _Token | None = None
# private shm refs loaded dynamically from tokens
_hist_shm: ShmArray | None = None
_rt_shm: ShmArray | None = None
stream: tractor.MsgStream | None = None
izero_hist: int = 0
izero_rt: int = 0
throttle_rate: int | None = None
# TODO: do we need this really if we can pull the `Portal` from
# ``tractor``'s internals?
feed: Feed | None = None
@property
def rt_shm(self) -> ShmArray:
if self._rt_shm is None:
self._rt_shm = attach_shm_array(
token=self._rt_shm_token,
readonly=True,
)
return self._rt_shm
@property
def hist_shm(self) -> ShmArray:
if self._hist_shm_token is None:
raise RuntimeError(
'No shm token has been set for the history buffer?'
)
if (
self._hist_shm is None
):
self._hist_shm = attach_shm_array(
token=self._hist_shm_token,
readonly=True,
)
return self._hist_shm
async def receive(self) -> dict:
return await self.stream.receive()
def get_ds_info(
self,
) -> tuple[float, float, float]:
'''
Compute the "downsampling" ratio info between the historical shm
buffer and the real-time (HFT) one.
Return a tuple of the fast sample period, historical sample
period and ratio between them.
'''
times = self.hist_shm.array['time']
end = pendulum.from_timestamp(times[-1])
start = pendulum.from_timestamp(times[times != times[-1]][-1])
hist_step_size_s = (end - start).seconds
times = self.rt_shm.array['time']
end = pendulum.from_timestamp(times[-1])
start = pendulum.from_timestamp(times[times != times[-1]][-1])
rt_step_size_s = (end - start).seconds
ratio = hist_step_size_s / rt_step_size_s
return (
rt_step_size_s,
hist_step_size_s,
ratio,
)
# TODO: get native msgspec decoding for these working
def to_msg(self) -> dict:
msg = self.to_dict()
msg['symbol'] = msg['symbol'].to_dict()
# can't serialize the stream or feed objects, it's expected
# you'll have a ref to it since this msg should be rxed on
# a stream on whatever far end IPC..
msg.pop('stream')
msg.pop('feed')
return msg
@classmethod
def from_msg(cls, msg: dict) -> dict:
symbol = Symbol(**msg.pop('symbol'))
return cls(
symbol=symbol,
**msg,
)
def get_index(
self,
time_s: float,
array: np.ndarray,
) -> int | float:
'''
Return the array shm-buffer index for an epoch time.
'''
times = array['time']
first = np.searchsorted(
times,
time_s,
side='left',
)
imx = times.shape[0] - 1
return min(first, imx)
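As a rough illustration (numbers invented) of the two read-side helpers above: ``get_ds_info()`` on a 60s history buffer paired with a 1s real-time buffer returns ``(1, 60, 60.0)``, and ``get_index()`` is effectively a clipped left-bisection on the 'time' column:

import numpy as np

times = np.array([100.0, 101.0, 102.0, 103.0])
first = np.searchsorted(times, 102.5, side='left')   # -> 3
index = min(first, times.shape[0] - 1)               # clip to last row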

View File

@ -37,8 +37,8 @@ import time
from math import isnan from math import isnan
from bidict import bidict from bidict import bidict
from msgspec.msgpack import encode, decode import msgpack
# import pyqtgraph as pg import pyqtgraph as pg
import numpy as np import numpy as np
import tractor import tractor
from trio_websocket import open_websocket_url from trio_websocket import open_websocket_url
@ -56,7 +56,6 @@ if TYPE_CHECKING:
from .feed import maybe_open_feed from .feed import maybe_open_feed
from ..log import get_logger, get_console_log from ..log import get_logger, get_console_log
from .._profile import Profiler
log = get_logger(__name__) log = get_logger(__name__)
@ -132,10 +131,7 @@ def start_marketstore(
mktsdir = os.path.join(config._config_dir, 'marketstore') mktsdir = os.path.join(config._config_dir, 'marketstore')
# create dirs when dne # create when dne
if not os.path.isdir(config._config_dir):
os.mkdir(config._config_dir)
if not os.path.isdir(mktsdir): if not os.path.isdir(mktsdir):
os.mkdir(mktsdir) os.mkdir(mktsdir)
@ -391,54 +387,50 @@ class Storage:
async def load( async def load(
self, self,
fqsn: str, fqsn: str,
timeframe: int,
) -> tuple[ ) -> tuple[
np.ndarray, # timeframe sampled array-series dict[int, np.ndarray], # timeframe (in secs) to series
Optional[datetime], # first dt Optional[datetime], # first dt
Optional[datetime], # last dt Optional[datetime], # last dt
]: ]:
first_tsdb_dt, last_tsdb_dt = None, None first_tsdb_dt, last_tsdb_dt = None, None
hist = await self.read_ohlcv( tsdb_arrays = await self.read_ohlcv(
fqsn, fqsn,
# on first load we don't need to pull the max # on first load we don't need to pull the max
# history per request size worth. # history per request size worth.
limit=3000, limit=3000,
timeframe=timeframe,
) )
log.info(f'Loaded tsdb history {hist}') log.info(f'Loaded tsdb history {tsdb_arrays}')
if len(hist): if tsdb_arrays:
times = hist['Epoch'] fastest = list(tsdb_arrays.values())[0]
times = fastest['Epoch']
first, last = times[0], times[-1] first, last = times[0], times[-1]
first_tsdb_dt, last_tsdb_dt = map( first_tsdb_dt, last_tsdb_dt = map(
pendulum.from_timestamp, [first, last] pendulum.from_timestamp, [first, last]
) )
return ( return tsdb_arrays, first_tsdb_dt, last_tsdb_dt
hist, # array-data
first_tsdb_dt, # start of query-frame
last_tsdb_dt, # most recent
)
async def read_ohlcv( async def read_ohlcv(
self, self,
fqsn: str, fqsn: str,
timeframe: int | str, timeframe: Optional[Union[int, str]] = None,
end: Optional[int] = None, end: Optional[int] = None,
limit: int = int(800e3), limit: int = int(800e3),
) -> np.ndarray: ) -> tuple[
MarketstoreClient,
Union[dict, np.ndarray]
]:
client = self.client client = self.client
syms = await client.list_symbols() syms = await client.list_symbols()
if fqsn not in syms: if fqsn not in syms:
return {} return {}
# use the provided timeframe or 1s by default tfstr = tf_in_1s[1]
tfstr = tf_in_1s.get(timeframe, tf_in_1s[1])
params = Params( params = Params(
symbols=fqsn, symbols=fqsn,
@ -452,72 +444,58 @@ class Storage:
limit=limit, limit=limit,
) )
try: if timeframe is None:
log.info(f'starting {fqsn} tsdb granularity scan..')
# loop through and try to find highest granularity
for tfstr in tf_in_1s.values():
try:
log.info(f'querying for {tfstr}@{fqsn}')
params.set('timeframe', tfstr)
result = await client.query(params)
break
except purerpc.grpclib.exceptions.UnknownError:
# XXX: this is already logged by the container and
# thus shows up through `marketstored` logs relay.
# log.warning(f'{tfstr}@{fqsn} not found')
continue
else:
return {}
else:
result = await client.query(params) result = await client.query(params)
except purerpc.grpclib.exceptions.UnknownError as err:
# indicate there is no history for this timeframe
log.exception(
f'Unknown mkts QUERY error: {params}\n'
f'{err.args}'
)
return {}
# TODO: it turns out column access on recarrays is actually slower: # TODO: it turns out column access on recarrays is actually slower:
# https://jakevdp.github.io/PythonDataScienceHandbook/02.09-structured-data-numpy.html#RecordArrays:-Structured-Arrays-with-a-Twist # https://jakevdp.github.io/PythonDataScienceHandbook/02.09-structured-data-numpy.html#RecordArrays:-Structured-Arrays-with-a-Twist
# it might make sense to make these structured arrays? # it might make sense to make these structured arrays?
data_set = result.by_symbols()[fqsn] # Fill out a `numpy` array-results map
array = data_set.array arrays = {}
for fqsn, data_set in result.by_symbols().items():
arrays.setdefault(fqsn, {})[
tf_in_1s.inverse[data_set.timeframe]
] = data_set.array
# XXX: ensure sample rate is as expected return arrays[fqsn][timeframe] if timeframe else arrays[fqsn]
time = data_set.array['Epoch']
if len(time) > 1:
time_step = time[-1] - time[-2]
ts = tf_in_1s.inverse[data_set.timeframe]
if time_step != ts:
log.warning(
f'MKTS BUG: wrong timeframe loaded: {time_step}'
'YOUR DATABASE LIKELY CONTAINS BAD DATA FROM AN OLD BUG'
f'WIPING HISTORY FOR {ts}s'
)
await self.delete_ts(fqsn, timeframe)
# try reading again..
return await self.read_ohlcv(
fqsn,
timeframe,
end,
limit,
)
return array
async def delete_ts( async def delete_ts(
self, self,
key: str, key: str,
timeframe: Optional[Union[int, str]] = None, timeframe: Optional[Union[int, str]] = None,
fmt: str = 'OHLCV',
) -> bool: ) -> bool:
client = self.client client = self.client
syms = await client.list_symbols() syms = await client.list_symbols()
print(syms) print(syms)
if key not in syms: # if key not in syms:
raise KeyError(f'`{key}` table key not found in\n{syms}?') # raise KeyError(f'`{fqsn}` table key not found?')
tbk = mk_tbk(( return await client.destroy(tbk=key)
key,
tf_in_1s.get(timeframe, tf_in_1s[60]),
fmt,
))
return await client.destroy(tbk=tbk)
async def write_ohlcv( async def write_ohlcv(
self, self,
fqsn: str, fqsn: str,
ohlcv: np.ndarray, ohlcv: np.ndarray,
timeframe: int,
append_and_duplicate: bool = True, append_and_duplicate: bool = True,
limit: int = int(800e3), limit: int = int(800e3),
@ -541,18 +519,17 @@ class Storage:
m, r = divmod(len(mkts_array), limit) m, r = divmod(len(mkts_array), limit)
tfkey = tf_in_1s[timeframe]
for i in range(m, 1): for i in range(m, 1):
to_push = mkts_array[i-1:i*limit] to_push = mkts_array[i-1:i*limit]
# write to db # write to db
resp = await self.client.write( resp = await self.client.write(
to_push, to_push,
tbk=f'{fqsn}/{tfkey}/OHLCV', tbk=f'{fqsn}/1Sec/OHLCV',
# NOTE: will append duplicates # NOTE: will append duplicates
# for the same timestamp-index. # for the same timestamp-index.
# TODO: pre-deduplicate? # TODO: pre deduplicate?
isvariablelength=append_and_duplicate, isvariablelength=append_and_duplicate,
) )
@ -571,7 +548,7 @@ class Storage:
# write to db # write to db
resp = await self.client.write( resp = await self.client.write(
to_push, to_push,
tbk=f'{fqsn}/{tfkey}/OHLCV', tbk=f'{fqsn}/1Sec/OHLCV',
# NOTE: will append duplicates # NOTE: will append duplicates
# for the same timestamp-index. # for the same timestamp-index.
@ -600,7 +577,6 @@ class Storage:
# def delete_range(self, start_dt, end_dt) -> None: # def delete_range(self, start_dt, end_dt) -> None:
# ... # ...
@acm @acm
async def open_storage_client( async def open_storage_client(
fqsn: str, fqsn: str,
@ -650,7 +626,7 @@ async def tsdb_history_update(
# * the original data feed arch blurb: # * the original data feed arch blurb:
# - https://github.com/pikers/piker/issues/98 # - https://github.com/pikers/piker/issues/98
# #
profiler = Profiler( profiler = pg.debug.Profiler(
disabled=False, # not pg_profile_enabled(), disabled=False, # not pg_profile_enabled(),
delayed=False, delayed=False,
) )
@ -662,35 +638,34 @@ async def tsdb_history_update(
[fqsn], [fqsn],
start_stream=False, start_stream=False,
) as feed, ) as (feed, stream),
): ):
profiler(f'opened feed for {fqsn}') profiler(f'opened feed for {fqsn}')
# to_append = feed.hist_shm.array to_append = feed.shm.array
# to_prepend = None to_prepend = None
if fqsn: if fqsn:
flume = feed.flumes[fqsn] symbol = feed.symbols.get(fqsn)
symbol = flume.symbol
if symbol: if symbol:
fqsn = symbol.fqsn fqsn = symbol.front_fqsn()
# diff db history with shm and only write the missing portions # diff db history with shm and only write the missing portions
# ohlcv = flume.hist_shm.array ohlcv = feed.shm.array
# TODO: use pg profiler # TODO: use pg profiler
# for secs in (1, 60): tsdb_arrays = await storage.read_ohlcv(fqsn)
# tsdb_array = await storage.read_ohlcv( # hist diffing
# fqsn, if tsdb_arrays:
# timeframe=timeframe, for secs in (1, 60):
# ) ts = tsdb_arrays.get(secs)
# # hist diffing: if ts is not None and len(ts):
# # these aren't currently used but can be referenced from # these aren't currently used but can be referenced from
# # within the embedded ipython shell below. # within the embedded ipython shell below.
# to_append = ohlcv[ohlcv['time'] > ts['Epoch'][-1]] to_append = ohlcv[ohlcv['time'] > ts['Epoch'][-1]]
# to_prepend = ohlcv[ohlcv['time'] < ts['Epoch'][0]] to_prepend = ohlcv[ohlcv['time'] < ts['Epoch'][0]]
# profiler('Finished db arrays diffs') profiler('Finished db arrays diffs')
syms = await storage.client.list_symbols() syms = await storage.client.list_symbols()
log.info(f'Existing tsdb symbol set:\n{pformat(syms)}') log.info(f'Existing tsdb symbol set:\n{pformat(syms)}')
@ -799,13 +774,12 @@ async def stream_quotes(
async with open_websocket_url(f'ws://{host}:{port}/ws') as ws: async with open_websocket_url(f'ws://{host}:{port}/ws') as ws:
# send subs topics to server # send subs topics to server
resp = await ws.send_message( resp = await ws.send_message(
msgpack.dumps({'streams': list(tbks.values())})
encode({'streams': list(tbks.values())})
) )
log.info(resp) log.info(resp)
async def recv() -> dict[str, Any]: async def recv() -> dict[str, Any]:
return decode((await ws.get_message()), encoding='utf-8') return msgpack.loads((await ws.get_message()), encoding='utf-8')
streams = (await recv())['streams'] streams = (await recv())['streams']
log.info(f"Subscribed to {streams}") log.info(f"Subscribed to {streams}")

View File

@ -1,88 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Guillermo Rodriguez (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Built-in (extension) types.
"""
import sys
from typing import Optional
from pprint import pformat
import msgspec
class Struct(
msgspec.Struct,
# https://jcristharif.com/msgspec/structs.html#tagged-unions
# tag='pikerstruct',
# tag=True,
):
'''
A "human friendlier" (aka repl buddy) struct subtype.
'''
def to_dict(self) -> dict:
return {
f: getattr(self, f)
for f in self.__struct_fields__
}
# Lul, doesn't seem to work that well..
# def __repr__(self):
# # only turn on pprint when we detect a python REPL
# # at runtime B)
# if (
# hasattr(sys, 'ps1')
# # TODO: check if we're in pdb
# ):
# return self.pformat()
# return super().__repr__()
def pformat(self) -> str:
return f'Struct({pformat(self.to_dict())})'
def copy(
self,
update: Optional[dict] = None,
) -> msgspec.Struct:
'''
Validate-typecast all self defined fields, return a copy of us
with all such fields.
This is kinda like the default behaviour in `pydantic.BaseModel`.
'''
if update:
for k, v in update.items():
setattr(self, k, v)
# roundtrip serialize to validate
return msgspec.msgpack.Decoder(
type=type(self)
).decode(
msgspec.msgpack.Encoder().encode(self)
)
def typecast(
self,
# fields: Optional[list[str]] = None,
) -> None:
for fname, ftype in self.__annotations__.items():
setattr(self, fname, ftype(getattr(self, fname)))
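A brief usage sketch (field names invented) of how this ``Struct`` base was typically subclassed across the codebase:

class Ticker(Struct):
    symbol: str
    price: float = 0.0

t = Ticker(symbol='xbtusd', price=29_000.0)
print(t.to_dict())    # {'symbol': 'xbtusd', 'price': 29000.0}
print(t.pformat())    # repl-friendly pretty form

# `.copy()` round-trips the struct through msgspec to re-validate fields
t2 = t.copy(update={'price': 29_500.0})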

View File

@ -78,8 +78,7 @@ class Fsp:
# + the consuming fsp *to* the consumers output # + the consuming fsp *to* the consumers output
# shm flow. # shm flow.
_flow_registry: dict[ _flow_registry: dict[
tuple[_Token, str], tuple[_Token, str], _Token,
tuple[_Token, Optional[ShmArray]],
] = {} ] = {}
def __init__( def __init__(
@ -121,6 +120,7 @@ class Fsp:
): ):
return self.func(*args, **kwargs) return self.func(*args, **kwargs)
# TODO: lru_cache this? pretty sure it'll work?
def get_shm( def get_shm(
self, self,
src_shm: ShmArray, src_shm: ShmArray,
@ -131,27 +131,12 @@ class Fsp:
for this "instance" of a signal processor for for this "instance" of a signal processor for
the given ``key``. the given ``key``.
The destination shm "token" and array are cached if possible to
minimize multiple stdlib/system calls.
''' '''
dst_token, maybe_array = self._flow_registry[ dst_token = self._flow_registry[
(src_shm._token, self.name) (src_shm._token, self.name)
] ]
if maybe_array is None: shm = attach_shm_array(dst_token)
self._flow_registry[ return shm
(src_shm._token, self.name)
] = (
dst_token,
# "cache" the ``ShmArray`` such that
# we call the underlying "attach" code as few
# times as possible as per:
# - https://github.com/pikers/piker/issues/359
# - https://github.com/pikers/piker/issues/332
maybe_array := attach_shm_array(dst_token)
)
return maybe_array
def fsp( def fsp(
@ -199,10 +184,7 @@ def maybe_mk_fsp_shm(
# TODO: load output types from `Fsp` # TODO: load output types from `Fsp`
# - should `index` be a required internal field? # - should `index` be a required internal field?
fsp_dtype = np.dtype( fsp_dtype = np.dtype(
[('index', int)] [('index', int)] +
+
[('time', float)]
+
[(field_name, float) for field_name in target.outputs] [(field_name, float) for field_name in target.outputs]
) )

View File

@ -21,13 +21,12 @@ core task logic for processing chains
from dataclasses import dataclass from dataclasses import dataclass
from functools import partial from functools import partial
from typing import ( from typing import (
AsyncIterator, AsyncIterator, Callable, Optional,
Callable,
Optional,
Union, Union,
) )
import numpy as np import numpy as np
import pyqtgraph as pg
import trio import trio
from trio_typing import TaskStatus from trio_typing import TaskStatus
import tractor import tractor
@ -36,22 +35,14 @@ from tractor.msg import NamespacePath
from ..log import get_logger, get_console_log from ..log import get_logger, get_console_log
from .. import data from .. import data
from ..data import attach_shm_array from ..data import attach_shm_array
from ..data.feed import ( from ..data.feed import Feed
Flume,
Feed,
)
from ..data._sharedmem import ShmArray from ..data._sharedmem import ShmArray
from ..data._sampling import (
_default_delay_s,
open_sample_stream,
)
from ..data._source import Symbol from ..data._source import Symbol
from ._api import ( from ._api import (
Fsp, Fsp,
_load_builtins, _load_builtins,
_Token, _Token,
) )
from .._profile import Profiler
log = get_logger(__name__) log = get_logger(__name__)
@ -86,7 +77,7 @@ async def filter_quotes_by_sym(
async def fsp_compute( async def fsp_compute(
symbol: Symbol, symbol: Symbol,
flume: Flume, feed: Feed,
quote_stream: trio.abc.ReceiveChannel, quote_stream: trio.abc.ReceiveChannel,
src: ShmArray, src: ShmArray,
@ -99,7 +90,7 @@ async def fsp_compute(
) -> None: ) -> None:
profiler = Profiler( profiler = pg.debug.Profiler(
delayed=False, delayed=False,
disabled=True disabled=True
) )
@ -114,17 +105,16 @@ async def fsp_compute(
filter_quotes_by_sym(fqsn, quote_stream), filter_quotes_by_sym(fqsn, quote_stream),
# XXX: currently the ``ohlcv`` arg # XXX: currently the ``ohlcv`` arg
flume.rt_shm, feed.shm,
) )
# HISTORY COMPUTE PHASE # Conduct a single iteration of fsp with historical bars input
# conduct a single iteration of fsp with historical bars input # and get historical output
# and get historical output.
history_output: Union[ history_output: Union[
dict[str, np.ndarray], # multi-output case dict[str, np.ndarray], # multi-output case
np.ndarray, # single output case np.ndarray, # single output case
] ]
history_output = await anext(out_stream) history_output = await out_stream.__anext__()
func_name = func.__name__ func_name = func.__name__
profiler(f'{func_name} generated history') profiler(f'{func_name} generated history')
@ -136,13 +126,9 @@ async def fsp_compute(
# each respective field. # each respective field.
fields = getattr(dst.array.dtype, 'fields', None).copy() fields = getattr(dst.array.dtype, 'fields', None).copy()
fields.pop('index') fields.pop('index')
history_by_field: Optional[np.ndarray] = None history: Optional[np.ndarray] = None # TODO: nptyping here!
src_time = src.array['time']
if ( if fields and len(fields) > 1 and fields:
fields and
len(fields) > 1
):
if not isinstance(history_output, dict): if not isinstance(history_output, dict):
raise ValueError( raise ValueError(
f'`{func_name}` is a multi-output FSP and should yield a ' f'`{func_name}` is a multi-output FSP and should yield a '
@ -153,7 +139,7 @@ async def fsp_compute(
if key in history_output: if key in history_output:
output = history_output[key] output = history_output[key]
if history_by_field is None: if history is None:
if output is None: if output is None:
length = len(src.array) length = len(src.array)
@ -163,7 +149,7 @@ async def fsp_compute(
# using the first output, determine # using the first output, determine
# the length of the struct-array that # the length of the struct-array that
# will be pushed to shm. # will be pushed to shm.
history_by_field = np.zeros( history = np.zeros(
length, length,
dtype=dst.array.dtype dtype=dst.array.dtype
) )
@ -171,7 +157,7 @@ async def fsp_compute(
if output is None: if output is None:
continue continue
history_by_field[key] = output history[key] = output
# single-key output stream # single-key output stream
else: else:
@ -180,15 +166,11 @@ async def fsp_compute(
f'`{func_name}` is a single output FSP and should yield an ' f'`{func_name}` is a single output FSP and should yield an '
'`np.ndarray` for history' '`np.ndarray` for history'
) )
history_by_field = np.zeros( history = np.zeros(
len(history_output), len(history_output),
dtype=dst.array.dtype dtype=dst.array.dtype
) )
history_by_field[func_name] = history_output history[func_name] = history_output
history_by_field['time'] = src_time[-len(history_by_field):]
history_output['time'] = src.array['time']
# TODO: XXX: # TODO: XXX:
# THERE'S A BIG BUG HERE WITH THE `index` field since we're # THERE'S A BIG BUG HERE WITH THE `index` field since we're
@ -205,10 +187,7 @@ async def fsp_compute(
# TODO: can we use this `start` flag instead of the manual # TODO: can we use this `start` flag instead of the manual
# setting above? # setting above?
index = dst.push( index = dst.push(history, start=first)
history_by_field,
start=first,
)
profiler(f'{func_name} pushed history') profiler(f'{func_name} pushed history')
profiler.finish() profiler.finish()
@ -234,14 +213,8 @@ async def fsp_compute(
log.debug(f"{func_name}: {processed}") log.debug(f"{func_name}: {processed}")
key, output = processed key, output = processed
# dst.array[-1][key] = output index = src.index
dst.array[[key, 'time']][-1] = ( dst.array[-1][key] = output
output,
# TODO: what about pushing ``time.time_ns()``
# in which case we'll need to round at the graphics
# processing / sampling layer?
src.array[-1]['time']
)
# NOTE: for now we aren't streaming this to the consumer # NOTE: for now we aren't streaming this to the consumer
# stream latest array index entry which basically just acts # stream latest array index entry which basically just acts
@ -252,7 +225,6 @@ async def fsp_compute(
# N-consumers who subscribe for the real-time output, # N-consumers who subscribe for the real-time output,
# which we'll likely want to implement using local-mem # which we'll likely want to implement using local-mem
# chans for the fan out? # chans for the fan out?
# index = src.index
# if attach_stream: # if attach_stream:
# await client_stream.send(index) # await client_stream.send(index)
@ -289,7 +261,7 @@ async def cascade(
destination shm array buffer. destination shm array buffer.
''' '''
profiler = Profiler( profiler = pg.debug.Profiler(
delayed=False, delayed=False,
disabled=False disabled=False
) )
@ -312,10 +284,9 @@ async def cascade(
# TODO: ugh i hate this wind/unwind to list over the wire # TODO: ugh i hate this wind/unwind to list over the wire
# but not sure how else to do it. # but not sure how else to do it.
for (token, fsp_name, dst_token) in shm_registry: for (token, fsp_name, dst_token) in shm_registry:
Fsp._flow_registry[( Fsp._flow_registry[
_Token.from_msg(token), (_Token.from_msg(token), fsp_name)
fsp_name, ] = _Token.from_msg(dst_token)
)] = _Token.from_msg(dst_token), None
fsp: Fsp = reg.get( fsp: Fsp = reg.get(
NamespacePath(ns_path) NamespacePath(ns_path)
@ -327,7 +298,6 @@ async def cascade(
raise ValueError(f'Unknown fsp target: {ns_path}') raise ValueError(f'Unknown fsp target: {ns_path}')
# open a data feed stream with requested broker # open a data feed stream with requested broker
feed: Feed
async with data.feed.maybe_open_feed( async with data.feed.maybe_open_feed(
[fqsn], [fqsn],
@ -337,13 +307,14 @@ async def cascade(
# needs to get throttled the ticks we generate. # needs to get throttled the ticks we generate.
# tick_throttle=60, # tick_throttle=60,
) as feed: ) as (feed, quote_stream):
symbol = feed.symbols[fqsn]
flume = feed.flumes[fqsn]
symbol = flume.symbol
assert src.token == flume.rt_shm.token
profiler(f'{func}: feed up') profiler(f'{func}: feed up')
assert src.token == feed.shm.token
# last_len = new_len = len(src.array)
func_name = func.__name__ func_name = func.__name__
async with ( async with (
trio.open_nursery() as n, trio.open_nursery() as n,
@ -353,8 +324,8 @@ async def cascade(
fsp_compute, fsp_compute,
symbol=symbol, symbol=symbol,
flume=flume, feed=feed,
quote_stream=flume.stream, quote_stream=quote_stream,
# shm # shm
src=src, src=src,
@ -390,7 +361,7 @@ async def cascade(
) -> tuple[TaskTracker, int]: ) -> tuple[TaskTracker, int]:
# TODO: adopt an incremental update engine/approach # TODO: adopt an incremental update engine/approach
# where possible here eventually! # where possible here eventually!
log.info(f're-syncing fsp {func_name} to source') log.debug(f're-syncing fsp {func_name} to source')
tracker.cs.cancel() tracker.cs.cancel()
await tracker.complete.wait() await tracker.complete.wait()
tracker, index = await n.start(fsp_target) tracker, index = await n.start(fsp_target)
@ -403,16 +374,14 @@ async def cascade(
'key': dst_shm_token, 'key': dst_shm_token,
'first': dst._first.value, 'first': dst._first.value,
'last': dst._last.value, 'last': dst._last.value,
} }})
})
return tracker, index return tracker, index
def is_synced( def is_synced(
src: ShmArray, src: ShmArray,
dst: ShmArray dst: ShmArray
) -> tuple[bool, int, int]: ) -> tuple[bool, int, int]:
''' '''Predicate to determine if a destination FSP
Predicate to determine if a destination FSP
output array is aligned to its source array. output array is aligned to its source array.
''' '''
@ -421,15 +390,16 @@ async def cascade(
return not ( return not (
# the source is likely backfilling and we must # the source is likely backfilling and we must
# sync history calculations # sync history calculations
len_diff > 2 len_diff > 2 or
# we aren't step synced to the source and may be # we aren't step synced to the source and may be
# leading/lagging by a step # leading/lagging by a step
or step_diff > 1 step_diff > 1 or
or step_diff < 0 step_diff < 0
), step_diff, len_diff ), step_diff, len_diff
async def poll_and_sync_to_step( async def poll_and_sync_to_step(
tracker: TaskTracker, tracker: TaskTracker,
src: ShmArray, src: ShmArray,
dst: ShmArray, dst: ShmArray,
@ -448,23 +418,18 @@ async def cascade(
# detect sample period step for subscription to increment # detect sample period step for subscription to increment
# signal # signal
times = src.array['time'] times = src.array['time']
if len(times) > 1: delay_s = times[-1] - times[times != times[-1]][-1]
last_ts = times[-1]
delay_s = float(last_ts - times[times != last_ts][-1])
else:
# our default "HFT" sample rate.
delay_s = _default_delay_s
# sub and increment the underlying shared memory buffer # Increment the underlying shared memory buffer on every
# on every step msg received from the global `samplerd` # "increment" msg received from the underlying data feed.
# service. async with feed.index_stream(
async with open_sample_stream(float(delay_s)) as istream: int(delay_s)
) as istream:
profiler(f'{func_name}: sample stream up') profiler(f'{func_name}: sample stream up')
profiler.finish() profiler.finish()
async for i in istream: async for _ in istream:
# print(f'FSP incrementing {i}')
# respawn the compute task if the source # respawn the compute task if the source
# array has been updated such that we compute # array has been updated such that we compute
@ -493,23 +458,3 @@ async def cascade(
last = array[-1:].copy() last = array[-1:].copy()
dst.push(last) dst.push(last)
# sync with source buffer's time step
src_l2 = src.array[-2:]
src_li, src_lt = src_l2[-1][['index', 'time']]
src_2li, src_2lt = src_l2[-2][['index', 'time']]
dst._array['time'][src_li] = src_lt
dst._array['time'][src_2li] = src_2lt
# last2 = dst.array[-2:]
# if (
# last2[-1]['index'] != src_li
# or last2[-2]['index'] != src_2li
# ):
# dstl2 = list(last2)
# srcl2 = list(src_l2)
# print(
# # f'{dst.token}\n'
# f'src: {srcl2}\n'
# f'dst: {dstl2}\n'
# )

View File

@ -234,7 +234,7 @@ async def flow_rates(
# FSPs, user input, and possibly any general event stream in # FSPs, user input, and possibly any general event stream in
# real-time. Hint: ideally implemented with caching until mutated # real-time. Hint: ideally implemented with caching until mutated
# ;) # ;)
period: 'Param[int]' = 1, # noqa period: 'Param[int]' = 6, # noqa
# TODO: support other means by providing a map # TODO: support other means by providing a map
# to weights `partial()`-ed with `wma()`? # to weights `partial()`-ed with `wma()`?
@ -268,7 +268,8 @@ async def flow_rates(
'dark_dvlm_rate': None, 'dark_dvlm_rate': None,
} }
quote = await anext(source) # TODO: 3.10 do ``anext()``
quote = await source.__anext__()
# ltr = 0 # ltr = 0
# lvr = 0 # lvr = 0

View File

@ -1,998 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
Personal/Private position parsing, calculating, summarizing in a way
that doesn't try to cuk most humans who prefer to not lose their moneys..
(looking at you `ib` and dirt-bird friends)
'''
from contextlib import contextmanager as cm
from pprint import pformat
import os
from os import path
from math import copysign
import re
import time
from typing import (
Any,
Iterator,
Optional,
Union,
)
import pendulum
from pendulum import datetime, now
import tomli
import toml
from . import config
from .brokers import get_brokermod
from .clearing._messages import BrokerdPosition, Status
from .data._source import Symbol
from .log import get_logger
from .data.types import Struct
log = get_logger(__name__)
@cm
def open_trade_ledger(
broker: str,
account: str,
) -> dict:
'''
Idempotently create and read in a trade log file from the
``<configuration_dir>/ledgers/`` directory.
Files are named per broker account of the form
``trades_<brokername>_<accountname>.toml``. The ``accountname`` here is the
name as defined in the user's ``brokers.toml`` config.
'''
ldir = path.join(config._config_dir, 'ledgers')
if not path.isdir(ldir):
os.makedirs(ldir)
fname = f'trades_{broker}_{account}.toml'
tradesfile = path.join(ldir, fname)
if not path.isfile(tradesfile):
log.info(
f'Creating new local trades ledger: {tradesfile}'
)
with open(tradesfile, 'w') as cf:
pass # touch
with open(tradesfile, 'rb') as cf:
start = time.time()
ledger = tomli.load(cf)
print(f'Ledger load took {time.time() - start}s')
cpy = ledger.copy()
try:
yield cpy
finally:
if cpy != ledger:
# TODO: show diff output?
# https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
print(f'Updating ledger for {tradesfile}:\n')
ledger.update(cpy)
# we write on close the mutated ledger data
with open(tradesfile, 'w') as cf:
toml.dump(ledger, cf)
class Transaction(Struct, frozen=True):
# TODO: should this be ``.to`` (see below)?
fqsn: str
tid: Union[str, int] # unique transaction id
size: float
price: float
cost: float # commissions or other additional costs
dt: datetime
expiry: Optional[datetime] = None
# optional key normally derived from the broker
# backend which ensures the instrument-symbol this record
# is for is truly unique.
bsuid: Optional[Union[str, int]] = None
# optional fqsn for the source "asset"/money symbol?
# from: Optional[str] = None
def iter_by_dt(
clears: dict[str, Any],
) -> Iterator[tuple[str, dict]]:
'''
Iterate entries of a ``clears: dict`` table sorted by entry recorded
datetime presumably set at the ``'dt'`` field in each entry.
'''
for tid, data in sorted(
list(clears.items()),
key=lambda item: item[1]['dt'],
):
yield tid, data
class Position(Struct):
'''
Basic pp (personal/piker position) model with attached clearing
transaction history.
'''
symbol: Symbol
# can be +ve or -ve for long/short
size: float
# "breakeven price" above or below which pnl moves above and below
# zero for the entirety of the current "trade state".
ppu: float
# unique backend symbol id
bsuid: str
split_ratio: Optional[int] = None
# ordered record of known constituent trade messages
clears: dict[
Union[str, int, Status], # trade id
dict[str, Any], # transaction history summaries
] = {}
first_clear_dt: Optional[datetime] = None
expiry: Optional[datetime] = None
def to_dict(self) -> dict:
return {
f: getattr(self, f)
for f in self.__struct_fields__
}
def to_pretoml(self) -> tuple[str, dict]:
'''
Prep this position's data contents for export to toml including
re-structuring of the ``.clears`` table to an array of
inline-subtables for better ``pps.toml`` compactness.
'''
d = self.to_dict()
clears = d.pop('clears')
expiry = d.pop('expiry')
if self.split_ratio is None:
d.pop('split_ratio')
# should be obvious from clears/event table
d.pop('first_clear_dt')
# TODO: we need to figure out how to have one top level
# listing venue here even when the backend isn't providing
# it via the trades ledger..
# drop symbol obj in serialized form
s = d.pop('symbol')
fqsn = s.front_fqsn()
if self.expiry is None:
d.pop('expiry', None)
elif expiry:
d['expiry'] = str(expiry)
toml_clears_list = []
# reverse sort so latest clears are at top of section?
for tid, data in iter_by_dt(clears):
inline_table = toml.TomlDecoder().get_empty_inline_table()
# serialize datetime to parsable `str`
inline_table['dt'] = str(data['dt'])
# insert optional clear fields in column order
for k in ['ppu', 'accum_size']:
val = data.get(k)
if val:
inline_table[k] = val
# insert required fields
for k in ['price', 'size', 'cost']:
inline_table[k] = data[k]
inline_table['tid'] = tid
toml_clears_list.append(inline_table)
d['clears'] = toml_clears_list
return fqsn, d
def ensure_state(self) -> None:
'''
Audit the `.size` and `.ppu` local instance vars against the
clears table calculations; if they differ, overwrite them with
the calc-ed values and log warnings to console.
'''
clears = list(self.clears.values())
self.first_clear_dt = min(list(entry['dt'] for entry in clears))
last_clear = clears[-1]
csize = self.calc_size()
accum = last_clear['accum_size']
if not self.expired():
if (
csize != accum
and csize != round(accum * (self.split_ratio or 1))
):
raise ValueError(f'Size mismatch: {csize}')
else:
assert csize == 0, 'Contract is expired but non-zero size?'
if self.size != csize:
log.warning(
'Position state mismatch:\n'
f'{self.size} => {csize}'
)
self.size = csize
cppu = self.calc_ppu()
ppu = last_clear['ppu']
if (
cppu != ppu
and self.split_ratio is not None
# handle any split info entered (for now) manually by user
and cppu != (ppu / self.split_ratio)
):
raise ValueError(f'PPU mismatch: {cppu}')
if self.ppu != cppu:
log.warning(
'Position state mismatch:\n'
f'{self.ppu} => {cppu}'
)
self.ppu = cppu
def update_from_msg(
self,
msg: BrokerdPosition,
) -> None:
# XXX: better place to do this?
symbol = self.symbol
lot_size_digits = symbol.lot_size_digits
ppu, size = (
round(
msg['avg_price'],
ndigits=symbol.tick_size_digits
),
round(
msg['size'],
ndigits=lot_size_digits
),
)
self.ppu = ppu
self.size = size
@property
def dsize(self) -> float:
'''
The "dollar" size of the pp, normally in trading (fiat) unit
terms.
'''
return self.ppu * self.size
# TODO: idea: "real LIFO" dynamic positioning.
# - when a trade takes place where the pnl for
# the (set of) trade(s) is below the breakeven price
# it may be that the trader took a +ve pnl on a short(er)
# term trade in the same account.
# - in this case we could recalc the be price to
# be reverted back to its prior value before the nearest term
# trade was opened?
# def lifo_price() -> float:
# ...
def iter_clears(self) -> Iterator[tuple[str, dict]]:
'''
Iterate the internally managed ``.clears: dict`` table in
datetime-stamped order.
'''
return iter_by_dt(self.clears)
def calc_ppu(
self,
# include transaction cost in breakeven price
# and presume the worst case of the same cost
# to exit this transaction (even though in reality
# it will be dynamic based on exit strategy).
cost_scalar: float = 2,
) -> float:
'''
Compute the "price-per-unit" price for the given non-zero sized
rolling position.
The recurrence relation which computes this (exponential) mean
per new clear which **increases** the accumulative position size
is:
ppu[-1] = (
ppu[-2] * accum_size[-2]
+
price * size
) / accum_size[-1]
where `cost_basis` for the current step is simply the price
* size of the most recent clearing transaction.
'''
asize_h: list[float] = [] # historical accumulative size
ppu_h: list[float] = [] # historical price-per-unit
tid: str
entry: dict[str, Any]
for (tid, entry) in self.iter_clears():
clear_size = entry['size']
clear_price = entry['price']
last_accum_size = asize_h[-1] if asize_h else 0
accum_size = last_accum_size + clear_size
accum_sign = copysign(1, accum_size)
sign_change: bool = False
if accum_size == 0:
ppu_h.append(0)
asize_h.append(0)
continue
# test if the pp somehow went "passed" a net zero size state
# resulting in a change of the "sign" of the size (+ve for
# long, -ve for short).
sign_change = (
copysign(1, last_accum_size) + accum_sign == 0
and last_accum_size != 0
)
# since we passed the net-zero-size state the new size
# after sum should be the remaining size in the new
# "direction" (aka, long vs. short) for this clear.
if sign_change:
clear_size = accum_size
abs_diff = abs(accum_size)
asize_h.append(0)
ppu_h.append(0)
else:
# old size minus the new size gives us size diff with
# +ve -> increase in pp size
# -ve -> decrease in pp size
abs_diff = abs(accum_size) - abs(last_accum_size)
# XXX: LIFO breakeven price update. only an increase in size
# of the position contributes to the breakeven price,
# a decrease does not (i.e. the position is being made
# smaller).
# abs_clear_size = abs(clear_size)
abs_new_size = abs(accum_size)
if abs_diff > 0:
cost_basis = (
# cost basis for this clear
clear_price * abs(clear_size)
+
# transaction cost
accum_sign * cost_scalar * entry['cost']
)
if asize_h:
size_last = abs(asize_h[-1])
cb_last = ppu_h[-1] * size_last
ppu = (cost_basis + cb_last) / abs_new_size
else:
ppu = cost_basis / abs_new_size
ppu_h.append(ppu)
asize_h.append(accum_size)
else:
# on "exit" clears from a given direction,
# only the size changes, not the price-per-unit,
# since the ppu remains constant and simply gets
# weighted by the new size.
asize_h.append(accum_size)
ppu_h.append(ppu_h[-1])
final_ppu = ppu_h[-1] if ppu_h else 0
# handle any split info entered (for now) manually by user
if self.split_ratio is not None:
final_ppu /= self.split_ratio
return final_ppu
def expired(self) -> bool:
'''
Predicate which checks if the contract/instrument is past its expiry.
'''
return bool(self.expiry) and self.expiry < now()
def calc_size(self) -> float:
'''
Calculate the unit size of this position in the destination
asset using the clears/trade event table; zero if expired.
'''
size: float = 0
# time-expired pps (normally derivatives) are "closed"
# and have a zero size.
if self.expired():
return 0
for tid, entry in self.clears.items():
size += entry['size']
if self.split_ratio is not None:
size = round(size * self.split_ratio)
return size
def minimize_clears(
self,
) -> dict[str, dict]:
'''
Minimize the position's clears entries by removing
all transactions before the last net zero size to avoid
unnecessary history irrelevant to the current pp state.
'''
size: float = 0
clears_since_zero: list[tuple[str, dict]] = []
# TODO: we might just want to always do this when iterating
# a ledger? keep a state of the last net-zero and only do the
# full iterate when no state was stashed?
# scan for the last "net zero" position by iterating
# transactions until the next net-zero size, rinse, repeat.
for tid, clear in self.clears.items():
size += clear['size']
clears_since_zero.append((tid, clear))
if size == 0:
clears_since_zero.clear()
self.clears = dict(clears_since_zero)
return self.clears
def add_clear(
self,
t: Transaction,
) -> dict:
'''
Update clearing table and populate rolling ppu and accumulative
size in both the clears entry and local attrs state.
'''
clear = self.clears[t.tid] = {
'cost': t.cost,
'price': t.price,
'size': t.size,
'dt': t.dt,
}
# TODO: compute these incrementally instead
# of re-looping through each time resulting in O(n**2)
# behaviour..?
# NOTE: we compute these **after** adding the entry in order to
# make the recurrence relation math work inside
# ``.calc_ppu()``.
self.size = clear['accum_size'] = self.calc_size()
self.ppu = clear['ppu'] = self.calc_ppu()
return clear
def sugest_split(self) -> float:
...
class PpTable(Struct):
brokername: str
acctid: str
pps: dict[str, Position]
conf: Optional[dict] = {}
def update_from_trans(
self,
trans: dict[str, Transaction],
cost_scalar: float = 2,
) -> dict[str, Position]:
pps = self.pps
updated: dict[str, Position] = {}
# lifo update all pps from records
for tid, t in trans.items():
pp = pps.setdefault(
t.bsuid,
# if no existing pp, allocate fresh one.
Position(
Symbol.from_fqsn(
t.fqsn,
info={},
),
size=0.0,
ppu=0.0,
bsuid=t.bsuid,
expiry=t.expiry,
)
)
clears = pp.clears
if clears:
first_clear_dt = pp.first_clear_dt
# don't do updates for ledger records we already have
# included in the current pps state.
if (
t.tid in clears
or first_clear_dt and t.dt < first_clear_dt
):
# NOTE: likely you'll see repeats of the same
# ``Transaction`` passed in here if/when you are restarting
# a ``brokerd.ib`` where the API will re-report trades from
# the current session, so we need to make sure we don't
# "double count" these in pp calculations.
continue
# update clearing table
pp.add_clear(t)
updated[t.bsuid] = pp
# minimize clears tables and update sizing.
for bsuid, pp in updated.items():
pp.ensure_state()
return updated
def dump_active(
self,
) -> tuple[
dict[str, Position],
dict[str, Position]
]:
'''
Iterate all tabulated positions, render active positions to
a ``dict`` format amenable to serialization (via TOML) and drop
from state (``.pps``) as well as return in a ``dict`` all
``Position``s which have recently closed.
'''
# NOTE: newly closed position are also important to report/return
# since a consumer, like an order mode UI ;), might want to react
# based on the closure (for example removing the breakeven line
# and clearing the entry from any lists/monitors).
closed_pp_objs: dict[str, Position] = {}
open_pp_objs: dict[str, Position] = {}
pp_objs = self.pps
for bsuid in list(pp_objs):
pp = pp_objs[bsuid]
# XXX: debug hook for size mismatches
# qqqbsuid = 320227571
# if bsuid == qqqbsuid:
# breakpoint()
pp.ensure_state()
if (
# "net-zero" is a "closed" position
pp.size == 0
# time-expired pps (normally derivatives) are "closed"
or (pp.expiry and pp.expiry < now())
):
# for expired cases
pp.size = 0
# NOTE: we DO NOT pop the pp here since it can still be
# used to check for duplicate clears that may come in as
# new transaction from some backend API and need to be
# ignored; the closed positions won't be written to the
# ``pps.toml`` since ``pp_active_entries`` above is what's
# written.
closed_pp_objs[bsuid] = pp
else:
open_pp_objs[bsuid] = pp
return open_pp_objs, closed_pp_objs
def to_toml(
self,
) -> dict[str, Any]:
active, closed = self.dump_active()
# ONLY dict-serialize all active positions; those that are closed
# we don't store in the ``pps.toml``.
to_toml_dict = {}
for bsuid, pos in active.items():
# keep the minimal amount of clears that make up this
# position since the last net-zero state.
pos.minimize_clears()
pos.ensure_state()
# serialize to pre-toml form
fqsn, asdict = pos.to_pretoml()
log.info(f'Updating active pp: {fqsn}')
# XXX: ugh, it's cuz we push the section under
# the broker name.. maybe we need to rethink this?
brokerless_key = fqsn.removeprefix(f'{self.brokername}.')
to_toml_dict[brokerless_key] = asdict
return to_toml_dict
def write_config(self) -> None:
'''
Write the current position table to the user's ``pps.toml``.
'''
# TODO: show diff output?
# https://stackoverflow.com/questions/12956957/print-diff-of-python-dictionaries
print(f'Updating ``pps.toml`` for {path}:\n')
# active, closed_pp_objs = table.dump_active()
pp_entries = self.to_toml()
self.conf[self.brokername][self.acctid] = pp_entries
# TODO: why tf haven't they already done this for inline
# tables smh..
enc = PpsEncoder(preserve=True)
# table_bs_type = type(toml.TomlDecoder().get_empty_inline_table())
enc.dump_funcs[
toml.decoder.InlineTableDict
] = enc.dump_inline_table
config.write(
self.conf,
'pps',
encoder=enc,
)
def load_pps_from_ledger(
brokername: str,
acctname: str,
# post normalization filter on ledger entries to be processed
filter_by: Optional[list[dict]] = None,
) -> tuple[
dict[str, Transaction],
dict[str, Position],
]:
'''
Open a ledger file by broker name and account and read in and
process any trade records into our normalized ``Transaction`` form
and then update the equivalent ``PpTable`` and deliver the two
bsuid-mapped dict-sets of the transactions and pps.
'''
with (
open_trade_ledger(brokername, acctname) as ledger,
open_pps(brokername, acctname) as table,
):
if not ledger:
# null case, no ledger file with content
return {}, {}
mod = get_brokermod(brokername)
src_records: dict[str, Transaction] = mod.norm_trade_records(ledger)
if filter_by:
records = {}
bsuids = set(filter_by)
for tid, r in src_records.items():
if r.bsuid in bsuids:
records[tid] = r
else:
records = src_records
updated = table.update_from_trans(records)
return records, updated
# TODO: instead see if we can hack tomli and tomli-w to do the same:
# - https://github.com/hukkin/tomli
# - https://github.com/hukkin/tomli-w
class PpsEncoder(toml.TomlEncoder):
'''
Special "styled" encoder that makes a ``pps.toml`` redable and
compact by putting `.clears` tables inline and everything else
flat-ish.
'''
separator = ','
def dump_list(self, v):
'''
Dump an inline list with a newline after every element and
with consideration for denoted inline table types.
'''
retval = "[\n"
for u in v:
if isinstance(u, toml.decoder.InlineTableDict):
out = self.dump_inline_table(u)
else:
out = str(self.dump_value(u))
retval += " " + out + "," + "\n"
retval += "]"
return retval
def dump_inline_table(self, section):
"""Preserve inline table in its compact syntax instead of expanding
into subsection.
https://github.com/toml-lang/toml#user-content-inline-table
"""
val_list = []
for k, v in section.items():
# if isinstance(v, toml.decoder.InlineTableDict):
if isinstance(v, dict):
val = self.dump_inline_table(v)
else:
val = str(self.dump_value(v))
val_list.append(k + " = " + val)
retval = "{ " + ", ".join(val_list) + " }"
return retval
def dump_sections(self, o, sup):
retstr = ""
if sup != "" and sup[-1] != ".":
sup += '.'
retdict = self._dict()
arraystr = ""
for section in o:
qsection = str(section)
value = o[section]
if not re.match(r'^[A-Za-z0-9_-]+$', section):
qsection = toml.encoder._dump_str(section)
# arrayoftables = False
if (
self.preserve
and isinstance(value, toml.decoder.InlineTableDict)
):
retstr += (
qsection
+
" = "
+
self.dump_inline_table(o[section])
+
'\n' # only on the final terminating left brace
)
# XXX: this code i'm pretty sure is just blatantly bad
# and/or wrong..
# if isinstance(o[section], list):
# for a in o[section]:
# if isinstance(a, dict):
# arrayoftables = True
# if arrayoftables:
# for a in o[section]:
# arraytabstr = "\n"
# arraystr += "[[" + sup + qsection + "]]\n"
# s, d = self.dump_sections(a, sup + qsection)
# if s:
# if s[0] == "[":
# arraytabstr += s
# else:
# arraystr += s
# while d:
# newd = self._dict()
# for dsec in d:
# s1, d1 = self.dump_sections(d[dsec], sup +
# qsection + "." +
# dsec)
# if s1:
# arraytabstr += ("[" + sup + qsection +
# "." + dsec + "]\n")
# arraytabstr += s1
# for s1 in d1:
# newd[dsec + "." + s1] = d1[s1]
# d = newd
# arraystr += arraytabstr
elif isinstance(value, dict):
retdict[qsection] = o[section]
elif o[section] is not None:
retstr += (
qsection
+
" = "
+
str(self.dump_value(o[section]))
)
# if not isinstance(value, dict):
if not isinstance(value, toml.decoder.InlineTableDict):
# inline tables should not contain newlines:
# https://toml.io/en/v1.0.0#inline-table
retstr += '\n'
else:
raise ValueError(value)
retstr += arraystr
return (retstr, retdict)
@cm
def open_pps(
brokername: str,
acctid: str,
write_on_exit: bool = True,
) -> PpTable:
'''
Read out broker-specific position entries from
incremental update file: ``pps.toml``.
'''
conf, path = config.load('pps')
brokersection = conf.setdefault(brokername, {})
pps = brokersection.setdefault(acctid, {})
# TODO: ideally we can pass in an existing
# pps state to this right? such that we
# don't have to do a ledger reload all the
# time.. a couple ideas I can think of,
# - mirror this in some client side actor which
# does the actual ledger updates (say the paper
# engine proc if we decide to always spawn it?),
# - do diffs against updates from the ledger writer
# actor and the in-mem state here?
pp_objs = {}
table = PpTable(
brokername,
acctid,
pp_objs,
conf=conf,
)
# unmarshal/load ``pps.toml`` config entries into object form
# and update `PpTable` obj entries.
for fqsn, entry in pps.items():
bsuid = entry['bsuid']
# convert clears sub-tables (only in this form
# for toml re-presentation) back into a master table.
clears_list = entry['clears']
# index clears entries in "object" form by tid in a top
# level dict instead of a list (as is presented in our
# ``pps.toml``).
clears = pp_objs.setdefault(bsuid, {})
# TODO: should we make a ``Struct`` for clear/event entries?
# convert "clear events table" from the toml config (list of
# a dicts) and load it into object form for use in position
# processing of new clear events.
trans: list[Transaction] = []
for clears_table in clears_list:
tid = clears_table.pop('tid')
dtstr = clears_table['dt']
dt = pendulum.parse(dtstr)
clears_table['dt'] = dt
trans.append(Transaction(
fqsn=bsuid,
bsuid=bsuid,
tid=tid,
size=clears_table['size'],
price=clears_table['price'],
cost=clears_table['cost'],
dt=dt,
))
clears[tid] = clears_table
size = entry['size']
# TODO: remove, but handle old field name for now
ppu = entry.get('ppu', entry.get('be_price', 0))
split_ratio = entry.get('split_ratio')
expiry = entry.get('expiry')
if expiry:
expiry = pendulum.parse(expiry)
pp = pp_objs[bsuid] = Position(
Symbol.from_fqsn(fqsn, info={}),
size=size,
ppu=ppu,
split_ratio=split_ratio,
expiry=expiry,
bsuid=entry['bsuid'],
)
# XXX: super critical, we need to be sure to include
# all pps.toml clears to avoid reusing clears that were
# already included in the current incremental update
# state, since today's records may have already been
# processed!
for t in trans:
pp.add_clear(t)
# audit entries loaded from toml
pp.ensure_state()
try:
yield table
finally:
if write_on_exit:
table.write_config()
if __name__ == '__main__':
import sys
args = sys.argv
assert len(args) > 1, 'Specify account(s) from `brokers.toml`'
args = args[1:]
for acctid in args:
broker, name = acctid.split('.')
trans, updated_pps = load_pps_from_ledger(broker, name)
print(
f'Processing transactions into pps for {broker}:{acctid}\n'
f'{pformat(trans)}\n\n'
f'{pformat(updated_pps)}'
)

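As a quick sanity check on the ``calc_ppu()`` recurrence documented in the deleted module above, a hand-worked sketch (hypothetical fills, transaction costs ignored, and the helper name is illustrative, not part of the codebase):

def ppu_step(prev_ppu, prev_size, price, size):
    # mirrors the recurrence: only clears which *increase* the position
    # move the breakeven price; exit clears leave it untouched.
    new_size = prev_size + size
    if abs(new_size) > abs(prev_size):
        return (prev_ppu * abs(prev_size) + price * abs(size)) / abs(new_size)
    return prev_ppu

assert ppu_step(0, 0, 100.0, 10) == 100.0       # buy 10 @ 100
assert ppu_step(100.0, 10, 110.0, 10) == 105.0  # buy 10 more @ 110 -> ppu 105
assert ppu_step(105.0, 20, 120.0, -5) == 105.0  # sell 5 @ 120 -> ppu unchanged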
View File

@ -32,22 +32,16 @@ def mk_marker_path(
style: str, style: str,
) -> QGraphicsPathItem: ) -> QGraphicsPathItem:
''' """Add a marker to be displayed on the line wrapped in a ``QGraphicsPathItem``
Add a marker to be displayed on the line wrapped in ready to be placed using scene coordinates (not view).
a ``QGraphicsPathItem`` ready to be placed using scene coordinates
(not view).
**Arguments** **Arguments**
style String indicating the style of marker to add: style String indicating the style of marker to add:
``'<|'``, ``'|>'``, ``'>|'``, ``'|<'``, ``'<|>'``, ``'<|'``, ``'|>'``, ``'>|'``, ``'|<'``, ``'<|>'``,
``'>|<'``, ``'^'``, ``'v'``, ``'o'`` ``'>|<'``, ``'^'``, ``'v'``, ``'o'``
size Size of the marker in pixels.
This code is taken nearly verbatim from the """
`InfiniteLine.addMarker()` method but does not attempt do be aware
of low(er) level graphics controls and expects for the output
polygon to be applied to a ``QGraphicsPathItem``.
'''
path = QtGui.QPainterPath() path = QtGui.QPainterPath()
if style == 'o': if style == 'o':
@ -93,8 +87,7 @@ def mk_marker_path(
class LevelMarker(QGraphicsPathItem): class LevelMarker(QGraphicsPathItem):
''' '''An arrow marker path graphich which redraws itself
''' '''An arrow marker path graphic which redraws itself
An arrow marker path graphic which redraws itself
''' '''
@ -111,8 +104,7 @@ class LevelMarker(QGraphicsPathItem):
# get polygon and scale # get polygon and scale
super().__init__() super().__init__()
# self.setScale(size, size) self.scale(size, size)
self.setScale(size)
# internally generates path # internally generates path
self._style = None self._style = None
@ -122,7 +114,6 @@ class LevelMarker(QGraphicsPathItem):
self.get_level = get_level self.get_level = get_level
self._on_paint = on_paint self._on_paint = on_paint
self.scene_x = lambda: chart.marker_right_points()[1] self.scene_x = lambda: chart.marker_right_points()[1]
self.level: float = 0 self.level: float = 0
self.keep_in_view = keep_in_view self.keep_in_view = keep_in_view
@ -158,9 +149,12 @@ class LevelMarker(QGraphicsPathItem):
def w(self) -> float: def w(self) -> float:
return self.path_br().width() return self.path_br().width()
def position_in_view(self) -> None: def position_in_view(
''' self,
Show a pp off-screen indicator for a level label. # level: float,
) -> None:
'''Show a pp off-screen indicator for a level label.
This is like in fps games where you have a gps "nav" indicator This is like in fps games where you have a gps "nav" indicator
but your teammate is outside the range of view, except in 2D, on but your teammate is outside the range of view, except in 2D, on
@ -168,6 +162,7 @@ class LevelMarker(QGraphicsPathItem):
''' '''
level = self.get_level() level = self.get_level()
view = self.chart.getViewBox() view = self.chart.getViewBox()
vr = view.state['viewRange'] vr = view.state['viewRange']
ymn, ymx = vr[1] ymn, ymx = vr[1]
@ -191,6 +186,7 @@ class LevelMarker(QGraphicsPathItem):
) )
elif level < ymn: # pin to bottom of view elif level < ymn: # pin to bottom of view
self.setPos( self.setPos(
QPointF( QPointF(
x, x,
@ -215,8 +211,7 @@ class LevelMarker(QGraphicsPathItem):
w: QtWidgets.QWidget w: QtWidgets.QWidget
) -> None: ) -> None:
''' '''Core paint which we override to always update
Core paint which we override to always update
our marker position in scene coordinates from a our marker position in scene coordinates from a
view coordinate "level". view coordinate "level".
@ -240,12 +235,11 @@ def qgo_draw_markers(
right_offset: float, right_offset: float,
) -> float: ) -> float:
''' """Paint markers in ``pg.GraphicsItem`` style by first
Paint markers in ``pg.GraphicsItem`` style by first
removing the view transform for the painter, drawing the markers removing the view transform for the painter, drawing the markers
in scene coords, then restoring the view coords. in scene coords, then restoring the view coords.
''' """
# paint markers in native coordinate system # paint markers in native coordinate system
orig_tr = p.transform() orig_tr = p.transform()

View File

@ -19,16 +19,15 @@ Main app startup and run.
''' '''
from functools import partial from functools import partial
from types import ModuleType
from PyQt5.QtCore import QEvent from PyQt5.QtCore import QEvent
import trio import trio
from .._daemon import maybe_spawn_brokerd from .._daemon import maybe_spawn_brokerd
from ..brokers import get_brokermod
from . import _event from . import _event
from ._exec import run_qtractor from ._exec import run_qtractor
from ..data.feed import install_brokerd_search from ..data.feed import install_brokerd_search
from ..data._source import unpack_fqsn
from . import _search from . import _search
from ._chart import GodWidget from ._chart import GodWidget
from ..log import get_logger from ..log import get_logger
@ -37,26 +36,27 @@ log = get_logger(__name__)
async def load_provider_search( async def load_provider_search(
brokermod: str,
broker: str,
loglevel: str, loglevel: str,
) -> None: ) -> None:
name = brokermod.name log.info(f'loading brokerd for {broker}..')
log.info(f'loading brokerd for {name}..')
async with ( async with (
maybe_spawn_brokerd( maybe_spawn_brokerd(
name, broker,
loglevel=loglevel loglevel=loglevel
) as portal, ) as portal,
install_brokerd_search( install_brokerd_search(
portal, portal,
brokermod, get_brokermod(broker),
), ),
): ):
# keep search engine stream up until cancelled # keep search engine stream up until cancelled
await trio.sleep_forever() await trio.sleep_forever()
@ -66,8 +66,8 @@ async def _async_main(
# implicit required argument provided by ``qtractor_run()`` # implicit required argument provided by ``qtractor_run()``
main_widget: GodWidget, main_widget: GodWidget,
syms: list[str], sym: str,
brokers: dict[str, ModuleType], brokernames: str,
loglevel: str, loglevel: str,
) -> None: ) -> None:
@ -78,8 +78,6 @@ async def _async_main(
""" """
from . import _display from . import _display
from ._pg_overrides import _do_overrides
_do_overrides()
godwidget = main_widget godwidget = main_widget
@ -99,11 +97,6 @@ async def _async_main(
sbar = godwidget.window.status_bar sbar = godwidget.window.status_bar
starting_done = sbar.open_status('starting ze sexy chartz') starting_done = sbar.open_status('starting ze sexy chartz')
needed_brokermods: dict[str, ModuleType] = {}
for fqsn in syms:
brokername, *_ = unpack_fqsn(fqsn)
needed_brokermods[brokername] = brokers[brokername]
async with ( async with (
trio.open_nursery() as root_n, trio.open_nursery() as root_n,
): ):
@ -114,14 +107,18 @@ async def _async_main(
# setup search widget and focus main chart view at startup # setup search widget and focus main chart view at startup
# search widget is a singleton alongside the godwidget # search widget is a singleton alongside the godwidget
search = _search.SearchWidget(godwidget=godwidget) search = _search.SearchWidget(godwidget=godwidget)
# search.bar.unfocus() search.bar.unfocus()
# godwidget.hbox.addWidget(search)
godwidget.hbox.addWidget(search)
godwidget.search = search godwidget.search = search
symbol, _, provider = sym.rpartition('.')
# this internally starts a ``display_symbol_data()`` task above # this internally starts a ``display_symbol_data()`` task above
order_mode_ready = await godwidget.load_symbols( order_mode_ready = await godwidget.load_symbol(
fqsns=syms, provider,
loglevel=loglevel, symbol,
loglevel
) )
# spin up a search engine for the local cached symbol set # spin up a search engine for the local cached symbol set
@ -138,12 +135,8 @@ async def _async_main(
): ):
# load other providers into search **after** # load other providers into search **after**
# the chart's select cache # the chart's select cache
for brokername, mod in needed_brokermods.items(): for broker in brokernames:
root_n.start_soon( root_n.start_soon(load_provider_search, broker, loglevel)
load_provider_search,
mod,
loglevel,
)
await order_mode_ready.wait() await order_mode_ready.wait()
@ -172,22 +165,19 @@ async def _async_main(
def _main( def _main(
syms: list[str], sym: str,
brokermods: list[ModuleType], brokernames: [str],
piker_loglevel: str, piker_loglevel: str,
tractor_kwargs, tractor_kwargs,
) -> None: ) -> None:
''' '''
Sync entry point to start a chart: a ``tractor`` + Qt runtime. Sync entry point to start a chart: a ``tractor`` + Qt runtime
entry point
''' '''
run_qtractor( run_qtractor(
func=_async_main, func=_async_main,
args=( args=(sym, brokernames, piker_loglevel),
syms, main_widget=GodWidget,
{mod.name: mod for mod in brokermods},
piker_loglevel,
),
main_widget_type=GodWidget,
tractor_kwargs=tractor_kwargs, tractor_kwargs=tractor_kwargs,
) )

View File

@ -18,7 +18,6 @@
Chart axes graphics and behavior. Chart axes graphics and behavior.
""" """
from __future__ import annotations
from functools import lru_cache from functools import lru_cache
from typing import Optional, Callable from typing import Optional, Callable
from math import floor from math import floor
@ -28,7 +27,6 @@ import pyqtgraph as pg
from PyQt5 import QtCore, QtGui, QtWidgets from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QPointF from PyQt5.QtCore import QPointF
from . import _pg_overrides as pgo
from ..data._source import float_digits from ..data._source import float_digits
from ._label import Label from ._label import Label
from ._style import DpiAwareFont, hcolor, _font from ._style import DpiAwareFont, hcolor, _font
@ -41,17 +39,12 @@ class Axis(pg.AxisItem):
''' '''
A better axis that sizes tick contents considering font size. A better axis that sizes tick contents considering font size.
Also includes tick values lru caching, originally proposed upstream but
never accepted:
https://github.com/pyqtgraph/pyqtgraph/pull/2160
''' '''
def __init__( def __init__(
self, self,
plotitem: pgo.PlotItem, linkedsplits,
typical_max_str: str = '100 000.000 ', typical_max_str: str = '100 000.000',
text_color: str = 'bracket', text_color: str = 'bracket',
lru_cache_tick_strings: bool = True,
**kwargs **kwargs
) -> None: ) -> None:
@ -63,78 +56,41 @@ class Axis(pg.AxisItem):
# XXX: pretty sure this makes things slower # XXX: pretty sure this makes things slower
# self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache) # self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
self.pi = plotitem self.linkedsplits = linkedsplits
self._dpi_font = _font self._dpi_font = _font
self.setTickFont(_font.font) self.setTickFont(_font.font)
font_size = self._dpi_font.font.pixelSize() font_size = self._dpi_font.font.pixelSize()
style_conf = {
'textFillLimits': [(0, 0.5)],
'tickFont': self._dpi_font.font,
}
text_offset = None
if self.orientation in ('bottom',): if self.orientation in ('bottom',):
text_offset = floor(0.25 * font_size) text_offset = floor(0.25 * font_size)
elif self.orientation in ('left', 'right'): elif self.orientation in ('left', 'right'):
text_offset = floor(font_size / 2) text_offset = floor(font_size / 2)
if text_offset: self.setStyle(**{
style_conf.update({ 'textFillLimits': [(0, 0.5)],
# offset of text *away from* axis line in px 'tickFont': self._dpi_font.font,
# use approx. half the font pixel size (height)
'tickTextOffset': text_offset, # offset of text *away from* axis line in px
}) # use approx. half the font pixel size (height)
'tickTextOffset': text_offset,
})
self.setStyle(**style_conf)
self.setTickFont(_font.font) self.setTickFont(_font.font)
# NOTE: this is for surrounding "border" # NOTE: this is for surrounding "border"
self.setPen(_axis_pen) self.setPen(_axis_pen)
# this is the text color # this is the text color
# self.setTextPen(pg.mkPen(hcolor(text_color)))
self.text_color = text_color self.text_color = text_color
# generate a bounding rect based on sizing to a "typical"
# maximum length-ed string defined as init default.
self.typical_br = _font._qfm.boundingRect(typical_max_str) self.typical_br = _font._qfm.boundingRect(typical_max_str)
# size the pertinent axis dimension to a "typical value" # size the pertinent axis dimension to a "typical value"
self.size_to_values() self.size_to_values()
# NOTE: requires override ``.tickValues()`` method seen below.
if lru_cache_tick_strings:
self.tickStrings = lru_cache(
maxsize=2**20
)(self.tickStrings)
# axis "sticky" labels
self._stickies: dict[str, YAxisLabel] = {}
# NOTE: only overridden to cast tick value entries into tuples
# for use with the lru caching.
def tickValues(
self,
minVal: float,
maxVal: float,
size: int,
) -> list[tuple[float, tuple[str]]]:
'''
Repack tick values into tuples for lru caching.
'''
ticks = []
for scalar, values in super().tickValues(minVal, maxVal, size):
ticks.append((
scalar,
tuple(values), # this
))
return ticks
@property @property
def text_color(self) -> str: def text_color(self) -> str:
return self._text_color return self._text_color
@ -150,38 +106,6 @@ class Axis(pg.AxisItem):
def txt_offsets(self) -> tuple[int, int]: def txt_offsets(self) -> tuple[int, int]:
return tuple(self.style['tickTextOffset']) return tuple(self.style['tickTextOffset'])
def add_sticky(
self,
pi: pgo.PlotItem,
name: None | str = None,
digits: None | int = 2,
bg_color='default',
fg_color='black',
) -> YAxisLabel:
# if the sticky is for our symbol
# use the tick size precision for display
name = name or pi.name
digits = digits or 2
# TODO: ``._ysticks`` should really be an attr on each
# ``PlotItem`` now instead of the containing widget (because of
# overlays) ?
# add y-axis "last" value label
sticky = self._stickies[name] = YAxisLabel(
pi=pi,
parent=self,
digits=digits, # TODO: pass this from symbol data
opacity=0.9, # slight see-through
bg_color=bg_color,
fg_color=fg_color,
)
pi.sigRangeChanged.connect(sticky.update_on_resize)
return sticky
class PriceAxis(Axis): class PriceAxis(Axis):
@ -243,6 +167,7 @@ class PriceAxis(Axis):
self._min_tick = size self._min_tick = size
def size_to_values(self) -> None: def size_to_values(self) -> None:
# self.typical_br = _font._qfm.boundingRect(typical_max_str)
self.setWidth(self.typical_br.width()) self.setWidth(self.typical_br.width())
# XXX: drop for now since it just eats up h space # XXX: drop for now since it just eats up h space
@ -297,50 +222,28 @@ class DynamicDateAxis(Axis):
) -> list[str]: ) -> list[str]:
# XX: ARGGGGG AG:LKSKDJF:LKJSDFD chart = self.linkedsplits.chart
chart = self.pi.chart_widget flow = chart._flows[chart.name]
shm = flow.shm
bars = shm.array
first = shm._first.value
viz = chart._vizs[chart.name] bars_len = len(bars)
shm = viz.shm times = bars['time']
array = shm.array
times = array['time']
i_0, i_l = times[0], times[-1]
# edge cases epochs = times[list(
if ( map(
not indexes int,
or filter(
(indexes[0] < i_0 lambda i: i > 0 and i < bars_len,
and indexes[-1] < i_l) (i-first for i in indexes)
or
(indexes[0] > i_0
and indexes[-1] > i_l)
):
return []
if viz.index_field == 'index':
arr_len = times.shape[0]
first = shm._first.value
epochs = times[
list(
map(
int,
filter(
lambda i: i > 0 and i < arr_len,
(i - first for i in indexes)
)
)
) )
] )
else: )]
epochs = list(map(int, indexes))
# TODO: **don't** have this hard coded shift to EST # TODO: **don't** have this hard coded shift to EST
# delay = times[-1] - times[-2] # delay = times[-1] - times[-2]
dts = np.array( dts = np.array(epochs, dtype='datetime64[s]')
epochs,
dtype='datetime64[s]',
)
# see units listing: # see units listing:
# https://numpy.org/devdocs/reference/arrays.datetime.html#datetime-units # https://numpy.org/devdocs/reference/arrays.datetime.html#datetime-units
@ -358,39 +261,24 @@ class DynamicDateAxis(Axis):
spacing: float, spacing: float,
) -> list[str]: ) -> list[str]:
return self._indexes_to_timestrs(values)
# NOTE: handy for debugging the lru cache
# info = self.tickStrings.cache_info() # info = self.tickStrings.cache_info()
# print(info) # print(info)
return self._indexes_to_timestrs(values)
class AxisLabel(pg.GraphicsObject): class AxisLabel(pg.GraphicsObject):
# relative offsets *OF* the bounding rect relative _x_margin = 0
# to parent graphics object. _y_margin = 0
# eg. <parent>| => <_x_br_offset> => | <text> |
_x_br_offset: float = 0
_y_br_offset: float = 0
# relative offsets of text *within* bounding rect
# eg. | <_x_margin> => <text> |
_x_margin: float = 0
_y_margin: float = 0
# multiplier of the text content's height in order
# to force a larger (y-dimension) bounding rect.
_y_txt_h_scaling: float = 1
def __init__( def __init__(
self, self,
parent: pg.GraphicsItem, parent: pg.GraphicsItem,
digits: int = 2, digits: int = 2,
bg_color: str = 'default', bg_color: str = 'bracket',
fg_color: str = 'black', fg_color: str = 'black',
opacity: int = .8, # XXX: seriously don't set this to 0 opacity: int = 1, # XXX: seriously don't set this to 0
font_size: str = 'default', font_size: str = 'default',
use_arrow: bool = True, use_arrow: bool = True,
@ -401,7 +289,6 @@ class AxisLabel(pg.GraphicsObject):
self.setParentItem(parent) self.setParentItem(parent)
self.setFlag(self.ItemIgnoresTransformations) self.setFlag(self.ItemIgnoresTransformations)
self.setZValue(100)
# XXX: pretty sure this is faster # XXX: pretty sure this is faster
self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache) self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
@ -433,14 +320,14 @@ class AxisLabel(pg.GraphicsObject):
p: QtGui.QPainter, p: QtGui.QPainter,
opt: QtWidgets.QStyleOptionGraphicsItem, opt: QtWidgets.QStyleOptionGraphicsItem,
w: QtWidgets.QWidget w: QtWidgets.QWidget
) -> None: ) -> None:
''' """Draw a filled rectangle based on the size of ``.label_str`` text.
Draw a filled rectangle based on the size of ``.label_str`` text.
Subtypes can customize further by overloading ``.draw()``. Subtypes can customize further by overloading ``.draw()``.
''' """
# p.setCompositionMode(QtWidgets.QPainter.CompositionMode_SourceOver)
if self.label_str: if self.label_str:
# if not self.rect: # if not self.rect:
@ -451,11 +338,7 @@ class AxisLabel(pg.GraphicsObject):
p.setFont(self._dpifont.font) p.setFont(self._dpifont.font)
p.setPen(self.fg_color) p.setPen(self.fg_color)
p.drawText( p.drawText(self.rect, self.text_flags, self.label_str)
self.rect,
self.text_flags,
self.label_str,
)
def draw( def draw(
self, self,
@ -463,8 +346,6 @@ class AxisLabel(pg.GraphicsObject):
rect: QtCore.QRectF rect: QtCore.QRectF
) -> None: ) -> None:
p.setOpacity(self.opacity)
if self._use_arrow: if self._use_arrow:
if not self.path: if not self.path:
self._draw_arrow_path() self._draw_arrow_path()
@ -472,13 +353,15 @@ class AxisLabel(pg.GraphicsObject):
p.drawPath(self.path) p.drawPath(self.path)
p.fillPath(self.path, pg.mkBrush(self.bg_color)) p.fillPath(self.path, pg.mkBrush(self.bg_color))
# this adds a nice black outline around the label for some odd
# reason; ok by us
p.setOpacity(self.opacity)
# this causes the L1 labels to glitch out if used in the subtype # this causes the L1 labels to glitch out if used in the subtype
# and it will leave a small black strip with the arrow path if # and it will leave a small black strip with the arrow path if
# done before the above # done before the above
p.fillRect( p.fillRect(self.rect, self.bg_color)
self.rect,
self.bg_color,
)
def boundingRect(self): # noqa def boundingRect(self): # noqa
''' '''
@ -522,18 +405,15 @@ class AxisLabel(pg.GraphicsObject):
txt_h, txt_w = txt_br.height(), txt_br.width() txt_h, txt_w = txt_br.height(), txt_br.width()
# print(f'wsw: {self._dpifont.boundingRect(" ")}') # print(f'wsw: {self._dpifont.boundingRect(" ")}')
# allow subtypes to override width and height # allow subtypes to specify a static width and height
h, w = self.size_hint() h, w = self.size_hint()
# print(f'axis size: {self._parent.size()}')
# print(f'axis geo: {self._parent.geometry()}')
self.rect = QtCore.QRectF( self.rect = QtCore.QRectF(
0, 0,
# relative bounds offsets
self._x_br_offset,
self._y_br_offset,
(w or txt_w) + self._x_margin / 2, (w or txt_w) + self._x_margin / 2,
(h or txt_h) + self._y_margin / 2,
(h or txt_h) * self._y_txt_h_scaling + (self._y_margin / 2),
) )
# print(self.rect) # print(self.rect)
# hb = self.path.controlPointRect() # hb = self.path.controlPointRect()
@ -609,7 +489,7 @@ class XAxisLabel(AxisLabel):
class YAxisLabel(AxisLabel): class YAxisLabel(AxisLabel):
_y_margin: int = 4 _y_margin = 4
text_flags = ( text_flags = (
QtCore.Qt.AlignLeft QtCore.Qt.AlignLeft
@ -620,19 +500,19 @@ class YAxisLabel(AxisLabel):
def __init__( def __init__(
self, self,
pi: pgo.PlotItem, chart,
*args, *args,
**kwargs **kwargs
) -> None: ) -> None:
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self._pi = pi self._chart = chart
pi.sigRangeChanged.connect(self.update_on_resize)
chart.sigRangeChanged.connect(self.update_on_resize)
self._last_datum = (None, None) self._last_datum = (None, None)
self.x_offset = 0
# pull text offset from axis from parent axis # pull text offset from axis from parent axis
if getattr(self._parent, 'txt_offsets', False): if getattr(self._parent, 'txt_offsets', False):
self.x_offset, y_offset = self._parent.txt_offsets() self.x_offset, y_offset = self._parent.txt_offsets()
@ -651,8 +531,7 @@ class YAxisLabel(AxisLabel):
value: float, # data for text value: float, # data for text
# on odd dimension and/or adds nice black line # on odd dimension and/or adds nice black line
x_offset: int = 0, x_offset: Optional[int] = None
) -> None: ) -> None:
# this is read inside ``.paint()`` # this is read inside ``.paint()``
@ -698,7 +577,7 @@ class YAxisLabel(AxisLabel):
self._last_datum = (index, value) self._last_datum = (index, value)
self.update_label( self.update_label(
self._pi.mapFromView(QPointF(index, value)), self._chart.mapFromView(QPointF(index, value)),
value value
) )

File diff suppressed because it is too large

View File

@ -15,30 +15,17 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>. # along with this program. If not, see <https://www.gnu.org/licenses/>.
''' '''
Graphics downsampling using the infamous M4 algorithm. Graphics related downsampling routines for compressing to pixel
limits on the display device.
This is one of ``piker``'s secret weapons allowing us to boss all other
charting platforms B)
(AND DON'T YOU DARE TAKE THIS CODE WITHOUT CREDIT OR WE'LL SUE UR F#&@* ASS).
NOTES: this method is a so called "visualization driven data
aggregation" approach. It gives error-free line chart
downsampling, see
further scientific paper resources:
- http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
- http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf
Details on implementation of this algo are based in,
https://github.com/pikers/piker/issues/109
''' '''
import math import math
from typing import Optional from typing import Optional
import numpy as np import numpy as np
from numpy.lib import recfunctions as rfn
from numba import ( from numba import (
njit, jit,
# float64, optional, int64, # float64, optional, int64,
) )
@ -48,6 +35,109 @@ from ..log import get_logger
log = get_logger(__name__) log = get_logger(__name__)
def hl2mxmn(ohlc: np.ndarray) -> np.ndarray:
'''
Convert an OHLC struct-array containing 'high'/'low' columns Convert an OHLC struct-array containing 'high'/'low' columns
to a "joined" max/min 1-d array.
'''
index = ohlc['index']
hls = ohlc[[
'low',
'high',
]]
mxmn = np.empty(2*hls.size, dtype=np.float64)
x = np.empty(2*hls.size, dtype=np.float64)
trace_hl(hls, mxmn, x, index[0])
x = x + index[0]
return mxmn, x
@jit(
# TODO: the type annots..
# float64[:](float64[:],),
nopython=True,
)
def trace_hl(
hl: 'np.ndarray',
out: np.ndarray,
x: np.ndarray,
start: int,
# the "offset" values in the x-domain which
# place the 2 output points around each ``int``
# master index.
margin: float = 0.43,
) -> None:
'''
"Trace" the outline of the high-low values of an ohlc sequence
as a line such that the maximum deviation (aka dispersion) between
bars is preserved.
This routine is expected to modify input arrays in-place.
'''
last_l = hl['low'][0]
last_h = hl['high'][0]
for i in range(hl.size):
row = hl[i]
l, h = row['low'], row['high']
up_diff = h - last_l
down_diff = last_h - l
if up_diff > down_diff:
out[2*i + 1] = h
out[2*i] = last_l
else:
out[2*i + 1] = l
out[2*i] = last_h
last_l = l
last_h = h
x[2*i] = int(i) - margin
x[2*i + 1] = int(i) + margin
return out
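For orientation, a rough numpy-only sketch of the same high/low "tracing" idea (hypothetical helper name, no numba, and not the kernel above):

import numpy as np

def trace_hl_py(highs: np.ndarray, lows: np.ndarray) -> np.ndarray:
    # emit 2 points per bar, ordered so that the larger of the up-move
    # (high - prev low) vs. the down-move (prev high - low) is kept
    out = np.empty(2 * highs.size, dtype=np.float64)
    last_l, last_h = lows[0], highs[0]
    for i in range(highs.size):
        h, l = highs[i], lows[i]
        if (h - last_l) > (last_h - l):
            out[2*i], out[2*i + 1] = last_l, h
        else:
            out[2*i], out[2*i + 1] = last_h, l
        last_l, last_h = l, h
    return out

# x-domain: 2 offset points around each integer index, eg.
# xs = np.repeat(np.arange(highs.size, dtype=float), 2)
# xs[0::2] -= 0.43; xs[1::2] += 0.43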
def ohlc_flatten(
ohlc: np.ndarray,
use_mxmn: bool = True,
) -> tuple[np.ndarray, np.ndarray]:
'''
Convert an OHLCV struct-array into a flat ready-for-line-plotting
1-d array that is 4 times the size with x-domain values distributed
evenly (by 0.5 steps) over each index.
'''
index = ohlc['index']
if use_mxmn:
# traces a line optimally over highs to lows
# using numba. NOTE: pretty sure this is faster
# and looks about the same as the below output.
flat, x = hl2mxmn(ohlc)
else:
flat = rfn.structured_to_unstructured(
ohlc[['open', 'high', 'low', 'close']]
).flatten()
x = np.linspace(
start=index[0] - 0.5,
stop=index[-1] + 0.5,
num=len(flat),
)
return x, flat
def ds_m4( def ds_m4(
x: np.ndarray, x: np.ndarray,
y: np.ndarray, y: np.ndarray,
@ -70,6 +160,16 @@ def ds_m4(
This is more or less an OHLC style sampling of a line-style series. This is more or less an OHLC style sampling of a line-style series.
''' '''
# NOTE: this method is a so called "visualization driven data
# aggregation" approach. It gives error-free line chart
# downsampling, see
# further scientific paper resources:
# - http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
# - http://www.vldb.org/2014/program/papers/demo/p997-jugel.pdf
# Details on implementation of this algo are based in,
# https://github.com/pikers/piker/issues/109
# XXX: from infinite on downsampling viewable graphics: # XXX: from infinite on downsampling viewable graphics:
# "one thing i remembered about the binning - if you are # "one thing i remembered about the binning - if you are
# picking a range within your timeseries the start and end bin # picking a range within your timeseries the start and end bin
@ -91,14 +191,6 @@ def ds_m4(
x_end = x[-1] # x end value/highest in domain x_end = x[-1] # x end value/highest in domain
xrange = (x_end - x_start) xrange = (x_end - x_start)
if xrange < 0:
log.error(f'-VE M4 X-RANGE: {x_start} -> {x_end}')
# XXX: broken x-range calc-case, likely the x-end points
# are wrong and have some default value set (such as
# x_end -> <some epoch float> while x_start -> 0.5).
# breakpoint()
return None
# XXX: always round up on the input pixels # XXX: always round up on the input pixels
# lnx = len(x) # lnx = len(x)
# uppx *= max(4 / (1 + math.log(uppx, 2)), 1) # uppx *= max(4 / (1 + math.log(uppx, 2)), 1)
@ -131,20 +223,14 @@ def ds_m4(
assert frames >= (xrange / uppx) assert frames >= (xrange / uppx)
# call into ``numba`` # call into ``numba``
( nb, i_win, y_out = _m4(
nb,
x_out,
y_out,
ymn,
ymx,
) = _m4(
x, x,
y, y,
frames, frames,
# TODO: see func below.. # TODO: see func below..
# x_out, # i_win,
# y_out, # y_out,
# first index in x data to start at # first index in x data to start at
@ -157,14 +243,14 @@ def ds_m4(
# filter out any overshoot in the input allocation arrays by # filter out any overshoot in the input allocation arrays by
# removing zero-ed tail entries which should start at a certain # removing zero-ed tail entries which should start at a certain
# index. # index.
x_out = x_out[x_out != 0] i_win = i_win[i_win != 0]
y_out = y_out[:x_out.size] y_out = y_out[:i_win.size]
# print(f'M4 output ymn, ymx: {ymn},{ymx}') return nb, i_win, y_out
return nb, x_out, y_out, ymn, ymx
@njit( @jit(
nopython=True,
nogil=True, nogil=True,
) )
def _m4( def _m4(
@ -174,8 +260,8 @@ def _m4(
frames: int, frames: int,
# TODO: using this approach, having the ``.zeros()`` alloc lines # TODO: using this approach by having the ``.zeros()`` alloc lines
# below in pure python, there were seg faults and alloc crashes.. # below, in pure python was causing seg faults and alloc crashes..
# we might need to see how it behaves with shm arrays and consider # we might need to see how it behaves with shm arrays and consider
# allocating them once at startup? # allocating them once at startup?
@ -188,22 +274,14 @@ def _m4(
x_start: int, x_start: int,
step: float, step: float,
) -> tuple[ ) -> int:
int, # nbins = len(i_win)
np.ndarray, # count = len(xs)
np.ndarray,
float,
float,
]:
'''
Implementation of the m4 algorithm in ``numba``:
http://www.vldb.org/pvldb/vol7/p797-jugel.pdf
'''
# these are pre-allocated and mutated by ``numba`` # these are pre-allocated and mutated by ``numba``
# code in-place. # code in-place.
y_out = np.zeros((frames, 4), ys.dtype) y_out = np.zeros((frames, 4), ys.dtype)
x_out = np.zeros(frames, xs.dtype) i_win = np.zeros(frames, xs.dtype)
bincount = 0 bincount = 0
x_left = x_start x_left = x_start
@ -217,34 +295,24 @@ def _m4(
# set all bins in the left-most entry to the starting left-most x value # set all bins in the left-most entry to the starting left-most x value
# (aka a row broadcast). # (aka a row broadcast).
x_out[bincount] = x_left i_win[bincount] = x_left
# set all y-values to the first value passed in. # set all y-values to the first value passed in.
y_out[bincount] = ys[0] y_out[bincount] = ys[0]
# full input y-data mx and mn
mx: float = -np.inf
mn: float = np.inf
# compute OHLC style max / min values per window sized x-frame.
for i in range(len(xs)): for i in range(len(xs)):
x = xs[i] x = xs[i]
y = ys[i] y = ys[i]
if x < x_left + step: # the current window "step" is [bin, bin+1) if x < x_left + step: # the current window "step" is [bin, bin+1)
ymn = y_out[bincount, 1] = min(y, y_out[bincount, 1]) y_out[bincount, 1] = min(y, y_out[bincount, 1])
ymx = y_out[bincount, 2] = max(y, y_out[bincount, 2]) y_out[bincount, 2] = max(y, y_out[bincount, 2])
y_out[bincount, 3] = y y_out[bincount, 3] = y
mx = max(mx, ymx)
mn = min(mn, ymn)
else: else:
# Find the next bin # Find the next bin
while x >= x_left + step: while x >= x_left + step:
x_left += step x_left += step
bincount += 1 bincount += 1
x_out[bincount] = x_left i_win[bincount] = x_left
y_out[bincount] = y y_out[bincount] = y
return bincount, x_out, y_out, mn, mx return bincount, i_win, y_out
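To make the windowed binning concrete, here is a minimal pure-numpy sketch (hypothetical, not the numba kernel above) of the same first/min/max/last reduction per pixel-sized x-frame:

import numpy as np

def m4_bin(x: np.ndarray, y: np.ndarray, uppx: float) -> np.ndarray:
    # one row of [first, min, max, last] y-values per x-window of
    # width ``uppx`` (x-units per pixel); assumes ``x`` is increasing
    bins = ((x - x[0]) // uppx).astype(int)
    out = np.empty((bins[-1] + 1, 4), dtype=y.dtype)
    for b in range(out.shape[0]):
        ys = y[bins == b]
        if ys.size:
            out[b] = ys[0], ys.min(), ys.max(), ys[-1]
        else:
            # empty window: carry the previous "close" forward
            out[b] = out[b - 1, 3]
    return out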

View File

@ -18,13 +18,8 @@
Mouse interaction graphics Mouse interaction graphics
""" """
from __future__ import annotations
from functools import partial from functools import partial
from typing import ( from typing import Optional, Callable
Optional,
Callable,
TYPE_CHECKING,
)
import inspect import inspect
import numpy as np import numpy as np
@ -41,12 +36,6 @@ from ._style import (
from ._axes import YAxisLabel, XAxisLabel from ._axes import YAxisLabel, XAxisLabel
from ..log import get_logger from ..log import get_logger
if TYPE_CHECKING:
from ._chart import (
ChartPlotWidget,
LinkedSplits,
)
log = get_logger(__name__) log = get_logger(__name__)
@ -69,9 +58,9 @@ class LineDot(pg.CurvePoint):
curve: pg.PlotCurveItem, curve: pg.PlotCurveItem,
index: int, index: int,
plot: ChartPlotWidget, # type: ignore # noqa plot: 'ChartPlotWidget', # type: ignore # noqa
pos=None, pos=None,
color: str = 'bracket', color: str = 'default_light',
) -> None: ) -> None:
# scale from dpi aware font size # scale from dpi aware font size
@ -162,7 +151,7 @@ class ContentsLabel(pg.LabelItem):
def __init__( def __init__(
self, self,
# chart: ChartPlotWidget, # noqa # chart: 'ChartPlotWidget', # noqa
view: pg.ViewBox, view: pg.ViewBox,
anchor_at: str = ('top', 'right'), anchor_at: str = ('top', 'right'),
@ -198,11 +187,12 @@ class ContentsLabel(pg.LabelItem):
self, self,
name: str, name: str,
ix: int, index: int,
array: np.ndarray, array: np.ndarray,
) -> None: ) -> None:
# this being "html" is the dumbest shit :eyeroll: # this being "html" is the dumbest shit :eyeroll:
first = array[0]['index']
self.setText( self.setText(
"<b>i</b>:{index}<br/>" "<b>i</b>:{index}<br/>"
@ -215,7 +205,7 @@ class ContentsLabel(pg.LabelItem):
"<b>C</b>:{}<br/>" "<b>C</b>:{}<br/>"
"<b>V</b>:{}<br/>" "<b>V</b>:{}<br/>"
"<b>wap</b>:{}".format( "<b>wap</b>:{}".format(
*array[ix][ *array[index - first][
[ [
'time', 'time',
'open', 'open',
@ -227,7 +217,7 @@ class ContentsLabel(pg.LabelItem):
] ]
], ],
name=name, name=name,
index=ix, index=index,
) )
) )
@ -235,12 +225,15 @@ class ContentsLabel(pg.LabelItem):
self, self,
name: str, name: str,
ix: int, index: int,
array: np.ndarray, array: np.ndarray,
) -> None: ) -> None:
data = array[ix][name]
self.setText(f"{name}: {data:.2f}") first = array[0]['index']
if index < array[-1]['index'] and index > first:
data = array[index - first][name]
self.setText(f"{name}: {data:.2f}")
class ContentsLabels: class ContentsLabels:
@ -251,7 +244,7 @@ class ContentsLabels:
''' '''
def __init__( def __init__(
self, self,
linkedsplits: LinkedSplits, # type: ignore # noqa linkedsplits: 'LinkedSplits', # type: ignore # noqa
) -> None: ) -> None:
@ -265,20 +258,17 @@ class ContentsLabels:
def update_labels( def update_labels(
self, self,
x_in: int, index: int,
) -> None: ) -> None:
for chart, name, label, update in self._labels: for chart, name, label, update in self._labels:
viz = chart.get_viz(name) flow = chart._flows[name]
array = viz.shm.array array = flow.shm.array
index = array[viz.index_field]
start = index[0]
stop = index[-1]
if not ( if not (
x_in >= start index >= 0
and x_in <= stop and index < array[-1]['index']
): ):
# out of range # out of range
print('WTF out of range?') print('WTF out of range?')
@ -287,10 +277,7 @@ class ContentsLabels:
# call provided update func with data point # call provided update func with data point
try: try:
label.show() label.show()
ix = np.searchsorted(index, x_in) update(index, array)
if ix > len(array):
breakpoint()
update(ix, array)
except IndexError: except IndexError:
log.exception(f"Failed to update label: {name}") log.exception(f"Failed to update label: {name}")
@ -302,7 +289,7 @@ class ContentsLabels:
def add_label( def add_label(
self, self,
chart: ChartPlotWidget, # type: ignore # noqa chart: 'ChartPlotWidget', # type: ignore # noqa
name: str, name: str,
anchor_at: tuple[str, str] = ('top', 'left'), anchor_at: tuple[str, str] = ('top', 'left'),
update_func: Callable = ContentsLabel.update_from_value, update_func: Callable = ContentsLabel.update_from_value,
@ -329,7 +316,7 @@ class Cursor(pg.GraphicsObject):
def __init__( def __init__(
self, self,
linkedsplits: LinkedSplits, # noqa linkedsplits: 'LinkedSplits', # noqa
digits: int = 0 digits: int = 0
) -> None: ) -> None:
@ -338,8 +325,6 @@ class Cursor(pg.GraphicsObject):
self.linked = linkedsplits self.linked = linkedsplits
self.graphics: dict[str, pg.GraphicsObject] = {} self.graphics: dict[str, pg.GraphicsObject] = {}
self.xaxis_label: Optional[XAxisLabel] = None
self.always_show_xlabel: bool = True
self.plots: list['PlotChartWidget'] = [] # type: ignore # noqa self.plots: list['PlotChartWidget'] = [] # type: ignore # noqa
self.active_plot = None self.active_plot = None
self.digits: int = digits self.digits: int = digits
@ -351,7 +336,7 @@ class Cursor(pg.GraphicsObject):
# XXX: not sure why these are instance variables? # XXX: not sure why these are instance variables?
# It's not like we can change them on the fly..? # It's not like we can change them on the fly..?
self.pen = pg.mkPen( self.pen = pg.mkPen(
color=hcolor('bracket'), color=hcolor('default'),
style=QtCore.Qt.DashLine, style=QtCore.Qt.DashLine,
) )
self.lines_pen = pg.mkPen( self.lines_pen = pg.mkPen(
@ -367,7 +352,7 @@ class Cursor(pg.GraphicsObject):
self._lw = self.pixelWidth() * self.lines_pen.width() self._lw = self.pixelWidth() * self.lines_pen.width()
# xhair label's color name # xhair label's color name
self.label_color: str = 'bracket' self.label_color: str = 'default'
self._y_label_update: bool = True self._y_label_update: bool = True
@ -400,7 +385,7 @@ class Cursor(pg.GraphicsObject):
def add_plot( def add_plot(
self, self,
plot: ChartPlotWidget, # noqa plot: 'ChartPlotWidget', # noqa
digits: int = 0, digits: int = 0,
) -> None: ) -> None:
@ -420,7 +405,7 @@ class Cursor(pg.GraphicsObject):
hl.hide() hl.hide()
yl = YAxisLabel( yl = YAxisLabel(
pi=plot.plotItem, chart=plot,
# parent=plot.getAxis('right'), # parent=plot.getAxis('right'),
parent=plot.pi_overlay.get_axis(plot.plotItem, 'right'), parent=plot.pi_overlay.get_axis(plot.plotItem, 'right'),
digits=digits or self.digits, digits=digits or self.digits,
@ -484,58 +469,39 @@ class Cursor(pg.GraphicsObject):
def add_curve_cursor( def add_curve_cursor(
self, self,
chart: ChartPlotWidget, # noqa plot: 'ChartPlotWidget', # noqa
curve: 'PlotCurveItem', # noqa curve: 'PlotCurveItem', # noqa
) -> LineDot: ) -> LineDot:
# if this chart contains curves add line dot "cursors" to denote # if this plot contains curves add line dot "cursors" to denote
# the current sample under the mouse # the current sample under the mouse
main_viz = chart.get_viz(chart.name) main_flow = plot._flows[plot.name]
# read out last index # read out last index
i = main_viz.shm.array[-1]['index'] i = main_flow.shm.array[-1]['index']
cursor = LineDot( cursor = LineDot(
curve, curve,
index=i, index=i,
plot=chart plot=plot
) )
chart.addItem(cursor) plot.addItem(cursor)
self.graphics[chart].setdefault('cursors', []).append(cursor) self.graphics[plot].setdefault('cursors', []).append(cursor)
return cursor return cursor
def mouseAction( def mouseAction(self, action, plot): # noqa
self,
action: str,
plot: ChartPlotWidget,
) -> None: # noqa
log.debug(f"{(action, plot.name)}") log.debug(f"{(action, plot.name)}")
if action == 'Enter': if action == 'Enter':
self.active_plot = plot self.active_plot = plot
plot.linked.godwidget._active_cursor = self
# show horiz line and y-label # show horiz line and y-label
self.graphics[plot]['hl'].show() self.graphics[plot]['hl'].show()
self.graphics[plot]['yl'].show() self.graphics[plot]['yl'].show()
if ( else: # Leave
not self.always_show_xlabel
and not self.xaxis_label.isVisible()
):
self.xaxis_label.show()
# Leave: hide horiz line and y-label # hide horiz line and y-label
else:
self.graphics[plot]['hl'].hide() self.graphics[plot]['hl'].hide()
self.graphics[plot]['yl'].hide() self.graphics[plot]['yl'].hide()
if (
not self.always_show_xlabel
and self.xaxis_label.isVisible()
):
self.xaxis_label.hide()
def mouseMoved( def mouseMoved(
self, self,
coords: tuple[QPointF], # noqa coords: tuple[QPointF], # noqa
@ -624,17 +590,13 @@ class Cursor(pg.GraphicsObject):
left_axis_width += left.width() left_axis_width += left.width()
# map back to abs (label-local) coordinates # map back to abs (label-local) coordinates
if ( self.xaxis_label.update_label(
self.always_show_xlabel abs_pos=(
or self.xaxis_label.isVisible() plot.mapFromView(QPointF(vl_x, iy)) -
): QPointF(left_axis_width, 0)
self.xaxis_label.update_label( ),
abs_pos=( value=ix,
plot.mapFromView(QPointF(vl_x, iy)) - )
QPointF(left_axis_width, 0)
),
value=ix,
)
self._datum_xy = ix, iy self._datum_xy = ix, iy

View File

@ -28,7 +28,10 @@ from PyQt5.QtWidgets import QGraphicsItem
from PyQt5.QtCore import ( from PyQt5.QtCore import (
Qt, Qt,
QLineF, QLineF,
QSizeF,
QRectF, QRectF,
# QRect,
QPointF,
) )
from PyQt5.QtGui import ( from PyQt5.QtGui import (
QPainter, QPainter,
@ -36,8 +39,11 @@ from PyQt5.QtGui import (
) )
from .._profile import pg_profile_enabled, ms_slower_then from .._profile import pg_profile_enabled, ms_slower_then
from ._style import hcolor from ._style import hcolor
# from ._compression import (
# # ohlc_to_m4_line,
# ds_m4,
# )
from ..log import get_logger from ..log import get_logger
from .._profile import Profiler
log = get_logger(__name__) log = get_logger(__name__)
@ -51,117 +57,7 @@ _line_styles: dict[str, int] = {
} }
class FlowGraphic(pg.GraphicsObject): class Curve(pg.GraphicsObject):
'''
Base class with minimal interface for `QPainterPath` implemented,
real-time updated "data flow" graphics.
See subtypes below.
'''
# sub-type customization methods
declare_paintables: Callable | None = None
sub_paint: Callable | None = None
# XXX-NOTE-XXX: graphics caching B)
# see explanation for different caching modes:
# https://stackoverflow.com/a/39410081
cache_mode: int = QGraphicsItem.DeviceCoordinateCache
# XXX: WARNING item caching seems to only be useful
# if we don't re-generate the entire QPainterPath every time
# don't ever use this - it's a colossal nightmare of artefacts
# and is disastrous for performance.
# QGraphicsItem.ItemCoordinateCache
# TODO: still questions todo with coord-cacheing that we should
# probably talk to a core dev about:
# - if this makes transform interactions slower (such as zooming)
# and if so maybe if/when we implement a "history" mode for the
# view we disable this in that mode?
def __init__(
self,
*args,
name: str | None = None,
# line styling
color: str = 'bracket',
last_step_color: str | None = None,
fill_color: Optional[str] = None,
style: str = 'solid',
**kwargs
) -> None:
self._name = name
# primary graphics item used for history
self.path: QPainterPath = QPainterPath()
# additional path that can be optionally used for appends which
# tries to avoid triggering an update/redraw of the presumably
# larger historical ``.path`` above. the flag to enable
# this behaviour is found in `Renderer.render()`.
self.fast_path: QPainterPath | None = None
# TODO: evaluating the path capacity stuff and see
# if it really makes much diff pre-allocating it.
# self._last_cap: int = 0
# cap = path.capacity()
# if cap != self._last_cap:
# print(f'NEW CAPACITY: {self._last_cap} -> {cap}')
# self._last_cap = cap
# all history of curve is drawn in single px thickness
self._color: str = color
pen = pg.mkPen(hcolor(color), width=1)
pen.setStyle(_line_styles[style])
if 'dash' in style:
pen.setDashPattern([8, 3])
self._pen = pen
self._brush = pg.functions.mkBrush(
hcolor(fill_color or color)
)
# last segment is drawn in 2px thickness for emphasis
if last_step_color:
self.last_step_pen = pg.mkPen(
hcolor(last_step_color),
width=2,
)
else:
self.last_step_pen = pg.mkPen(
self._pen,
width=2,
)
self._last_line: QLineF = QLineF()
super().__init__(*args, **kwargs)
# apply cache mode
self.setCacheMode(self.cache_mode)
def x_uppx(self) -> int:
px_vecs = self.pixelVectors()[0]
if px_vecs:
return px_vecs.x()
else:
return 0
def x_last(self) -> float | None:
'''
Return the last most x value of the last line segment or if not
drawn yet, ``None``.
'''
return self._last_line.x1() if self._last_line else None
class Curve(FlowGraphic):
''' '''
A faster, simpler, append friendly version of A faster, simpler, append friendly version of
``pyqtgraph.PlotCurveItem`` built for highly customizable real-time ``pyqtgraph.PlotCurveItem`` built for highly customizable real-time
@ -178,7 +74,7 @@ class Curve(FlowGraphic):
lower level graphics data can be rendered in different threads and lower level graphics data can be rendered in different threads and
then read and drawn in this main thread without having to worry then read and drawn in this main thread without having to worry
about dealing with Qt's concurrency primitives. See about dealing with Qt's concurrency primitives. See
``piker.ui._render.Renderer`` for details and logic related to lower ``piker.ui._flows.Renderer`` for details and logic related to lower
level path generation and incremental update. The main differences in level path generation and incremental update. The main differences in
the path generation code include: the path generation code include:
@ -190,38 +86,127 @@ class Curve(FlowGraphic):
updates don't trigger a full path redraw. updates don't trigger a full path redraw.
''' '''
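As a loose illustration of the "big history path + small fast append path" idea described above (a hypothetical sketch, not the actual ``Renderer`` logic):

from PyQt5.QtGui import QPainter, QPainterPath

def append_datum(
    path: QPainterPath,
    fast_path: QPainterPath,
    x: float,
    y: float,
) -> None:
    # only the small fast path is mutated so that Qt's cached raster
    # of the (much larger) history path isn't invalidated per datum
    if fast_path.elementCount() == 0:
        fast_path.moveTo(path.currentPosition())
    fast_path.lineTo(x, y)

def paint_both(
    p: QPainter,
    path: QPainterPath,
    fast_path: QPainterPath,
) -> None:
    p.drawPath(path)       # big, rarely re-generated history
    p.drawPath(fast_path)  # small, frequently appended tail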
# TODO: can we remove this?
# sub_br: Optional[Callable] = None # sub-type customization methods
sub_br: Optional[Callable] = None
sub_paint: Optional[Callable] = None
declare_paintables: Optional[Callable] = None
def __init__( def __init__(
self, self,
*args, *args,
# color: str = 'default_lightest', step_mode: bool = False,
# fill_color: Optional[str] = None, color: str = 'default_lightest',
# style: str = 'solid', fill_color: Optional[str] = None,
style: str = 'solid',
name: Optional[str] = None,
use_fpath: bool = True,
**kwargs **kwargs
) -> None: ) -> None:
self._name = name
# brutaaalll, see comments within.. # brutaaalll, see comments within..
self.yData = None self.yData = None
self.xData = None self.xData = None
# self._last_cap: int = 0
self.path: Optional[QPainterPath] = None
# additional path used for appends which tries to avoid
# triggering an update/redraw of the presumably larger
# historical ``.path`` above.
self.use_fpath = use_fpath
self.fast_path: Optional[QPainterPath] = None
# TODO: we can probably just dispense with the parent since # TODO: we can probably just dispense with the parent since
# we're basically only using the pen setting now... # we're basically only using the pen setting now...
super().__init__(*args, **kwargs) super().__init__(*args, **kwargs)
self._last_line: QLineF = QLineF() # all history of curve is drawn in single px thickness
pen = pg.mkPen(hcolor(color))
pen.setStyle(_line_styles[style])
if 'dash' in style:
pen.setDashPattern([8, 3])
self._pen = pen
# last segment is drawn in 2px thickness for emphasis
# self.last_step_pen = pg.mkPen(hcolor(color), width=2)
self.last_step_pen = pg.mkPen(pen, width=2)
# self._last_line: Optional[QLineF] = None
self._last_line = QLineF()
self._last_w: float = 1
# flat-top style histogram-like discrete curve
# self._step_mode: bool = step_mode
# self._fill = True # self._fill = True
self._brush = pg.functions.mkBrush(hcolor(fill_color or color))
# NOTE: this setting seems to mostly prevent redraws on mouse
# interaction which is a huge boon for avg interaction latency.
# TODO: one question still remaining is if this makes transform
# interactions slower (such as zooming) and if so maybe if/when
# we implement a "history" mode for the view we disable this in
# that mode?
# don't enable caching by default for the case where the
# only thing drawn is the "last" line segment which can
# have a weird artifact where it won't be fully drawn to its
# endpoint (something we saw on trade rate curves)
self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
# XXX: see explanation for different caching modes:
# https://stackoverflow.com/a/39410081
# seems to only be useful if we don't re-generate the entire
# QPainterPath every time
# curve.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
# don't ever use this - it's a colossal nightmare of artefacts
# and is disastrous for performance.
# curve.setCacheMode(QtWidgets.QGraphicsItem.ItemCoordinateCache)
# allow sub-type customization # allow sub-type customization
declare = self.declare_paintables declare = self.declare_paintables
if declare: if declare:
declare() declare()
# TODO: probably stick this in a new parent
# type which will contain our own version of
# what ``PlotCurveItem`` had in terms of base
# functionality? A `FlowGraphic` maybe?
def x_uppx(self) -> int:
px_vecs = self.pixelVectors()[0]
if px_vecs:
xs_in_px = px_vecs.x()
return round(xs_in_px)
else:
return 0
def px_width(self) -> float:
vb = self.getViewBox()
if not vb:
return 0
vr = self.viewRect()
l, r = int(vr.left()), int(vr.right())
start, stop = self._xrange
lbar = max(l, start)
rbar = min(r, stop)
return vb.mapViewToDevice(
QLineF(lbar, 0, rbar, 0)
).length()
# XXX: lol brutal, the internals of `CurvePoint` (inherited by # XXX: lol brutal, the internals of `CurvePoint` (inherited by
# our `LineDot`) required ``.getData()`` to work.. # our `LineDot`) required ``.getData()`` to work..
def getData(self): def getData(self):
@ -245,8 +230,8 @@ class Curve(FlowGraphic):
self.path.clear() self.path.clear()
if self.fast_path: if self.fast_path:
self.fast_path.clear() # self.fast_path.clear()
# self.fast_path = None self.fast_path = None
@cm @cm
def reset_cache(self) -> None: def reset_cache(self) -> None:
@ -266,65 +251,77 @@ class Curve(FlowGraphic):
self.boundingRect = self._path_br self.boundingRect = self._path_br
return self._path_br() return self._path_br()
# Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect
def _path_br(self): def _path_br(self):
''' '''
Post init ``.boundingRect()```. Post init ``.boundingRect()```.
''' '''
# profiler = Profiler( # hb = self.path.boundingRect()
# msg=f'Curve.boundingRect(): `{self._name}`', hb = self.path.controlPointRect()
# disabled=not pg_profile_enabled(), hb_size = hb.size()
# ms_threshold=ms_slower_then,
fp = self.fast_path
if fp:
fhb = fp.controlPointRect()
hb_size = fhb.size() + hb_size
# print(f'hb_size: {hb_size}')
# if self._last_step_rect:
# hb_size += self._last_step_rect.size()
# if self._line:
# br = self._last_step_rect.bottomRight()
# tl = QPointF(
# # self._vr[0],
# # hb.topLeft().y(),
# # 0,
# # hb_size.height() + 1
# ) # )
pr = self.path.controlPointRect()
hb_tl, hb_br = ( # br = self._last_step_rect.bottomRight()
pr.topLeft(),
pr.bottomRight(), w = hb_size.width()
) h = hb_size.height()
mn_y = hb_tl.y()
mx_y = hb_br.y() sbr = self.sub_br
most_left = hb_tl.x() if sbr:
most_right = hb_br.x() w, h = self.sub_br(w, h)
# profiler('calc path vertices') else:
# assume plain line graphic and use
# TODO: if/when we get fast path appends working in the # default unit step in each direction.
# `Renderer`, then we might need to actually use this..
# fp = self.fast_path # only on a plane line do we include
# if fp: # and extra index step's worth of width
# fhb = fp.controlPointRect() # since in the step case the end of the curve
# # hb_size = fhb.size() + hb_size # actually terminates earlier so we don't need
# br = pr.united(fhb) # this for the last step.
w += self._last_w
# XXX: *was* a way to allow sub-types to extend the # ll = self._last_line
# boundingrect calc, but in the one use case for a step curve h += 1 # ll.y2() - ll.y1()
# doesn't seem like we need it as long as the last line segment
# is drawn as it is? # br = QPointF(
# self._vr[-1],
# sbr = self.sub_br # # tl.x() + w,
# if sbr: # tl.y() + h,
# # w, h = self.sub_br(w, h) # )
# sub_br = sbr()
# br = br.united(sub_br) br = QRectF(
# assume plain line graphic and use # top left
# default unit step in each direction. # hb.topLeft()
ll = self._last_line # tl,
y1, y2 = ll.y1(), ll.y2() QPointF(hb.topLeft()),
x1, x2 = ll.x1(), ll.x2()
# br,
ymn = min(y1, y2, mn_y) # total size
ymx = max(y1, y2, mx_y) # QSizeF(hb_size)
most_left = min(x1, x2, most_left) # hb_size,
most_right = max(x1, x2, most_right) QSizeF(w, h)
# profiler('calc last line vertices')
return QRectF(
most_left,
ymn,
most_right - most_left + 1,
ymx,
) )
# print(f'bounding rect: {br}')
return br
def paint( def paint(
self, self,
@ -334,7 +331,7 @@ class Curve(FlowGraphic):
) -> None: ) -> None:
profiler = Profiler( profiler = pg.debug.Profiler(
msg=f'Curve.paint(): `{self._name}`', msg=f'Curve.paint(): `{self._name}`',
disabled=not pg_profile_enabled(), disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then, ms_threshold=ms_slower_then,
@ -342,14 +339,18 @@ class Curve(FlowGraphic):
sub_paint = self.sub_paint sub_paint = self.sub_paint
if sub_paint: if sub_paint:
sub_paint(p) sub_paint(p, profiler)
p.setPen(self.last_step_pen) p.setPen(self.last_step_pen)
p.drawLine(self._last_line) p.drawLine(self._last_line)
profiler('last datum `.drawLine()`') profiler('.drawLine()')
p.setPen(self._pen) p.setPen(self._pen)
path = self.path path = self.path
# cap = path.capacity()
# if cap != self._last_cap:
# print(f'NEW CAPACITY: {self._last_cap} -> {cap}')
# self._last_cap = cap
if path: if path:
p.drawPath(path) p.drawPath(path)
@ -372,30 +373,22 @@ class Curve(FlowGraphic):
self, self,
path: QPainterPath, path: QPainterPath,
src_data: np.ndarray, src_data: np.ndarray,
render_data: np.ndarray,
reset: bool, reset: bool,
array_key: str, array_key: str,
index_field: str,
) -> None: ) -> None:
# default line draw last call # default line draw last call
# with self.reset_cache(): # with self.reset_cache():
x = src_data[index_field] x = render_data['index']
y = src_data[array_key] y = render_data[array_key]
x_last = x[-1]
x_2last = x[-2]
# draw the "current" step graphic segment so it # draw the "current" step graphic segment so it
# lines up with the "middle" of the current # lines up with the "middle" of the current
# (OHLC) sample. # (OHLC) sample.
self._last_line = QLineF( self._last_line = QLineF(
x[-2], y[-2],
# NOTE: currently we draw in x-domain x[-1], y[-1],
# from last datum to current such that
# the end of line touches the "beginning"
# of the current datum step span.
x_2last, y[-2],
x_last, y[-1],
) )
return x, y return x, y
@ -407,20 +400,17 @@ class Curve(FlowGraphic):
# (via it's max / min) even when highly zoomed out. # (via it's max / min) even when highly zoomed out.
class FlattenedOHLC(Curve): class FlattenedOHLC(Curve):
# avoids strange dragging/smearing artifacts when panning..
cache_mode: int = QGraphicsItem.NoCache
def draw_last_datum( def draw_last_datum(
self, self,
path: QPainterPath, path: QPainterPath,
src_data: np.ndarray, src_data: np.ndarray,
render_data: np.ndarray,
reset: bool, reset: bool,
array_key: str, array_key: str,
index_field: str,
) -> None: ) -> None:
lasts = src_data[-2:] lasts = src_data[-2:]
x = lasts[index_field] x = lasts['index']
y = lasts['close'] y = lasts['close']
# draw the "current" step graphic segment so it # draw the "current" step graphic segment so it
@ -444,9 +434,9 @@ class StepCurve(Curve):
self, self,
path: QPainterPath, path: QPainterPath,
src_data: np.ndarray, src_data: np.ndarray,
render_data: np.ndarray,
reset: bool, reset: bool,
array_key: str, array_key: str,
index_field: str,
w: float = 0.5, w: float = 0.5,
@ -455,31 +445,40 @@ class StepCurve(Curve):
# TODO: remove this and instead place all step curve # TODO: remove this and instead place all step curve
# updating into pre-path data render callbacks. # updating into pre-path data render callbacks.
# full input data # full input data
x = src_data[index_field] x = src_data['index']
y = src_data[array_key] y = src_data[array_key]
x_last = x[-1] x_last = x[-1]
x_2last = x[-2]
y_last = y[-1] y_last = y[-1]
step_size = x_last - x_2last
# lol, commenting this makes step curves # lol, commenting this makes step curves
# all "black" for me :eyeroll:.. # all "black" for me :eyeroll:..
self._last_line = QLineF( self._last_line = QLineF(
x_2last, 0, x_last - w, 0,
x_last, 0, x_last + w, 0,
) )
self._last_step_rect = QRectF( self._last_step_rect = QRectF(
x_last, 0, x_last - w, 0,
step_size, y_last, x_last + w, y_last,
) )
return x, y return x, y
def sub_paint( def sub_paint(
self, self,
p: QPainter, p: QPainter,
profiler: pg.debug.Profiler,
) -> None: ) -> None:
# p.drawLines(*tuple(filter(bool, self._last_step_lines))) # p.drawLines(*tuple(filter(bool, self._last_step_lines)))
# p.drawRect(self._last_step_rect) # p.drawRect(self._last_step_rect)
p.fillRect(self._last_step_rect, self._brush) p.fillRect(self._last_step_rect, self._brush)
profiler('.fillRect()')
def sub_br(
self,
path_w: float,
path_h: float,
) -> (float, float):
# passthrough
return path_w, path_h

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -18,27 +18,11 @@
Higher level annotation editors. Higher level annotation editors.
""" """
from __future__ import annotations from dataclasses import dataclass, field
from collections import defaultdict from typing import Optional
from typing import (
Optional,
TYPE_CHECKING
)
import pyqtgraph as pg import pyqtgraph as pg
from pyqtgraph import ( from pyqtgraph import ViewBox, Point, QtCore, QtGui
ViewBox,
Point,
QtCore,
QtWidgets,
)
from PyQt5.QtGui import (
QColor,
)
from PyQt5.QtWidgets import (
QLabel,
)
from pyqtgraph import functions as fn from pyqtgraph import functions as fn
from PyQt5.QtCore import QPointF from PyQt5.QtCore import QPointF
import numpy as np import numpy as np
@ -46,34 +30,28 @@ import numpy as np
from ._style import hcolor, _font from ._style import hcolor, _font
from ._lines import LevelLine from ._lines import LevelLine
from ..log import get_logger from ..log import get_logger
from ..data.types import Struct
if TYPE_CHECKING:
from ._chart import GodWidget
log = get_logger(__name__) log = get_logger(__name__)
class ArrowEditor(Struct): @dataclass
class ArrowEditor:
godw: GodWidget = None # type: ignore # noqa chart: 'ChartPlotWidget' # noqa
_arrows: dict[str, list[pg.ArrowItem]] = {} _arrows: field(default_factory=dict)
def add( def add(
self, self,
plot: pg.PlotItem,
uid: str, uid: str,
x: float, x: float,
y: float, y: float,
color='default', color='default',
pointing: Optional[str] = None, pointing: Optional[str] = None,
) -> pg.ArrowItem: ) -> pg.ArrowItem:
''' """Add an arrow graphic to view at given (x, y).
Add an arrow graphic to view at given (x, y).
''' """
angle = { angle = {
'up': 90, 'up': 90,
'down': -90, 'down': -90,
@ -96,25 +74,25 @@ class ArrowEditor(Struct):
brush=pg.mkBrush(hcolor(color)), brush=pg.mkBrush(hcolor(color)),
) )
arrow.setPos(x, y) arrow.setPos(x, y)
self._arrows.setdefault(uid, []).append(arrow)
self._arrows[uid] = arrow
# render to view # render to view
plot.addItem(arrow) self.chart.plotItem.addItem(arrow)
return arrow return arrow
def remove(self, arrow) -> bool: def remove(self, arrow) -> bool:
for linked in self.godw.iter_linked(): self.chart.plotItem.removeItem(arrow)
linked.chart.plotItem.removeItem(arrow)
class LineEditor(Struct): @dataclass
''' class LineEditor:
The great editor of linez. '''The great editor of linez.
''' '''
godw: GodWidget = None # type: ignore # noqa chart: 'ChartPlotWidget' = None # type: ignore # noqa
_order_lines: defaultdict[str, LevelLine] = defaultdict(list) _order_lines: dict[str, LevelLine] = field(default_factory=dict)
_active_staged_line: LevelLine = None _active_staged_line: LevelLine = None
def stage_line( def stage_line(
@ -122,11 +100,11 @@ class LineEditor(Struct):
line: LevelLine, line: LevelLine,
) -> LevelLine: ) -> LevelLine:
''' """Stage a line at the current chart's cursor position
Stage a line at the current chart's cursor position
and return it. and return it.
''' """
# add a "staged" cursor-tracking line to view # add a "staged" cursor-tracking line to view
# and cache it in a var # and cache it in a var
if self._active_staged_line: if self._active_staged_line:
@ -137,25 +115,17 @@ class LineEditor(Struct):
return line return line
def unstage_line(self) -> LevelLine: def unstage_line(self) -> LevelLine:
''' """Inverse of ``.stage_line()``.
Inverse of ``.stage_line()``.
''' """
cursor = self.godw.get_cursor() # chart = self.chart._cursor.active_plot
if not cursor: # # chart.setCursor(QtCore.Qt.ArrowCursor)
return None cursor = self.chart.linked.cursor
# delete "staged" cursor tracking line from view # delete "staged" cursor tracking line from view
line = self._active_staged_line line = self._active_staged_line
if line: if line:
try: cursor._trackers.remove(line)
cursor._trackers.remove(line)
except KeyError:
# when the current cursor doesn't have said line
# registered (probably means that user held order mode
# key while panning to another view) then we just
# ignore the remove error.
pass
line.delete() line.delete()
self._active_staged_line = None self._active_staged_line = None
@ -163,58 +133,55 @@ class LineEditor(Struct):
# show the crosshair y line and label # show the crosshair y line and label
cursor.show_xhair() cursor.show_xhair()
def submit_lines( def submit_line(
self, self,
lines: list[LevelLine], line: LevelLine,
uuid: str, uuid: str,
) -> LevelLine: ) -> LevelLine:
# staged_line = self._active_staged_line staged_line = self._active_staged_line
# if not staged_line: if not staged_line:
# raise RuntimeError("No line is currently staged!?") raise RuntimeError("No line is currently staged!?")
# for now, until submission response arrives # for now, until submission response arrives
for line in lines: line.hide_labels()
line.hide_labels()
# register for later lookup/deletion # register for later lookup/deletion
self._order_lines[uuid] += lines self._order_lines[uuid] = line
return lines return line
def commit_line(self, uuid: str) -> list[LevelLine]: def commit_line(self, uuid: str) -> LevelLine:
''' """Commit a "staged line" to view.
Commit a "staged line" to view.
Submits the line graphic under the cursor as a (new) permanent Submits the line graphic under the cursor as a (new) permanent
graphic in view. graphic in view.
''' """
lines = self._order_lines[uuid] try:
if lines: line = self._order_lines[uuid]
for line in lines: except KeyError:
line.show_labels() log.warning(f'No line for {uuid} could be found?')
line.hide_markers() return
log.debug(f'Level active for level: {line.value()}') else:
# TODO: other flashy things to indicate the order is active line.show_labels()
return lines # TODO: other flashy things to indicate the order is active
log.debug(f'Level active for level: {line.value()}')
return line
def lines_under_cursor(self) -> list[LevelLine]: def lines_under_cursor(self) -> list[LevelLine]:
''' """Get the line(s) under the cursor position.
Get the line(s) under the cursor position.
''' """
# Delete any hoverable under the cursor # Delete any hoverable under the cursor
return self.godw.get_cursor()._hovered return self.chart.linked.cursor._hovered
def all_lines(self) -> list[LevelLine]: def all_lines(self) -> tuple[LevelLine]:
all_lines = [] return tuple(self._order_lines.values())
for lines in list(self._order_lines.values()):
all_lines.extend(lines)
return all_lines
def remove_line( def remove_line(
self, self,
@ -229,30 +196,29 @@ class LineEditor(Struct):
''' '''
# try to look up line from our registry # try to look up line from our registry
lines = self._order_lines.pop(uuid, None) line = self._order_lines.pop(uuid, line)
if lines: if line:
cursor = self.godw.get_cursor()
if cursor:
for line in lines:
# if hovered remove from cursor set
hovered = cursor._hovered
if line in hovered:
hovered.remove(line)
log.debug(f'deleting {line} with oid: {uuid}') # if hovered remove from cursor set
line.delete() cursor = self.chart.linked.cursor
hovered = cursor._hovered
if line in hovered:
hovered.remove(line)
# make sure the xhair doesn't get left off # make sure the xhair doesn't get left off
# just because we never got a un-hover event # just because we never got a un-hover event
cursor.show_xhair() cursor.show_xhair()
log.debug(f'deleting {line} with oid: {uuid}')
line.delete()
else: else:
log.warning(f'Could not find line for {line}') log.warning(f'Could not find line for {line}')
return lines return line
class SelectRect(QtWidgets.QGraphicsRectItem): class SelectRect(QtGui.QGraphicsRectItem):
def __init__( def __init__(
self, self,
@ -261,12 +227,12 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
) -> None: ) -> None:
super().__init__(0, 0, 1, 1) super().__init__(0, 0, 1, 1)
# self.rbScaleBox = QGraphicsRectItem(0, 0, 1, 1) # self.rbScaleBox = QtGui.QGraphicsRectItem(0, 0, 1, 1)
self.vb = viewbox self.vb = viewbox
self._chart: 'ChartPlotWidget' = None # noqa self._chart: 'ChartPlotWidget' = None # noqa
# override selection box color # override selection box color
color = QColor(hcolor(color)) color = QtGui.QColor(hcolor(color))
self.setPen(fn.mkPen(color, width=1)) self.setPen(fn.mkPen(color, width=1))
color.setAlpha(66) color.setAlpha(66)
self.setBrush(fn.mkBrush(color)) self.setBrush(fn.mkBrush(color))
@ -274,7 +240,7 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
self.hide() self.hide()
self._label = None self._label = None
label = self._label = QLabel() label = self._label = QtGui.QLabel()
label.setTextFormat(0) # markdown label.setTextFormat(0) # markdown
label.setFont(_font.font) label.setFont(_font.font)
label.setMargin(0) label.setMargin(0)
@ -311,8 +277,8 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
# TODO: get bg color working # TODO: get bg color working
palette.setColor( palette.setColor(
self._label.backgroundRole(), self._label.backgroundRole(),
# QColor(chart.backgroundBrush()), # QtGui.QColor(chart.backgroundBrush()),
QColor(hcolor('papas_special')), QtGui.QColor(hcolor('papas_special')),
) )
def update_on_resize(self, vr, r): def update_on_resize(self, vr, r):
@ -360,7 +326,7 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
self.setPos(r.topLeft()) self.setPos(r.topLeft())
self.resetTransform() self.resetTransform()
self.setRect(r) self.scale(r.width(), r.height())
self.show() self.show()
y1, y2 = start_pos.y(), end_pos.y() y1, y2 = start_pos.y(), end_pos.y()
@ -377,7 +343,7 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
nbars = ixmx - ixmn + 1 nbars = ixmx - ixmn + 1
chart = self._chart chart = self._chart
data = chart.get_viz(chart.name).shm.array[ixmn:ixmx] data = chart._flows[chart.name].shm.array[ixmn:ixmx]
if len(data): if len(data):
std = data['close'].std() std = data['close'].std()

View File

@ -18,11 +18,11 @@
Qt event proxying and processing using ``trio`` mem chans. Qt event proxying and processing using ``trio`` mem chans.
""" """
from contextlib import asynccontextmanager as acm from contextlib import asynccontextmanager, AsyncExitStack
from typing import Callable from typing import Callable
from pydantic import BaseModel
import trio import trio
from tractor.trionics import gather_contexts
from PyQt5 import QtCore from PyQt5 import QtCore
from PyQt5.QtCore import QEvent, pyqtBoundSignal from PyQt5.QtCore import QEvent, pyqtBoundSignal
from PyQt5.QtWidgets import QWidget from PyQt5.QtWidgets import QWidget
@ -30,8 +30,6 @@ from PyQt5.QtWidgets import (
QGraphicsSceneMouseEvent as gs_mouse, QGraphicsSceneMouseEvent as gs_mouse,
) )
from ..data.types import Struct
MOUSE_EVENTS = { MOUSE_EVENTS = {
gs_mouse.GraphicsSceneMousePress, gs_mouse.GraphicsSceneMousePress,
@ -45,10 +43,13 @@ MOUSE_EVENTS = {
# TODO: maybe consider some constrained ints down the road? # TODO: maybe consider some constrained ints down the road?
# https://pydantic-docs.helpmanual.io/usage/types/#constrained-types # https://pydantic-docs.helpmanual.io/usage/types/#constrained-types
class KeyboardMsg(Struct): class KeyboardMsg(BaseModel):
'''Unpacked Qt keyboard event data. '''Unpacked Qt keyboard event data.
''' '''
class Config:
arbitrary_types_allowed = True
event: QEvent event: QEvent
etype: int etype: int
key: int key: int
@ -56,13 +57,16 @@ class KeyboardMsg(Struct):
txt: str txt: str
def to_tuple(self) -> tuple: def to_tuple(self) -> tuple:
return tuple(self.to_dict().values()) return tuple(self.dict().values())
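The ``Struct`` type imported above appears to be piker's msgspec-based message struct; a rough sketch of the ``to_dict()`` helper these ``to_tuple()`` calls assume (an assumption, not the actual ``piker.data.types`` code):

import msgspec

class Struct(msgspec.Struct):
    def to_dict(self) -> dict:
        # msgspec structs expose their field names via ``__struct_fields__``
        return {f: getattr(self, f) for f in self.__struct_fields__}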
class MouseMsg(Struct): class MouseMsg(BaseModel):
'''Unpacked Qt keyboard event data. '''Unpacked Qt keyboard event data.
''' '''
class Config:
arbitrary_types_allowed = True
event: QEvent event: QEvent
etype: int etype: int
button: int button: int
@ -156,7 +160,7 @@ class EventRelay(QtCore.QObject):
return False return False
@acm @asynccontextmanager
async def open_event_stream( async def open_event_stream(
source_widget: QWidget, source_widget: QWidget,
@ -182,7 +186,7 @@ async def open_event_stream(
source_widget.removeEventFilter(kc) source_widget.removeEventFilter(kc)
@acm @asynccontextmanager
async def open_signal_handler( async def open_signal_handler(
signal: pyqtBoundSignal, signal: pyqtBoundSignal,
@ -207,7 +211,7 @@ async def open_signal_handler(
yield yield
@acm @asynccontextmanager
async def open_handlers( async def open_handlers(
source_widgets: list[QWidget], source_widgets: list[QWidget],
@ -216,14 +220,16 @@ async def open_handlers(
**kwargs, **kwargs,
) -> None: ) -> None:
async with ( async with (
trio.open_nursery() as n, trio.open_nursery() as n,
gather_contexts([ AsyncExitStack() as stack,
open_event_stream(widget, event_types, **kwargs)
for widget in source_widgets
]) as streams,
): ):
for widget, event_recv_stream in zip(source_widgets, streams): for widget in source_widgets:
event_recv_stream = await stack.enter_async_context(
open_event_stream(widget, event_types, **kwargs)
)
n.start_soon(async_handler, widget, event_recv_stream) n.start_soon(async_handler, widget, event_recv_stream)
yield yield
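A hypothetical usage sketch of ``open_handlers`` (the elided parameters are assumed to be the ``event_types`` set and the ``async_handler`` coroutine referenced in the body above):

import trio
from PyQt5.QtCore import QEvent

async def handle_keys(widget, recv_stream):
    # key-event msgs arrive over the trio mem chan opened per widget
    async for msg in recv_stream:
        print('key event:', msg)

async def watch_keys(some_widget):  # ``some_widget`` is a hypothetical QWidget
    async with open_handlers(
        [some_widget],
        event_types={QEvent.KeyPress, QEvent.KeyRelease},
        async_handler=handle_keys,
    ):
        await trio.sleep_forever()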

View File

@ -20,24 +20,16 @@ Trio - Qt integration
Run ``trio`` in guest mode on top of the Qt event loop. Run ``trio`` in guest mode on top of the Qt event loop.
All global Qt runtime settings are mostly defined here. All global Qt runtime settings are mostly defined here.
""" """
from __future__ import annotations from typing import Tuple, Callable, Dict, Any
from typing import (
Callable,
Any,
Type,
TYPE_CHECKING,
)
import platform import platform
import traceback import traceback
# Qt specific # Qt specific
import PyQt5 # noqa import PyQt5 # noqa
from PyQt5.QtWidgets import ( import pyqtgraph as pg
QWidget, from pyqtgraph import QtGui
QMainWindow,
QApplication,
)
from PyQt5 import QtCore from PyQt5 import QtCore
# from PyQt5.QtGui import QLabel, QStatusBar
from PyQt5.QtCore import ( from PyQt5.QtCore import (
pyqtRemoveInputHook, pyqtRemoveInputHook,
Qt, Qt,
@ -45,19 +37,15 @@ from PyQt5.QtCore import (
) )
import qdarkstyle import qdarkstyle
from qdarkstyle import DarkPalette from qdarkstyle import DarkPalette
# import qdarkgraystyle # TODO: play with it # import qdarkgraystyle
import trio import trio
from outcome import Error from outcome import Error
from .._daemon import ( from .._daemon import maybe_open_pikerd, _tractor_kwargs
maybe_open_pikerd,
get_tractor_runtime_kwargs,
)
from ..log import get_logger from ..log import get_logger
from ._pg_overrides import _do_overrides from ._pg_overrides import _do_overrides
from . import _style from . import _style
log = get_logger(__name__) log = get_logger(__name__)
# pyqtgraph global config # pyqtgraph global config
@ -84,18 +72,17 @@ if platform.system() == "Windows":
def run_qtractor( def run_qtractor(
func: Callable, func: Callable,
args: tuple, args: Tuple,
main_widget_type: Type[QWidget], main_widget: QtGui.QWidget,
tractor_kwargs: dict[str, Any] = {}, tractor_kwargs: Dict[str, Any] = {},
window_type: QMainWindow = None, window_type: QtGui.QMainWindow = None,
) -> None: ) -> None:
# avoids annoying message when entering debugger from qt loop # avoids annoying message when entering debugger from qt loop
pyqtRemoveInputHook() pyqtRemoveInputHook()
app = QApplication.instance() app = QtGui.QApplication.instance()
if app is None: if app is None:
app = QApplication([]) app = PyQt5.QtWidgets.QApplication([])
# TODO: we might not need this if it's desired # TODO: we might not need this if it's desired
# to cancel the tractor machinery on Qt loop # to cancel the tractor machinery on Qt loop
@ -169,11 +156,11 @@ def run_qtractor(
# hook into app focus change events # hook into app focus change events
app.focusChanged.connect(window.on_focus_change) app.focusChanged.connect(window.on_focus_change)
instance = main_widget_type() instance = main_widget()
instance.window = window instance.window = window
# override tractor's defaults # override tractor's defaults
tractor_kwargs.update(get_tractor_runtime_kwargs()) tractor_kwargs.update(_tractor_kwargs)
# define tractor entrypoint # define tractor entrypoint
async def main(): async def main():
@ -191,7 +178,7 @@ def run_qtractor(
# restrict_keyboard_interrupt_to_checkpoints=True, # restrict_keyboard_interrupt_to_checkpoints=True,
) )
window.godwidget: GodWidget = instance window.main_widget = main_widget
window.setCentralWidget(instance) window.setCentralWidget(instance)
if is_windows: if is_windows:
window.configure_to_desktop() window.configure_to_desktop()

1247 piker/ui/_flows.py 100644

File diff suppressed because it is too large

View File

@ -619,7 +619,7 @@ class FillStatusBar(QProgressBar):
# color: #19232D; # color: #19232D;
# width: 10px; # width: 10px;
self.setRange(0, int(slots)) self.setRange(0, slots)
self.setValue(value) self.setValue(value)
@ -644,7 +644,7 @@ def mk_fill_status_bar(
# TODO: calc this height from the ``ChartnPane`` # TODO: calc this height from the ``ChartnPane``
chart_h = round(parent_pane.height() * 5/8) chart_h = round(parent_pane.height() * 5/8)
bar_h = chart_h * 0.375*0.9 bar_h = chart_h * 0.375
# TODO: once things are sized to screen # TODO: once things are sized to screen
bar_label_font_size = label_font_size or _font.px_size - 2 bar_label_font_size = label_font_size or _font.px_size - 2

View File

@ -27,13 +27,12 @@ from itertools import cycle
from typing import Optional, AsyncGenerator, Any from typing import Optional, AsyncGenerator, Any
import numpy as np import numpy as np
import msgspec from pydantic import create_model
import tractor import tractor
import pyqtgraph as pg import pyqtgraph as pg
import trio import trio
from trio_typing import TaskStatus from trio_typing import TaskStatus
from piker.data.types import Struct
from ._axes import PriceAxis from ._axes import PriceAxis
from .._cacheables import maybe_open_context from .._cacheables import maybe_open_context
from ..calc import humanize from ..calc import humanize
@ -42,8 +41,6 @@ from ..data._sharedmem import (
_Token, _Token,
try_read, try_read,
) )
from ..data.feed import Flume
from ..data._source import Symbol
from ._chart import ( from ._chart import (
ChartPlotWidget, ChartPlotWidget,
LinkedSplits, LinkedSplits,
@ -53,18 +50,14 @@ from ._forms import (
mk_form, mk_form,
open_form_input_handling, open_form_input_handling,
) )
from ..fsp._api import ( from ..fsp._api import maybe_mk_fsp_shm, Fsp
maybe_mk_fsp_shm,
Fsp,
)
from ..fsp import cascade from ..fsp import cascade
from ..fsp._volume import ( from ..fsp._volume import (
# tina_vwap, tina_vwap,
dolla_vlm, dolla_vlm,
flow_rates, flow_rates,
) )
from ..log import get_logger from ..log import get_logger
from .._profile import Profiler
log = get_logger(__name__) log = get_logger(__name__)
@ -78,14 +71,15 @@ def has_vlm(ohlcv: ShmArray) -> bool:
def update_fsp_chart( def update_fsp_chart(
viz, chart: ChartPlotWidget,
flow,
graphics_name: str, graphics_name: str,
array_key: Optional[str], array_key: Optional[str],
**kwargs, **kwargs,
) -> None: ) -> None:
shm = viz.shm shm = flow.shm
if not shm: if not shm:
return return
@ -100,15 +94,18 @@ def update_fsp_chart(
# update graphics # update graphics
# NOTE: this does a length check internally which allows it # NOTE: this does a length check internally which allows it
# staying above the last row check below.. # staying above the last row check below..
viz.update_graphics() chart.update_graphics_from_flow(
graphics_name,
array_key=array_key or graphics_name,
**kwargs,
)
# XXX: re: ``array_key``: fsp func names must be unique meaning we # XXX: re: ``array_key``: fsp func names must be unique meaning we
# can't have duplicates of the underlying data even if multiple # can't have duplicates of the underlying data even if multiple
# sub-charts reference it under different 'named charts'. # sub-charts reference it under different 'named charts'.
# read from last calculated value and update any label # read from last calculated value and update any label
last_val_sticky = viz.plot.getAxis( last_val_sticky = chart._ysticks.get(graphics_name)
'right')._stickies.get(graphics_name)
if last_val_sticky: if last_val_sticky:
last = last_row[array_key] last = last_row[array_key]
last_val_sticky.update_from_data(-1, last) last_val_sticky.update_from_data(-1, last)
@ -156,13 +153,12 @@ async def open_fsp_sidepane(
) )
# https://pydantic-docs.helpmanual.io/usage/models/#dynamic-model-creation # https://pydantic-docs.helpmanual.io/usage/models/#dynamic-model-creation
FspConfig = msgspec.defstruct( FspConfig = create_model(
"Point", 'FspConfig',
[('name', name)] + list(params.items()), name=name,
bases=(Struct,), **params,
) )
model = FspConfig(name=name, **params) sidepane.model = FspConfig()
sidepane.model = model
# just a logger for now until we get fsp configs up and running. # just a logger for now until we get fsp configs up and running.
async def settings_change( async def settings_change(
@ -192,7 +188,7 @@ async def open_fsp_actor_cluster(
from tractor._clustering import open_actor_cluster from tractor._clustering import open_actor_cluster
# profiler = Profiler( # profiler = pg.debug.Profiler(
# delayed=False, # delayed=False,
# disabled=False # disabled=False
# ) # )
@ -209,12 +205,12 @@ async def open_fsp_actor_cluster(
async def run_fsp_ui( async def run_fsp_ui(
linkedsplits: LinkedSplits, linkedsplits: LinkedSplits,
flume: Flume, shm: ShmArray,
started: trio.Event, started: trio.Event,
target: Fsp, target: Fsp,
conf: dict[str, dict], conf: dict[str, dict],
loglevel: str, loglevel: str,
# profiler: Profiler, # profiler: pg.debug.Profiler,
# _quote_throttle_rate: int = 58, # _quote_throttle_rate: int = 58,
) -> None: ) -> None:
@ -246,11 +242,9 @@ async def run_fsp_ui(
else: else:
chart = linkedsplits.subplots[overlay_with] chart = linkedsplits.subplots[overlay_with]
shm = flume.rt_shm
chart.draw_curve( chart.draw_curve(
name, name=name,
shm, shm=shm,
flume,
overlay=True, overlay=True,
color='default_light', color='default_light',
array_key=name, array_key=name,
@ -260,9 +254,8 @@ async def run_fsp_ui(
else: else:
# create a new sub-chart widget for this fsp # create a new sub-chart widget for this fsp
chart = linkedsplits.add_plot( chart = linkedsplits.add_plot(
name, name=name,
shm, shm=shm,
flume,
array_key=name, array_key=name,
sidepane=sidepane, sidepane=sidepane,
@ -282,10 +275,9 @@ async def run_fsp_ui(
# profiler(f'fsp:{name} chart created') # profiler(f'fsp:{name} chart created')
# first UI update, usually from shm pushed history # first UI update, usually from shm pushed history
viz = chart.get_viz(array_key)
update_fsp_chart( update_fsp_chart(
chart, chart,
viz, chart._flows[array_key],
name, name,
array_key=array_key, array_key=array_key,
) )
@ -312,7 +304,7 @@ async def run_fsp_ui(
# level_line(chart, 70, orient_v='bottom') # level_line(chart, 70, orient_v='bottom')
# level_line(chart, 80, orient_v='top') # level_line(chart, 80, orient_v='top')
chart.view._set_yrange(viz=viz) chart.view._set_yrange()
# done() # status updates # done() # status updates
# profiler(f'fsp:{func_name} starting update loop') # profiler(f'fsp:{func_name} starting update loop')
@ -353,9 +345,6 @@ async def run_fsp_ui(
# last = time.time() # last = time.time()
# TODO: maybe this should be our ``Viz`` type since it maps
# one flume to the next? The machinery for task/actor mgmt should
# be part of the instantiation API?
class FspAdmin: class FspAdmin:
''' '''
Client API for orchestrating FSP actors and displaying Client API for orchestrating FSP actors and displaying
@ -367,7 +356,7 @@ class FspAdmin:
tn: trio.Nursery, tn: trio.Nursery,
cluster: dict[str, tractor.Portal], cluster: dict[str, tractor.Portal],
linked: LinkedSplits, linked: LinkedSplits,
flume: Flume, src_shm: ShmArray,
) -> None: ) -> None:
self.tn = tn self.tn = tn
@ -379,11 +368,7 @@ class FspAdmin:
tuple[tractor.MsgStream, ShmArray] tuple[tractor.MsgStream, ShmArray]
] = {} ] = {}
self._flow_registry: dict[_Token, str] = {} self._flow_registry: dict[_Token, str] = {}
self.src_shm = src_shm
# TODO: make this a `.src_flume` and add
# a `dst_flume`?
# (=> but then wouldn't this be the most basic `Viz`?)
self.flume = flume
def rr_next_portal(self) -> tractor.Portal: def rr_next_portal(self) -> tractor.Portal:
name, portal = next(self._rr_next_actor) name, portal = next(self._rr_next_actor)
@ -396,7 +381,7 @@ class FspAdmin:
complete: trio.Event, complete: trio.Event,
started: trio.Event, started: trio.Event,
fqsn: str, fqsn: str,
dst_fsp_flume: Flume, dst_shm: ShmArray,
conf: dict, conf: dict,
target: Fsp, target: Fsp,
loglevel: str, loglevel: str,
@ -417,10 +402,9 @@ class FspAdmin:
# data feed key # data feed key
fqsn=fqsn, fqsn=fqsn,
# TODO: pass `Flume.to_msg()`s here?
# mems # mems
src_shm_token=self.flume.rt_shm.token, src_shm_token=self.src_shm.token,
dst_shm_token=dst_fsp_flume.rt_shm.token, dst_shm_token=dst_shm.token,
# target # target
ns_path=ns_path, ns_path=ns_path,
@ -437,14 +421,12 @@ class FspAdmin:
ctx.open_stream() as stream, ctx.open_stream() as stream,
): ):
dst_fsp_flume.stream: tractor.MsgStream = stream
# register output data # register output data
self._registry[ self._registry[
(fqsn, ns_path) (fqsn, ns_path)
] = ( ] = (
stream, stream,
dst_fsp_flume.rt_shm, dst_shm,
complete complete
) )
@ -458,9 +440,7 @@ class FspAdmin:
# if the chart isn't hidden try to update # if the chart isn't hidden try to update
# the data on screen. # the data on screen.
if not self.linked.isHidden(): if not self.linked.isHidden():
log.debug( log.debug(f'Re-syncing graphics for fsp: {ns_path}')
f'Re-syncing graphics for fsp: {ns_path}'
)
self.linked.graphics_cycle( self.linked.graphics_cycle(
trigger_all=True, trigger_all=True,
prepend_update_index=info['first'], prepend_update_index=info['first'],
@ -479,9 +459,9 @@ class FspAdmin:
worker_name: Optional[str] = None, worker_name: Optional[str] = None,
loglevel: str = 'info', loglevel: str = 'info',
) -> (Flume, trio.Event): ) -> (ShmArray, trio.Event):
fqsn = self.flume.symbol.fqsn fqsn = self.linked.symbol.front_fqsn()
# allocate an output shm array # allocate an output shm array
key, dst_shm, opened = maybe_mk_fsp_shm( key, dst_shm, opened = maybe_mk_fsp_shm(
@ -489,36 +469,16 @@ class FspAdmin:
target=target, target=target,
readonly=True, readonly=True,
) )
self._flow_registry[
portal = self.cluster.get(worker_name) or self.rr_next_portal() (self.src_shm._token, target.name)
provider_tag = portal.channel.uid ] = dst_shm._token
symbol = Symbol(
key=key,
broker_info={
provider_tag: {'asset_type': 'fsp'},
},
)
dst_fsp_flume = Flume(
symbol=symbol,
_rt_shm_token=dst_shm.token,
first_quote={},
# set to 0 presuming for now that we can't load
# FSP history (though we should eventually).
izero_hist=0,
izero_rt=0,
)
self._flow_registry[(
self.flume.rt_shm._token,
target.name
)] = dst_shm._token
# if not opened: # if not opened:
# raise RuntimeError( # raise RuntimeError(
# f'Already started FSP `{fqsn}:{func_name}`' # f'Already started FSP `{fqsn}:{func_name}`'
# ) # )
portal = self.cluster.get(worker_name) or self.rr_next_portal()
complete = trio.Event() complete = trio.Event()
started = trio.Event() started = trio.Event()
self.tn.start_soon( self.tn.start_soon(
@ -527,13 +487,13 @@ class FspAdmin:
complete, complete,
started, started,
fqsn, fqsn,
dst_fsp_flume, dst_shm,
conf, conf,
target, target,
loglevel, loglevel,
) )
return dst_fsp_flume, started return dst_shm, started
async def open_fsp_chart( async def open_fsp_chart(
self, self,
@ -545,7 +505,7 @@ class FspAdmin:
) -> (trio.Event, ChartPlotWidget): ) -> (trio.Event, ChartPlotWidget):
flume, started = await self.start_engine_task( shm, started = await self.start_engine_task(
target, target,
conf, conf,
loglevel, loglevel,
@ -557,7 +517,7 @@ class FspAdmin:
run_fsp_ui, run_fsp_ui,
self.linked, self.linked,
flume, shm,
started, started,
target, target,
@ -571,7 +531,7 @@ class FspAdmin:
@acm @acm
async def open_fsp_admin( async def open_fsp_admin(
linked: LinkedSplits, linked: LinkedSplits,
flume: Flume, src_shm: ShmArray,
**kwargs, **kwargs,
) -> AsyncGenerator[dict, dict[str, tractor.Portal]]: ) -> AsyncGenerator[dict, dict[str, tractor.Portal]]:
@ -592,7 +552,7 @@ async def open_fsp_admin(
tn, tn,
cluster_map, cluster_map,
linked, linked,
flume, src_shm,
) )
try: try:
yield admin yield admin
@ -606,7 +566,7 @@ async def open_fsp_admin(
async def open_vlm_displays( async def open_vlm_displays(
linked: LinkedSplits, linked: LinkedSplits,
flume: Flume, ohlcv: ShmArray,
dvlm: bool = True, dvlm: bool = True,
task_status: TaskStatus[ChartPlotWidget] = trio.TASK_STATUS_IGNORED, task_status: TaskStatus[ChartPlotWidget] = trio.TASK_STATUS_IGNORED,
@ -628,8 +588,6 @@ async def open_vlm_displays(
sig = inspect.signature(flow_rates.func) sig = inspect.signature(flow_rates.func)
params = sig.parameters params = sig.parameters
ohlcv: ShmArray = flume.rt_shm
async with ( async with (
open_fsp_sidepane( open_fsp_sidepane(
linked, { linked, {
@ -649,7 +607,7 @@ async def open_vlm_displays(
} }
}, },
) as sidepane, ) as sidepane,
open_fsp_admin(linked, flume) as admin, open_fsp_admin(linked, ohlcv) as admin,
): ):
# TODO: support updates # TODO: support updates
# period_field = sidepane.fields['period'] # period_field = sidepane.fields['period']
@ -657,21 +615,12 @@ async def open_vlm_displays(
# str(period_param.default) # str(period_param.default)
# ) # )
# use slightly less light (then bracket) gray
# for volume from "main exchange" and a more "bluey"
# gray for "dark" vlm.
vlm_color = 'i3'
dark_vlm_color = 'charcoal'
# built-in vlm which we plot ASAP since it's # built-in vlm which we plot ASAP since it's
# usually data provided directly with OHLC history. # usually data provided directly with OHLC history.
shm = ohlcv shm = ohlcv
# ohlc_chart = linked.chart chart = linked.add_plot(
vlm_chart = linked.add_plot(
name='volume', name='volume',
shm=shm, shm=shm,
flume=flume,
array_key='volume', array_key='volume',
sidepane=sidepane, sidepane=sidepane,
@ -684,47 +633,63 @@ async def open_vlm_displays(
# the curve item internals are pretty convoluted. # the curve item internals are pretty convoluted.
style='step', style='step',
) )
vlm_viz = vlm_chart._vizs['volume']
# force 0 to always be in view
def multi_maxmin(
names: list[str],
) -> tuple[float, float]:
mx = 0
for name in names:
mxmn = chart.maxmin(name=name)
if mxmn:
ymax = mxmn[1]
if ymax > mx:
mx = ymax
return 0, mx
chart.view.maxmin = partial(multi_maxmin, names=['volume'])
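Note: `multi_maxmin()` above is the general auto-ranging pattern used here; the view calls back into a reducer that maps a group of named series to one (low, high) tuple, with the low end pinned at 0 for volume. A self-contained sketch of that reduction, with plain numpy arrays standing in for the chart's flows:

from functools import partial
import numpy as np

# hypothetical per-series data standing in for chart flows
series: dict[str, np.ndarray] = {
    'volume': np.array([10., 42., 7., 99.]),
    'dark_vlm': np.array([3., 55., 12., 80.]),
}

def multi_maxmin(names: list[str]) -> tuple[float, float]:
    # pin the low end at 0 and take the largest max across the group
    mx = 0.
    for name in names:
        arr = series.get(name)
        if arr is not None and len(arr):
            mx = max(mx, float(arr.max()))
    return 0., mx

# bind the name group once so the view can call it with no args
maxmin = partial(multi_maxmin, names=['volume', 'dark_vlm'])
assert maxmin() == (0., 99.)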
# TODO: fix the x-axis label issue where if you put # TODO: fix the x-axis label issue where if you put
# the axis on the left it's totally not lined up... # the axis on the left it's totally not lined up...
# show volume units value on LHS (for dinkus) # show volume units value on LHS (for dinkus)
# vlm_chart.hideAxis('right') # chart.hideAxis('right')
# vlm_chart.showAxis('left') # chart.showAxis('left')
# send back new chart to caller # send back new chart to caller
task_status.started(vlm_chart) task_status.started(chart)
# should **not** be the same sub-chart widget # should **not** be the same sub-chart widget
assert vlm_chart.name != linked.chart.name assert chart.name != linked.chart.name
# sticky only on sub-charts atm # sticky only on sub-charts atm
last_val_sticky = vlm_chart.plotItem.getAxis( last_val_sticky = chart._ysticks[chart.name]
'right')._stickies.get(vlm_chart.name)
# read from last calculated value # read from last calculated value
value = shm.array['volume'][-1] value = shm.array['volume'][-1]
last_val_sticky.update_from_data(-1, value) last_val_sticky.update_from_data(-1, value)
_, _, vlm_curve = vlm_chart.update_graphics_from_flow( vlm_curve = chart.update_graphics_from_flow(
'volume', 'volume',
# shm.array,
) )
# size view to data once at outset # size view to data once at outset
vlm_chart.view._set_yrange( chart.view._set_yrange()
viz=vlm_viz
)
# add axis title # add axis title
axis = vlm_chart.getAxis('right') axis = chart.getAxis('right')
axis.set_title(' vlm') axis.set_title(' vlm')
if dvlm: if dvlm:
tasks_ready = [] tasks_ready = []
# spawn and overlay $ vlm on the same subchart # spawn and overlay $ vlm on the same subchart
dvlm_flume, started = await admin.start_engine_task( dvlm_shm, started = await admin.start_engine_task(
dolla_vlm, dolla_vlm,
{ # fsp engine conf { # fsp engine conf
@ -743,7 +708,7 @@ async def open_vlm_displays(
# FIXME: we should error on starting the same fsp right # FIXME: we should error on starting the same fsp right
# since it might collide with existing shm.. or wait we # since it might collide with existing shm.. or wait we
# had this before?? # had this before??
# dolla_vlm # dolla_vlm,
tasks_ready.append(started) tasks_ready.append(started)
# profiler(f'created shm for fsp actor: {display_name}') # profiler(f'created shm for fsp actor: {display_name}')
@ -757,29 +722,22 @@ async def open_vlm_displays(
# XXX: the main chart already contains a vlm "units" axis # XXX: the main chart already contains a vlm "units" axis
# so here we add an overlay with a y-range in # so here we add an overlay with a y-range in
# $ liquidity-value units (normally a fiat like USD). # $ liquidity-value units (normally a fiat like USD).
dvlm_pi = vlm_chart.overlay_plotitem( dvlm_pi = chart.overlay_plotitem(
'dolla_vlm', 'dolla_vlm',
index=0, # place axis on inside (nearest to chart) index=0, # place axis on inside (nearest to chart)
axis_title=' $vlm', axis_title=' $vlm',
axis_side='left', axis_side='right',
axis_kwargs={ axis_kwargs={
'typical_max_str': ' 100.0 M ', 'typical_max_str': ' 100.0 M ',
'formatter': partial( 'formatter': partial(
humanize, humanize,
digits=2, digits=2,
), ),
'text_color': vlm_color,
}, },
) )
# TODO: should this maybe be implicit based on input args to
# `.overlay_plotitem()` above?
dvlm_pi.hideAxis('bottom')
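Note: the `humanize` formatter bound with `partial()` above just compresses large $-liquidity values into short axis labels; it is imported from `..calc`, so the helper below is only an illustrative stand-in for what such a formatter does, not the real implementation:

from functools import partial

def humanize(value: float, digits: int = 2) -> str:
    # scale into K/M/B suffixes for compact axis labels
    for divisor, suffix in ((1e9, 'B'), (1e6, 'M'), (1e3, 'K')):
        if abs(value) >= divisor:
            return f'{value / divisor:.{digits}f} {suffix}'
    return f'{value:.{digits}f}'

fmt = partial(humanize, digits=2)
assert fmt(123_456_789) == '123.46 M'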
# all to be overlaid curve names # all to be overlaid curve names
dvlm_fields = [ fields = [
'dolla_vlm', 'dolla_vlm',
'dark_vlm', 'dark_vlm',
] ]
@ -792,18 +750,32 @@ async def open_vlm_displays(
'dark_trade_rate', 'dark_trade_rate',
] ]
group_mxmn = partial(
multi_maxmin,
# keep both regular and dark vlm in view
names=fields,
# names=fields + dvlm_rate_fields,
)
# add custom auto range handler
dvlm_pi.vb._maxmin = group_mxmn
# use slightly less light (then bracket) gray
# for volume from "main exchange" and a more "bluey"
# gray for "dark" vlm.
vlm_color = 'i3'
dark_vlm_color = 'charcoal'
# add dvlm (step) curves to common view # add dvlm (step) curves to common view
def chart_curves( def chart_curves(
names: list[str], names: list[str],
pi: pg.PlotItem, pi: pg.PlotItem,
shm: ShmArray, shm: ShmArray,
flume: Flume,
step_mode: bool = False, step_mode: bool = False,
style: str = 'solid', style: str = 'solid',
) -> None: ) -> None:
for name in names: for name in names:
if 'dark' in name: if 'dark' in name:
color = dark_vlm_color color = dark_vlm_color
elif 'rate' in name: elif 'rate' in name:
@ -811,13 +783,9 @@ async def open_vlm_displays(
else: else:
color = 'bracket' color = 'bracket'
assert isinstance(shm, ShmArray) curve, _ = chart.draw_curve(
assert isinstance(flume, Flume) name=name,
shm=shm,
viz = vlm_chart.draw_curve(
name,
shm,
flume,
array_key=name, array_key=name,
overlay=pi, overlay=pi,
color=color, color=color,
@ -825,24 +793,29 @@ async def open_vlm_displays(
style=style, style=style,
pi=pi, pi=pi,
) )
assert viz.plot is pi
# TODO: we need a better API to do this..
# specially store ref to shm for lookup in display loop
# since only a placeholder of `None` is entered in
# ``.draw_curve()``.
flow = chart._flows[name]
assert flow.plot is pi
chart_curves( chart_curves(
dvlm_fields, fields,
dvlm_pi, dvlm_pi,
dvlm_flume.rt_shm, dvlm_shm,
dvlm_flume,
step_mode=True, step_mode=True,
) )
# spawn flow rates fsp **ONLY AFTER** the 'dolla_vlm' fsp is # spawn flow rates fsp **ONLY AFTER** the 'dolla_vlm' fsp is
# up since this one depends on it. # up since this one depends on it.
fr_flume, started = await admin.start_engine_task( fr_shm, started = await admin.start_engine_task(
flow_rates, flow_rates,
{ # fsp engine conf { # fsp engine conf
'func_name': 'flow_rates', 'func_name': 'flow_rates',
'zero_on_step': True, 'zero_on_step': False,
}, },
# loglevel, # loglevel,
) )
@ -851,7 +824,7 @@ async def open_vlm_displays(
# chart_curves( # chart_curves(
# dvlm_rate_fields, # dvlm_rate_fields,
# dvlm_pi, # dvlm_pi,
# fr_flume.rt_shm, # fr_shm,
# ) # )
# TODO: is there a way to "sync" the dual axes such that only # TODO: is there a way to "sync" the dual axes such that only
@ -860,24 +833,24 @@ async def open_vlm_displays(
# displayed and the curves are effectively the same minus # displayed and the curves are effectively the same minus
# liquidity events (well at least on low OHLC periods - 1s). # liquidity events (well at least on low OHLC periods - 1s).
vlm_curve.hide() vlm_curve.hide()
vlm_chart.removeItem(vlm_curve) chart.removeItem(vlm_curve)
vlm_viz = vlm_chart._vizs['volume'] vflow = chart._flows['volume']
vlm_viz.render = False vflow.render = False
# avoid range sorting on volume once disabled # avoid range sorting on volume once disabled
vlm_chart.view.disable_auto_yrange() chart.view.disable_auto_yrange()
# Trade rate overlay # Trade rate overlay
# XXX: requires an additional overlay for # XXX: requires an additional overlay for
# a trades-per-period (time) y-range. # a trades-per-period (time) y-range.
tr_pi = vlm_chart.overlay_plotitem( tr_pi = chart.overlay_plotitem(
'trade_rates', 'trade_rates',
# TODO: dynamically update period (and thus this axis?) # TODO: dynamically update period (and thus this axis?)
# title from user input. # title from user input.
axis_title='clears', axis_title='clears',
axis_side='left',
axis_side='left',
axis_kwargs={ axis_kwargs={
'typical_max_str': ' 10.0 M ', 'typical_max_str': ' 10.0 M ',
'formatter': partial( 'formatter': partial(
@ -888,13 +861,17 @@ async def open_vlm_displays(
}, },
) )
tr_pi.hideAxis('bottom') # add custom auto range handler
tr_pi.vb.maxmin = partial(
multi_maxmin,
# keep both regular and dark vlm in view
names=trade_rate_fields,
)
chart_curves( chart_curves(
trade_rate_fields, trade_rate_fields,
tr_pi, tr_pi,
fr_flume.rt_shm, fr_shm,
fr_flume,
# step_mode=True, # step_mode=True,
# dashed line to represent "individual trades" being # dashed line to represent "individual trades" being
@ -928,7 +905,7 @@ async def open_vlm_displays(
async def start_fsp_displays( async def start_fsp_displays(
linked: LinkedSplits, linked: LinkedSplits,
flume: Flume, ohlcv: ShmArray,
group_status_key: str, group_status_key: str,
loglevel: str, loglevel: str,
@ -963,7 +940,7 @@ async def start_fsp_displays(
# }, # },
# }, # },
} }
profiler = Profiler( profiler = pg.debug.Profiler(
delayed=False, delayed=False,
disabled=False disabled=False
) )
@ -971,10 +948,7 @@ async def start_fsp_displays(
async with ( async with (
# NOTE: this admin internally opens an actor cluster # NOTE: this admin internally opens an actor cluster
open_fsp_admin( open_fsp_admin(linked, ohlcv) as admin,
linked,
flume,
) as admin,
): ):
statuses = [] statuses = []
for target, conf in fsp_conf.items(): for target, conf in fsp_conf.items():

View File

@ -20,13 +20,8 @@ Chart view box primitives
""" """
from __future__ import annotations from __future__ import annotations
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from functools import partial
import time import time
from typing import ( from typing import Optional, Callable
Optional,
Callable,
TYPE_CHECKING,
)
import pyqtgraph as pg import pyqtgraph as pg
# from pyqtgraph.GraphicsScene import mouseEvents # from pyqtgraph.GraphicsScene import mouseEvents
@ -38,16 +33,11 @@ import numpy as np
import trio import trio
from ..log import get_logger from ..log import get_logger
from .._profile import Profiler
from .._profile import pg_profile_enabled, ms_slower_then from .._profile import pg_profile_enabled, ms_slower_then
# from ._style import _min_points_to_show # from ._style import _min_points_to_show
from ._editors import SelectRect from ._editors import SelectRect
from . import _event from . import _event
if TYPE_CHECKING:
from ._chart import ChartPlotWidget
from ._dataviz import Viz
log = get_logger(__name__) log = get_logger(__name__)
@ -85,6 +75,7 @@ async def handle_viewmode_kb_inputs(
pressed: set[str] = set() pressed: set[str] = set()
last = time.time() last = time.time()
trigger_mode: str
action: str action: str
on_next_release: Optional[Callable] = None on_next_release: Optional[Callable] = None
@ -150,16 +141,13 @@ async def handle_viewmode_kb_inputs(
Qt.Key_Space, Qt.Key_Space,
} }
): ):
godw = view._chart.linked.godwidget view._chart.linked.godwidget.search.focus()
godw.hist_linked.resize_sidepanes(from_linked=godw.rt_linked)
godw.search.focus()
# esc and ctrl-c # esc and ctrl-c
if key == Qt.Key_Escape or (ctrl and key == Qt.Key_C): if key == Qt.Key_Escape or (ctrl and key == Qt.Key_C):
# ctrl-c as cancel # ctrl-c as cancel
# https://forum.qt.io/topic/532/how-to-catch-ctrl-c-on-a-widget/9 # https://forum.qt.io/topic/532/how-to-catch-ctrl-c-on-a-widget/9
view.select_box.clear() view.select_box.clear()
view.linked.focus()
# cancel order or clear graphics # cancel order or clear graphics
if key == Qt.Key_C or key == Qt.Key_Delete: if key == Qt.Key_C or key == Qt.Key_Delete:
@ -190,17 +178,17 @@ async def handle_viewmode_kb_inputs(
if key in pressed: if key in pressed:
pressed.remove(key) pressed.remove(key)
# QUERY/QUOTE MODE # QUERY/QUOTE MODE #
# ----------------
if {Qt.Key_Q}.intersection(pressed): if {Qt.Key_Q}.intersection(pressed):
view.linked.cursor.in_query_mode = True view.linkedsplits.cursor.in_query_mode = True
else: else:
view.linked.cursor.in_query_mode = False view.linkedsplits.cursor.in_query_mode = False
# SELECTION MODE # SELECTION MODE
# -------------- # --------------
if shift: if shift:
if view.state['mouseMode'] == ViewBox.PanMode: if view.state['mouseMode'] == ViewBox.PanMode:
view.setMouseMode(ViewBox.RectMode) view.setMouseMode(ViewBox.RectMode)
@ -221,27 +209,18 @@ async def handle_viewmode_kb_inputs(
# ORDER MODE # ORDER MODE
# ---------- # ----------
# live vs. dark trigger + an action {buy, sell, alert} # live vs. dark trigger + an action {buy, sell, alert}
order_keys_pressed = ORDER_MODE.intersection(pressed) order_keys_pressed = ORDER_MODE.intersection(pressed)
if order_keys_pressed: if order_keys_pressed:
# TODO: it seems like maybe the composition should be # show the pp size label
# reversed here? Like, maybe we should have the nav have order_mode.current_pp.show()
# access to the pos state and then make encapsulated logic
# that shows the right stuff on screen instead of order mode
# and position-related abstractions doing this?
# show the pp size label only if there is
# a non-zero pos existing
tracker = order_mode.current_pp
if tracker.live_pp.size:
tracker.nav.show()
# TODO: show pp config mini-params in status bar widget # TODO: show pp config mini-params in status bar widget
# mode.pp_config.show() # mode.pp_config.show()
trigger_type: str = 'dark'
if ( if (
# 's' for "submit" to activate "live" order # 's' for "submit" to activate "live" order
Qt.Key_S in pressed or Qt.Key_S in pressed or
@ -249,6 +228,9 @@ async def handle_viewmode_kb_inputs(
): ):
trigger_type: str = 'live' trigger_type: str = 'live'
else:
trigger_type: str = 'dark'
# order mode trigger "actions" # order mode trigger "actions"
if Qt.Key_D in pressed: # for "damp eet" if Qt.Key_D in pressed: # for "damp eet"
action = 'sell' action = 'sell'
@ -277,8 +259,8 @@ async def handle_viewmode_kb_inputs(
Qt.Key_S in pressed or Qt.Key_S in pressed or
order_keys_pressed or order_keys_pressed or
Qt.Key_O in pressed Qt.Key_O in pressed
) ) and
and key in NUMBER_LINE key in NUMBER_LINE
): ):
# hot key to set order slots size. # hot key to set order slots size.
# change edit field to current number line value, # change edit field to current number line value,
@ -296,7 +278,7 @@ async def handle_viewmode_kb_inputs(
else: # none active else: # none active
# hide pp label # hide pp label
order_mode.current_pp.nav.hide_info() order_mode.current_pp.hide_info()
# if none are pressed, remove "staged" level # if none are pressed, remove "staged" level
# line under cursor position # line under cursor position
@ -337,6 +319,7 @@ async def handle_viewmode_mouse(
): ):
# when in order mode, submit execution # when in order mode, submit execution
# msg.event.accept() # msg.event.accept()
# breakpoint()
view.order_mode.submit_order() view.order_mode.submit_order()
@ -353,6 +336,16 @@ class ChartView(ViewBox):
''' '''
mode_name: str = 'view' mode_name: str = 'view'
# "relay events" for making overlaid views work.
# NOTE: these MUST be defined here (and can't be monkey patched
# on later) due to signal construction requiring refs to be
# in place during the run of meta-class machinery.
mouseDragEventRelay = QtCore.Signal(object, object, object)
wheelEventRelay = QtCore.Signal(object, object, object)
event_relay_source: 'Optional[ViewBox]' = None
relays: dict[str, QtCore.Signal] = {}
def __init__( def __init__(
self, self,
@ -374,6 +367,7 @@ class ChartView(ViewBox):
) )
# for "known y-range style" # for "known y-range style"
self._static_yrange = static_yrange self._static_yrange = static_yrange
self._maxmin = None
# disable vertical scrolling # disable vertical scrolling
self.setMouseEnabled( self.setMouseEnabled(
@ -381,8 +375,8 @@ class ChartView(ViewBox):
y=True, y=True,
) )
self.linked = None self.linkedsplits = None
self._chart: ChartPlotWidget | None = None # noqa self._chart: 'ChartPlotWidget' = None # noqa
# add our selection box annotator # add our selection box annotator
self.select_box = SelectRect(self) self.select_box = SelectRect(self)
@ -393,7 +387,6 @@ class ChartView(ViewBox):
self.setFocusPolicy(QtCore.Qt.StrongFocus) self.setFocusPolicy(QtCore.Qt.StrongFocus)
self._ic = None self._ic = None
self._yranger: Callable | None = None
def start_ic( def start_ic(
self, self,
@ -404,11 +397,8 @@ class ChartView(ViewBox):
''' '''
if self._ic is None: if self._ic is None:
try: self.chart.pause_all_feeds()
self.chart.pause_all_feeds() self._ic = trio.Event()
self._ic = trio.Event()
except RuntimeError:
pass
def signal_ic( def signal_ic(
self, self,
@ -421,12 +411,9 @@ class ChartView(ViewBox):
''' '''
if self._ic: if self._ic:
try: self._ic.set()
self._ic.set() self._ic = None
self._ic = None self.chart.resume_all_feeds()
self.chart.resume_all_feeds()
except RuntimeError:
pass
@asynccontextmanager @asynccontextmanager
async def open_async_input_handler( async def open_async_input_handler(
@ -454,18 +441,29 @@ class ChartView(ViewBox):
yield self yield self
@property @property
def chart(self) -> ChartPlotWidget: # type: ignore # noqa def chart(self) -> 'ChartPlotWidget': # type: ignore # noqa
return self._chart return self._chart
@chart.setter @chart.setter
def chart(self, chart: ChartPlotWidget) -> None: # type: ignore # noqa def chart(self, chart: 'ChartPlotWidget') -> None: # type: ignore # noqa
self._chart = chart self._chart = chart
self.select_box.chart = chart self.select_box.chart = chart
if self._maxmin is None:
self._maxmin = chart.maxmin
@property
def maxmin(self) -> Callable:
return self._maxmin
@maxmin.setter
def maxmin(self, callback: Callable) -> None:
self._maxmin = callback
def wheelEvent( def wheelEvent(
self, self,
ev, ev,
axis=None, axis=None,
relayed_from: ChartView = None,
): ):
''' '''
Override "center-point" location for scrolling. Override "center-point" location for scrolling.
@ -476,34 +474,27 @@ class ChartView(ViewBox):
TODO: PR a method into ``pyqtgraph`` to make this configurable TODO: PR a method into ``pyqtgraph`` to make this configurable
''' '''
linked = self.linked
if (
not linked
):
return
if axis in (0, 1): if axis in (0, 1):
mask = [False, False] mask = [False, False]
mask[axis] = self.state['mouseEnabled'][axis] mask[axis] = self.state['mouseEnabled'][axis]
else: else:
mask = self.state['mouseEnabled'][:] mask = self.state['mouseEnabled'][:]
chart = self.linked.chart chart = self.linkedsplits.chart
# don't zoom more than the min points setting # don't zoom more than the min points setting
viz = chart.get_viz(chart.name) l, lbar, rbar, r = chart.bars_range()
vl, lbar, rbar, vr = viz.bars_range() # vl = r - l
# TODO: max/min zoom limits incorporating time step size. # if ev.delta() > 0 and vl <= _min_points_to_show:
# rl = vr - vl # log.debug("Max zoom bruh...")
# if ev.delta() > 0 and rl <= _min_points_to_show:
# log.warning("Max zoom bruh...")
# return # return
# if ( # if (
# ev.delta() < 0 # ev.delta() < 0
# and rl >= len(chart._vizs[chart.name].shm.array) + 666 # and vl >= len(chart._flows[chart.name].shm.array) + 666
# ): # ):
# log.warning("Min zoom bruh...") # log.debug("Min zoom bruh...")
# return # return
# actual scaling factor # actual scaling factor
@ -534,17 +525,49 @@ class ChartView(ViewBox):
self.scaleBy(s, center) self.scaleBy(s, center)
else: else:
# use right-most point of current curve graphic
xl = viz.graphics.x_last() # center = pg.Point(
# fn.invertQTransform(self.childGroup.transform()).map(ev.pos())
# )
# XXX: scroll "around" the right most element in the view
# which stays "pinned" in place.
# furthest_right_coord = self.boundingRect().topRight()
# yaxis = pg.Point(
# fn.invertQTransform(
# self.childGroup.transform()
# ).map(furthest_right_coord)
# )
# This seems like the most "intuitive" option, a hybrid of
# tws and tv styles
last_bar = pg.Point(int(rbar)) + 1
ryaxis = chart.getAxis('right')
r_axis_x = ryaxis.pos().x()
end_of_l1 = pg.Point(
round(
chart.cv.mapToView(
pg.Point(r_axis_x - chart._max_l1_line_len)
# QPointF(chart._max_l1_line_len, 0)
).x()
)
) # .x()
# self.state['viewRange'][0][1] = end_of_l1
# focal = pg.Point((last_bar.x() + end_of_l1)/2)
focal = min( focal = min(
xl, last_bar,
vr, end_of_l1,
key=lambda p: p.x()
) )
# focal = pg.Point(last_bar.x() + end_of_l1)
self._resetTarget() self._resetTarget()
# NOTE: scroll "around" the right most datum-element in view
# gives the feeling of staying "pinned" in place.
self.scaleBy(s, focal) self.scaleBy(s, focal)
# XXX: the order of the next 2 lines i'm pretty sure # XXX: the order of the next 2 lines i'm pretty sure
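Note: scrolling "around" the focal point above boils down to rescaling the view's x-range about that point instead of its center, so the focal datum stays pinned on screen. A minimal sketch of the arithmetic, independent of Qt/pyqtgraph:

def scale_about_focal(
    left: float,
    right: float,
    focal: float,
    s: float,  # < 1 zooms in, > 1 zooms out
) -> tuple[float, float]:
    # shrink/grow the span by s while keeping the focal x fixed
    new_left = focal - (focal - left) * s
    new_right = focal + (right - focal) * s
    return new_left, new_right

# zooming in 10% around the right-most datum keeps it in place
assert scale_about_focal(0, 100, 100, 0.9) == (10.0, 100.0)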
@ -570,8 +593,10 @@ class ChartView(ViewBox):
self, self,
ev, ev,
axis: Optional[int] = None, axis: Optional[int] = None,
relayed_from: ChartView = None,
) -> None: ) -> None:
pos = ev.pos() pos = ev.pos()
lastPos = ev.lastPos() lastPos = ev.lastPos()
dif = pos - lastPos dif = pos - lastPos
@ -641,10 +666,10 @@ class ChartView(ViewBox):
# PANNING MODE # PANNING MODE
else: else:
try: # XXX: WHY
self.start_ic() ev.accept()
except RuntimeError:
pass self.start_ic()
# if self._ic is None: # if self._ic is None:
# self.chart.pause_all_feeds() # self.chart.pause_all_feeds()
# self._ic = trio.Event() # self._ic = trio.Event()
@ -672,9 +697,6 @@ class ChartView(ViewBox):
# self._ic = None # self._ic = None
# self.chart.resume_all_feeds() # self.chart.resume_all_feeds()
# XXX: WHY
ev.accept()
# WEIRD "RIGHT-CLICK CENTER ZOOM" MODE # WEIRD "RIGHT-CLICK CENTER ZOOM" MODE
elif button & QtCore.Qt.RightButton: elif button & QtCore.Qt.RightButton:
@ -720,12 +742,7 @@ class ChartView(ViewBox):
*, *,
yrange: Optional[tuple[float, float]] = None, yrange: Optional[tuple[float, float]] = None,
viz: Viz | None = None, range_margin: float = 0.06,
# NOTE: this value pairs (more or less) with L1 label text
# height offset from from the bid/ask lines.
range_margin: float = 0.09,
bars_range: Optional[tuple[int, int, int, int]] = None, bars_range: Optional[tuple[int, int, int, int]] = None,
# flag to prevent triggering sibling charts from the same linked # flag to prevent triggering sibling charts from the same linked
@ -744,7 +761,7 @@ class ChartView(ViewBox):
''' '''
name = self.name name = self.name
# print(f'YRANGE ON {name}') # print(f'YRANGE ON {name}')
profiler = Profiler( profiler = pg.debug.Profiler(
msg=f'`ChartView._set_yrange()`: `{name}`', msg=f'`ChartView._set_yrange()`: `{name}`',
disabled=not pg_profile_enabled(), disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then, ms_threshold=ms_slower_then,
@ -778,28 +795,18 @@ class ChartView(ViewBox):
# XXX: only compute the mxmn range # XXX: only compute the mxmn range
# if none is provided as input! # if none is provided as input!
if not yrange: if not yrange:
# flow = chart._flows[name]
if not viz: yrange = self._maxmin()
breakpoint()
out = viz.maxmin()
if out is None:
log.warning(f'No yrange provided for {name}!?')
return
(
ixrng,
_,
yrange
) = out
profiler(f'`{self.name}:Viz.maxmin()` -> {ixrng}=>{yrange}')
if yrange is None: if yrange is None:
log.warning(f'No yrange provided for {name}!?') log.warning(f'No yrange provided for {name}!?')
print(f"WTF NO YRANGE {name}")
return return
ylow, yhigh = yrange ylow, yhigh = yrange
profiler(f'callback ._maxmin(): {yrange}')
# view margins: stay within a % of the "true range" # view margins: stay within a % of the "true range"
diff = yhigh - ylow diff = yhigh - ylow
ylow = ylow - (diff * range_margin) ylow = ylow - (diff * range_margin)
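Note: the margin handling above is just symmetric padding as a fraction of the computed data range. A quick worked sketch, assuming the high end is padded the same way as the low end shown here:

def pad_yrange(
    ylow: float,
    yhigh: float,
    range_margin: float,
) -> tuple[float, float]:
    # expand both ends by a fixed fraction of the data range
    diff = yhigh - ylow
    return (
        ylow - diff * range_margin,
        yhigh + diff * range_margin,
    )

# e.g. a 9% margin on a (90, 110) range pads out to (88.2, 111.8)
lo, hi = pad_yrange(90, 110, 0.09)
assert (round(lo, 1), round(hi, 1)) == (88.2, 111.8)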
@ -819,55 +826,54 @@ class ChartView(ViewBox):
def enable_auto_yrange( def enable_auto_yrange(
self, self,
viz: Viz,
src_vb: Optional[ChartView] = None, src_vb: Optional[ChartView] = None,
) -> None: ) -> None:
''' '''
Assign callbacks for rescaling and resampling y-axis data Assign callback for rescaling y-axis automatically
automatically based on data contents and ``ViewBox`` state. based on data contents and ``ViewBox`` state.
''' '''
if src_vb is None: if src_vb is None:
src_vb = self src_vb = self
if self._yranger is None: # splitter(s) resizing
self._yranger = partial( src_vb.sigResized.connect(self._set_yrange)
self._set_yrange,
viz=viz,
)
# widget-UIs/splitter(s) resizing
src_vb.sigResized.connect(self._yranger)
# mouse wheel doesn't emit XRangeChanged
src_vb.sigRangeChangedManually.connect(self._yranger)
# re-sampling trigger:
# TODO: a smarter way to avoid calling this needlessly? # TODO: a smarter way to avoid calling this needlessly?
# 2 things i can think of: # 2 things i can think of:
# - register downsample-able graphics specially and only # - register downsample-able graphics specially and only
# iterate those. # iterate those.
# - only register this when certain downsample-able graphics are # - only register this when certain downsampleable graphics are
# "added to scene". # "added to scene".
src_vb.sigRangeChangedManually.connect( src_vb.sigRangeChangedManually.connect(
self.maybe_downsample_graphics self.maybe_downsample_graphics
) )
# mouse wheel doesn't emit XRangeChanged
src_vb.sigRangeChangedManually.connect(self._set_yrange)
# src_vb.sigXRangeChanged.connect(self._set_yrange)
# src_vb.sigXRangeChanged.connect(
# self.maybe_downsample_graphics
# )
def disable_auto_yrange(self) -> None: def disable_auto_yrange(self) -> None:
# XXX: not entirely sure why we can't de-reg this..
self.sigResized.disconnect( self.sigResized.disconnect(
self._yranger, self._set_yrange,
) )
self.sigRangeChangedManually.disconnect(
self._yranger,
)
self.sigRangeChangedManually.disconnect( self.sigRangeChangedManually.disconnect(
self.maybe_downsample_graphics self.maybe_downsample_graphics
) )
self.sigRangeChangedManually.disconnect(
self._set_yrange,
)
# self.sigXRangeChanged.disconnect(self._set_yrange)
# self.sigXRangeChanged.disconnect(
# self.maybe_downsample_graphics
# )
def x_uppx(self) -> float: def x_uppx(self) -> float:
''' '''
@ -876,7 +882,7 @@ class ChartView(ViewBox):
graphics items which are our children. graphics items which are our children.
''' '''
graphics = [f.graphics for f in self._chart._vizs.values()] graphics = [f.graphics for f in self._chart._flows.values()]
if not graphics: if not graphics:
return 0 return 0
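Note: "uppx" here means x-axis units-per-pixel, the quantity downstream code uses to decide whether a curve should be downsampled. Conceptually it is just the visible x-range divided by the widget's pixel width; a tiny sketch:

def x_units_per_pixel(
    view_left: float,
    view_right: float,
    width_px: int,
) -> float:
    # how many data-index units one horizontal pixel spans; values > 1
    # mean several datums share a pixel column and the curve is a
    # candidate for downsampling.
    return (view_right - view_left) / max(width_px, 1)

assert x_units_per_pixel(0, 3000, 1500) == 2.0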
@ -889,9 +895,10 @@ class ChartView(ViewBox):
def maybe_downsample_graphics( def maybe_downsample_graphics(
self, self,
autoscale_overlays: bool = False, autoscale_overlays: bool = True,
): ):
profiler = Profiler(
profiler = pg.debug.Profiler(
msg=f'ChartView.maybe_downsample_graphics() for {self.name}', msg=f'ChartView.maybe_downsample_graphics() for {self.name}',
disabled=not pg_profile_enabled(), disabled=not pg_profile_enabled(),
@ -905,14 +912,10 @@ class ChartView(ViewBox):
# TODO: a faster single-loop-iterator way of doing this XD # TODO: a faster single-loop-iterator way of doing this XD
chart = self._chart chart = self._chart
plots = {chart.name: chart} linked = self.linkedsplits
plots = linked.subplots | {chart.name: chart}
linked = self.linked
if linked:
plots |= linked.subplots
for chart_name, chart in plots.items(): for chart_name, chart in plots.items():
for name, flow in chart._vizs.items(): for name, flow in chart._flows.items():
if ( if (
not flow.render not flow.render
@ -920,24 +923,25 @@ class ChartView(ViewBox):
# XXX: super important to be aware of this. # XXX: super important to be aware of this.
# or not flow.graphics.isVisible() # or not flow.graphics.isVisible()
): ):
# print(f'skipping {flow.name}')
continue continue
# pass in no array which will read and render from the last # pass in no array which will read and render from the last
# passed array (normally provided by the display loop.) # passed array (normally provided by the display loop.)
chart.update_graphics_from_flow(name) chart.update_graphics_from_flow(
name,
use_vr=True,
)
# for each overlay on this chart auto-scale the # for each overlay on this chart auto-scale the
# y-range to max-min values. # y-range to max-min values.
# if autoscale_overlays: if autoscale_overlays:
# overlay = chart.pi_overlay overlay = chart.pi_overlay
# if overlay: if overlay:
# for pi in overlay.overlays: for pi in overlay.overlays:
# pi.vb._set_yrange( pi.vb._set_yrange(
# # TODO: get the range once up front... # TODO: get the range once up front...
# # bars_range=br, # bars_range=br,
# viz=pi.viz, )
# ) profiler('autoscaled linked plots')
# profiler('autoscaled linked plots')
profiler(f'<{chart_name}>.update_graphics_from_flow({name})') profiler(f'<{chart_name}>.update_graphics_from_flow({name})')

View File

@ -26,24 +26,22 @@ from PyQt5.QtCore import QPointF
from ._axes import YAxisLabel from ._axes import YAxisLabel
from ._style import hcolor from ._style import hcolor
from ._pg_overrides import PlotItem
class LevelLabel(YAxisLabel): class LevelLabel(YAxisLabel):
''' """Y-axis (vertically) oriented, horizontal label that sticks to
Y-axis (vertically) oriented, horizontal label that sticks to
where it's placed despite chart resizing and supports displaying where it's placed despite chart resizing and supports displaying
multiple fields. multiple fields.
TODO: replace the rectangle-text part with our new ``Label`` type. TODO: replace the rectangle-text part with our new ``Label`` type.
''' """
_x_br_offset: float = -16 _x_margin = 0
_y_txt_h_scaling: float = 2 _y_margin = 0
# adjustment "further away from" anchor point # adjustment "further away from" anchor point
_x_offset = 0 _x_offset = 9
_y_offset = 0 _y_offset = 0
# fields to be displayed in the label string # fields to be displayed in the label string
@ -59,12 +57,12 @@ class LevelLabel(YAxisLabel):
chart, chart,
parent, parent,
color: str = 'default_light', color: str = 'bracket',
orient_v: str = 'bottom', orient_v: str = 'bottom',
orient_h: str = 'right', orient_h: str = 'left',
opacity: float = 1, opacity: float = 0,
# makes order line labels offset from their parent axis # makes order line labels offset from their parent axis
# such that they don't collide with the L1/L2 lines/prices # such that they don't collide with the L1/L2 lines/prices
@ -100,15 +98,13 @@ class LevelLabel(YAxisLabel):
self._h_shift = { self._h_shift = {
'left': -1., 'left': -1.,
'right': 0., 'right': 0.
}[orient_h] }[orient_h]
self.fields = self._fields.copy() self.fields = self._fields.copy()
# ensure default format fields are in correct # ensure default format fields are in correct
self.set_fmt_str(self._fmt_str, self.fields) self.set_fmt_str(self._fmt_str, self.fields)
self.setZValue(10)
@property @property
def color(self): def color(self):
return self._hcolor return self._hcolor
@ -116,10 +112,7 @@ class LevelLabel(YAxisLabel):
@color.setter @color.setter
def color(self, color: str) -> None: def color(self, color: str) -> None:
self._hcolor = color self._hcolor = color
self._pen = self.pen = pg.mkPen( self._pen = self.pen = pg.mkPen(hcolor(color))
hcolor(color),
width=3,
)
def update_on_resize(self, vr, r): def update_on_resize(self, vr, r):
"""Tiis is a ``.sigRangeChanged()`` handler. """Tiis is a ``.sigRangeChanged()`` handler.
@ -131,16 +124,15 @@ class LevelLabel(YAxisLabel):
self, self,
fields: dict = None, fields: dict = None,
) -> None: ) -> None:
''' """Update the label's text contents **and** position from
Update the label's text contents **and** position from
a view box coordinate datum. a view box coordinate datum.
''' """
self.fields.update(fields) self.fields.update(fields)
level = self.fields['level'] level = self.fields['level']
# map "level" to local coords # map "level" to local coords
abs_xy = self._pi.mapFromView(QPointF(0, level)) abs_xy = self._chart.mapFromView(QPointF(0, level))
self.update_label( self.update_label(
abs_xy, abs_xy,
@ -157,7 +149,7 @@ class LevelLabel(YAxisLabel):
h, w = self.set_label_str(fields) h, w = self.set_label_str(fields)
if self._adjust_to_l1: if self._adjust_to_l1:
self._x_offset = self._pi.chart_widget._max_l1_line_len self._x_offset = self._chart._max_l1_line_len
self.setPos(QPointF( self.setPos(QPointF(
self._h_shift * (w + self._x_offset), self._h_shift * (w + self._x_offset),
@ -182,8 +174,7 @@ class LevelLabel(YAxisLabel):
fields: dict, fields: dict,
): ):
# use space as e3 delim # use space as e3 delim
self.label_str = self._fmt_str.format( self.label_str = self._fmt_str.format(**fields).replace(',', ' ')
**fields).replace(',', ' ')
br = self.boundingRect() br = self.boundingRect()
h, w = br.height(), br.width() h, w = br.height(), br.width()
@ -196,14 +187,14 @@ class LevelLabel(YAxisLabel):
self, self,
p: QtGui.QPainter, p: QtGui.QPainter,
rect: QtCore.QRectF rect: QtCore.QRectF
) -> None: ) -> None:
p.setPen(self._pen) p.setPen(self._pen)
rect = self.rect rect = self.rect
if self._orient_v == 'bottom': if self._orient_v == 'bottom':
lp, rp = rect.topLeft(), rect.topRight() lp, rp = rect.topLeft(), rect.topRight()
# p.drawLine(rect.topLeft(), rect.topRight())
elif self._orient_v == 'top': elif self._orient_v == 'top':
lp, rp = rect.bottomLeft(), rect.bottomRight() lp, rp = rect.bottomLeft(), rect.bottomRight()
@ -217,11 +208,6 @@ class LevelLabel(YAxisLabel):
]) ])
) )
p.fillRect(
self.rect,
self.bg_color,
)
def highlight(self, pen) -> None: def highlight(self, pen) -> None:
self._pen = pen self._pen = pen
self.update() self.update()
@ -250,46 +236,43 @@ class L1Label(LevelLabel):
# Set a global "max L1 label length" so we can # Set a global "max L1 label length" so we can
# look it up on order lines and adjust their # look it up on order lines and adjust their
# labels not to overlap with it. # labels not to overlap with it.
chart = self._pi.chart_widget chart = self._chart
chart._max_l1_line_len: float = max( chart._max_l1_line_len: float = max(
chart._max_l1_line_len, chart._max_l1_line_len,
w, w
) )
return h, w return h, w
class L1Labels: class L1Labels:
''' """Level 1 bid ask labels for dynamic update on price-axis.
Level 1 bid ask labels for dynamic update on price-axis.
''' """
def __init__( def __init__(
self, self,
plotitem: PlotItem, chart: 'ChartPlotWidget', # noqa
digits: int = 2, digits: int = 2,
size_digits: int = 3, size_digits: int = 3,
font_size: str = 'small', font_size: str = 'small',
) -> None: ) -> None:
chart = self.chart = plotitem.chart_widget self.chart = chart
raxis = plotitem.getAxis('right') raxis = chart.getAxis('right')
kwargs = { kwargs = {
'chart': plotitem, 'chart': chart,
'parent': raxis, 'parent': raxis,
'opacity': .9, 'opacity': 1,
'font_size': font_size, 'font_size': font_size,
'fg_color': 'default_light', 'fg_color': chart.pen_color,
'bg_color': chart.view_color, # normally 'papas_special' 'bg_color': chart.view_color,
} }
# TODO: add humanized source-asset
# info format.
fmt_str = ( fmt_str = (
' {size:.{size_digits}f} u' ' {size:.{size_digits}f} x '
# '{level:,.{level_digits}f} ' '{level:,.{level_digits}f} '
) )
fields = { fields = {
'level': 0, 'level': 0,
@ -302,17 +285,12 @@ class L1Labels:
orient_v='bottom', orient_v='bottom',
**kwargs, **kwargs,
) )
bid.set_fmt_str( bid.set_fmt_str(fmt_str=fmt_str, fields=fields)
fmt_str='\n' + fmt_str,
fields=fields,
)
bid.show() bid.show()
ask = self.ask_label = L1Label( ask = self.ask_label = L1Label(
orient_v='top', orient_v='top',
**kwargs, **kwargs,
) )
ask.set_fmt_str( ask.set_fmt_str(fmt_str=fmt_str, fields=fields)
fmt_str=fmt_str,
fields=fields)
ask.show() ask.show()

View File

@ -233,36 +233,6 @@ class Label:
def delete(self) -> None: def delete(self) -> None:
self.vb.scene().removeItem(self.txt) self.vb.scene().removeItem(self.txt)
# NOTE: pulled out from ``ChartPlotWidget`` from way way old code.
# def _label_h(self, yhigh: float, ylow: float) -> float:
# # compute contents label "height" in view terms
# # to avoid having data "contents" overlap with them
# if self._labels:
# label = self._labels[self.name][0]
# rect = label.itemRect()
# tl, br = rect.topLeft(), rect.bottomRight()
# vb = self.plotItem.vb
# try:
# # on startup labels might not yet be rendered
# top, bottom = (vb.mapToView(tl).y(), vb.mapToView(br).y())
# # XXX: magic hack, how do we compute exactly?
# label_h = (top - bottom) * 0.42
# except np.linalg.LinAlgError:
# label_h = 0
# else:
# label_h = 0
# # print(f'label height {self.name}: {label_h}')
# if label_h > yhigh - ylow:
# label_h = 0
# print(f"bounds (ylow, yhigh): {(ylow, yhigh)}")
class FormatLabel(QLabel): class FormatLabel(QLabel):
''' '''

View File

@ -18,14 +18,9 @@
Lines for orders, alerts, L2. Lines for orders, alerts, L2.
""" """
from __future__ import annotations
from functools import partial from functools import partial
from math import floor from math import floor
from typing import ( from typing import Optional, Callable
Optional,
Callable,
TYPE_CHECKING,
)
import pyqtgraph as pg import pyqtgraph as pg
from pyqtgraph import Point, functions as fn from pyqtgraph import Point, functions as fn
@ -42,9 +37,6 @@ from ..calc import humanize
from ._label import Label from ._label import Label
from ._style import hcolor, _font from ._style import hcolor, _font
if TYPE_CHECKING:
from ._cursor import Cursor
# TODO: probably worth investigating if we can # TODO: probably worth investigating if we can
# make .boundingRect() faster: # make .boundingRect() faster:
@ -92,7 +84,7 @@ class LevelLine(pg.InfiniteLine):
self._marker = None self._marker = None
self.only_show_markers_on_hover = only_show_markers_on_hover self.only_show_markers_on_hover = only_show_markers_on_hover
self.track_marker_pos: bool = False self.show_markers: bool = True # presuming the line is hovered at init
# should line go all the way to far end or leave a "margin" # should line go all the way to far end or leave a "margin"
# space for other graphics (eg. L1 book) # space for other graphics (eg. L1 book)
@ -130,9 +122,6 @@ class LevelLine(pg.InfiniteLine):
self._y_incr_mult = 1 / chart.linked.symbol.tick_size self._y_incr_mult = 1 / chart.linked.symbol.tick_size
self._right_end_sc: float = 0 self._right_end_sc: float = 0
# use px caching
self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
def txt_offsets(self) -> tuple[int, int]: def txt_offsets(self) -> tuple[int, int]:
return 0, 0 return 0, 0
@ -227,23 +216,20 @@ class LevelLine(pg.InfiniteLine):
y: float y: float
) -> None: ) -> None:
''' '''Chart coordinates cursor tracking callback.
Chart coordinates cursor tracking callback.
this is called by our ``Cursor`` type once this line is set to this is called by our ``Cursor`` type once this line is set to
track the cursor: for every movement this callback is invoked to track the cursor: for every movement this callback is invoked to
reposition the line with the current view coordinates. reposition the line with the current view coordinates.
''' '''
self.movable = True self.movable = True
self.set_level(y) # implicitly calls reposition handler self.set_level(y) # implicitly calls reposition handler
def mouseDragEvent(self, ev): def mouseDragEvent(self, ev):
''' """Override the ``InfiniteLine`` handler since we need more
Override the ``InfiniteLine`` handler since we need more
detailed control and start end signalling. detailed control and start end signalling.
''' """
cursor = self._chart.linked.cursor cursor = self._chart.linked.cursor
# hide y-crosshair # hide y-crosshair
@ -295,20 +281,10 @@ class LevelLine(pg.InfiniteLine):
# show y-crosshair again # show y-crosshair again
cursor.show_xhair() cursor.show_xhair()
def get_cursor(self) -> Optional[Cursor]:
chart = self._chart
cur = chart.linked.cursor
if self in cur._hovered:
return cur
return None
def delete(self) -> None: def delete(self) -> None:
''' """Remove this line from containing chart/view/scene.
Remove this line from containing chart/view/scene.
''' """
scene = self.scene() scene = self.scene()
if scene: if scene:
for label in self._labels: for label in self._labels:
@ -322,8 +298,9 @@ class LevelLine(pg.InfiniteLine):
# remove from chart/cursor states # remove from chart/cursor states
chart = self._chart chart = self._chart
cur = self.get_cursor() cur = chart.linked.cursor
if cur:
if self in cur._hovered:
cur._hovered.remove(self) cur._hovered.remove(self)
chart.plotItem.removeItem(self) chart.plotItem.removeItem(self)
@ -331,8 +308,8 @@ class LevelLine(pg.InfiniteLine):
def mouseDoubleClickEvent( def mouseDoubleClickEvent(
self, self,
ev: QtGui.QMouseEvent, ev: QtGui.QMouseEvent,
) -> None: ) -> None:
# TODO: enter labels edit mode # TODO: enter labels edit mode
print(f'double click {ev}') print(f'double click {ev}')
@ -357,22 +334,30 @@ class LevelLine(pg.InfiniteLine):
line_end, marker_right, r_axis_x = self._chart.marker_right_points() line_end, marker_right, r_axis_x = self._chart.marker_right_points()
# (legacy) NOTE: at one point this seemed slower when moving around if self.show_markers and self.markers:
# order lines.. not sure if that's still true or why but we've
# dropped the original hacky `.pain()` transform stuff for inf p.setPen(self.pen)
# line markers now - check the git history if it needs to be qgo_draw_markers(
# reverted. self.markers,
if self._marker: self.pen.color(),
if self.track_marker_pos: p,
# make the line end at the marker's x pos vb_left,
line_end = marker_right = self._marker.pos().x() vb_right,
marker_right,
)
# marker_size = self.markers[0][2]
self._maxMarkerSize = max([m[2] / 2. for m in self.markers])
# this seems slower when moving around
# order lines.. not sure wtf is up with that.
# for now we're just using it on the position line.
elif self._marker:
# TODO: make this label update part of a scene-aware-marker # TODO: make this label update part of a scene-aware-marker
# composed annotation # composed annotation
self._marker.setPos( self._marker.setPos(
QPointF(marker_right, self.scene_y()) QPointF(marker_right, self.scene_y())
) )
if hasattr(self._marker, 'label'): if hasattr(self._marker, 'label'):
self._marker.label.update() self._marker.label.update()
@ -394,14 +379,16 @@ class LevelLine(pg.InfiniteLine):
def hide(self) -> None: def hide(self) -> None:
super().hide() super().hide()
mkr = self._marker if self._marker:
if mkr: self._marker.hide()
mkr.hide() # needed for ``order_line()`` lines currently
self._marker.label.hide()
def show(self) -> None: def show(self) -> None:
super().show() super().show()
if self._marker: if self._marker:
self._marker.show() self._marker.show()
# self._marker.label.show()
def scene_y(self) -> float: def scene_y(self) -> float:
return self.getViewBox().mapFromView( return self.getViewBox().mapFromView(
@ -434,10 +421,6 @@ class LevelLine(pg.InfiniteLine):
return path return path
@property
def marker(self) -> LevelMarker:
return self._marker
def hoverEvent(self, ev): def hoverEvent(self, ev):
''' '''
Mouse hover callback. Mouse hover callback.
@ -446,16 +429,17 @@ class LevelLine(pg.InfiniteLine):
cur = self._chart.linked.cursor cur = self._chart.linked.cursor
# hovered # hovered
if ( if (not ev.isExit()) and ev.acceptDrags(QtCore.Qt.LeftButton):
not ev.isExit()
and ev.acceptDrags(QtCore.Qt.LeftButton)
):
# if already hovered we don't need to run again # if already hovered we don't need to run again
if self.mouseHovering is True: if self.mouseHovering is True:
return return
if self.only_show_markers_on_hover: if self.only_show_markers_on_hover:
self.show_markers() self.show_markers = True
if self._marker:
self._marker.show()
# highlight if so configured # highlight if so configured
if self.highlight_on_hover: if self.highlight_on_hover:
@ -498,7 +482,11 @@ class LevelLine(pg.InfiniteLine):
cur._hovered.remove(self) cur._hovered.remove(self)
if self.only_show_markers_on_hover: if self.only_show_markers_on_hover:
self.hide_markers() self.show_markers = False
if self._marker:
self._marker.hide()
self._marker.label.hide()
if self not in cur._trackers: if self not in cur._trackers:
cur.show_xhair(y_label_level=self.value()) cur.show_xhair(y_label_level=self.value())
@ -510,15 +498,6 @@ class LevelLine(pg.InfiniteLine):
self.update() self.update()
def hide_markers(self) -> None:
if self._marker:
self._marker.hide()
self._marker.label.hide()
def show_markers(self) -> None:
if self._marker:
self._marker.show()
def level_line( def level_line(
@ -539,10 +518,9 @@ def level_line(
**kwargs, **kwargs,
) -> LevelLine: ) -> LevelLine:
''' """Convenience routine to add a styled horizontal line to a plot.
Convenience routine to add a styled horizontal line to a plot.
''' """
hl_color = color + '_light' if highlight_on_hover else color hl_color = color + '_light' if highlight_on_hover else color
line = LevelLine( line = LevelLine(
@ -724,7 +702,7 @@ def order_line(
marker = LevelMarker( marker = LevelMarker(
chart=chart, chart=chart,
style=marker_style, style=marker_style,
get_level=line.value, # callback get_level=line.value,
size=marker_size, size=marker_size,
keep_in_view=False, keep_in_view=False,
) )
@ -733,8 +711,7 @@ def order_line(
marker = line.add_marker(marker) marker = line.add_marker(marker)
# XXX: DON'T COMMENT THIS! # XXX: DON'T COMMENT THIS!
# this fixes the artifact issue! # this fixes the artifact issue! .. of course, bounding rect stuff
# .. of course, bounding rect stuff
line._maxMarkerSize = marker_size line._maxMarkerSize = marker_size
assert line._marker is marker assert line._marker is marker
@ -755,8 +732,7 @@ def order_line(
if action != 'alert': if action != 'alert':
# add a partial position label if we also added a level # add a partial position label if we also added a level marker
# marker
pp_size_label = Label( pp_size_label = Label(
view=view, view=view,
color=line.color, color=line.color,
@ -790,9 +766,9 @@ def order_line(
# XXX: without this the pp proportion label next the marker # XXX: without this the pp proportion label next the marker
# seems to lag? this is the same issue we had with position # seems to lag? this is the same issue we had with position
# lines which we handle with ``.update_graphics()``. # lines which we handle with ``.update_graphics()``.
# marker._on_paint=lambda marker: pp_size_label.update()
marker._on_paint = lambda marker: pp_size_label.update() marker._on_paint = lambda marker: pp_size_label.update()
# XXX: THIS IS AN UNTYPED MONKEY PATCH!?!?!
marker.label = label marker.label = label
# sanity check # sanity check

View File

@ -1,108 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Notifications utils.
"""
import os
import platform
import subprocess
from typing import Optional
import trio
from ..log import get_logger
from ..clearing._messages import (
Status,
)
log = get_logger(__name__)
_dbus_uid: Optional[str] = ''
async def notify_from_ems_status_msg(
msg: Status,
duration: int = 3000,
is_subproc: bool = False,
) -> None:
'''
Send a linux desktop notification.
Handle subprocesses by discovering the dbus user id
on first call.
'''
if platform.system() != "Linux":
return
# TODO: this in another task?
# not sure if this will ever be a bottleneck,
# we probably could do graphics stuff first tho?
if is_subproc:
global _dbus_uid
su = os.environ.get('SUDO_USER')
if (
not _dbus_uid
and su
):
# TODO: use `trio` but we need to use nursery.start()
# to use pipes?
# result = await trio.run_process(
result = subprocess.run(
[
'id',
'-u',
su,
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# check=True
)
_dbus_uid = result.stdout.decode("utf-8").replace('\n', '')
os.environ['DBUS_SESSION_BUS_ADDRESS'] = (
f'unix:path=/run/user/{_dbus_uid}/bus'
)
try:
result = await trio.run_process(
[
'notify-send',
'-u', 'normal',
'-t', f'{duration}',
'piker',
# TODO: add in standard fill/exec info that maybe we
# pack in a broker independent way?
f"'{msg.pformat()}'",
],
capture_stdout=True,
capture_stderr=True,
check=False,
)
if result.returncode != 0:
log.warn(f'Notification daemon crashed stderr: {result.stderr}')
log.runtime(result)
except FileNotFoundError:
log.warn('Tried to send a notification but \'notify-send\' not present')

View File

@ -18,23 +18,23 @@ Super fast OHLC sampling graphics types.
""" """
from __future__ import annotations from __future__ import annotations
from typing import (
Optional,
TYPE_CHECKING,
)
import numpy as np import numpy as np
from PyQt5 import ( import pyqtgraph as pg
QtGui, from PyQt5 import QtCore, QtGui, QtWidgets
QtWidgets, from PyQt5.QtCore import QLineF, QPointF
)
from PyQt5.QtCore import (
QLineF,
QRectF,
)
from PyQt5.QtWidgets import QGraphicsItem
from PyQt5.QtGui import QPainterPath from PyQt5.QtGui import QPainterPath
from ._curve import FlowGraphic
from .._profile import pg_profile_enabled, ms_slower_then from .._profile import pg_profile_enabled, ms_slower_then
from ._style import hcolor
from ..log import get_logger from ..log import get_logger
from .._profile import Profiler
if TYPE_CHECKING:
from ._chart import LinkedSplits
log = get_logger(__name__) log = get_logger(__name__)
@ -43,8 +43,7 @@ log = get_logger(__name__)
def bar_from_ohlc_row( def bar_from_ohlc_row(
row: np.ndarray, row: np.ndarray,
# 0.5 is no overlap between arms, 1.0 is full overlap # 0.5 is no overlap between arms, 1.0 is full overlap
bar_w: float, w: float = 0.43
bar_gap: float = 0.16
) -> tuple[QLineF]: ) -> tuple[QLineF]:
''' '''
@ -52,7 +51,8 @@ def bar_from_ohlc_row(
OHLC "bar" for use in the "last datum" of a series. OHLC "bar" for use in the "last datum" of a series.
''' '''
open, high, low, close, index = row open, high, low, close, index = row[
['open', 'high', 'low', 'close', 'index']]
# TODO: maybe consider using `QGraphicsLineItem` ?? # TODO: maybe consider using `QGraphicsLineItem` ??
# gives us a ``.boundingRect()`` on the objects which may make # gives us a ``.boundingRect()`` on the objects which may make
@ -60,11 +60,9 @@ def bar_from_ohlc_row(
# history path faster since it's done in C++: # history path faster since it's done in C++:
# https://doc.qt.io/qt-5/qgraphicslineitem.html # https://doc.qt.io/qt-5/qgraphicslineitem.html
mid: float = (bar_w / 2) + index
# high -> low vertical (body) line # high -> low vertical (body) line
if low != high: if low != high:
hl = QLineF(mid, low, mid, high) hl = QLineF(index, low, index, high)
else: else:
# XXX: if we don't do it renders a weird rectangle? # XXX: if we don't do it renders a weird rectangle?
# see below for filtering this later... # see below for filtering this later...
@ -75,55 +73,48 @@ def bar_from_ohlc_row(
# the index's range according to the view mapping coordinates. # the index's range according to the view mapping coordinates.
# open line # open line
o = QLineF(index + bar_gap, open, mid, open) o = QLineF(index - w, open, index, open)
# close line # close line
c = QLineF( c = QLineF(index, close, index + w, close)
mid, close,
index + bar_w - bar_gap, close,
)
return [hl, o, c] return [hl, o, c]
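Note: feeding one OHLC record through the helper above yields the three segments, the high-low body plus the open/close "arms", that get appended to the bars path. A rough usage sketch against the new signature, with the sample values and bar width chosen purely for illustration:

import numpy as np

# one OHLC sample; the trailing value is the bar's x-index
row = np.array([100.0, 105.0, 98.0, 103.0, 10.0])

# assuming a unit-wide bar slot and the default gap
hl, o, c = bar_from_ohlc_row(row, bar_w=1.0)

# the close arm's right end is what `BarItems.x_last()` reports
assert round(c.x2(), 2) == 10.84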
class BarItems(FlowGraphic): class BarItems(pg.GraphicsObject):
''' '''
"Price range" bars graphics rendered from a OHLC sampled sequence. "Price range" bars graphics rendered from a OHLC sampled sequence.
''' '''
# XXX: causes this weird jitter bug when click-drag panning
# where the path curve will awkwardly flicker back and forth?
cache_mode: int = QGraphicsItem.NoCache
def __init__( def __init__(
self, self,
*args, linked: LinkedSplits,
**kwargs, plotitem: 'pg.PlotItem', # noqa
pen_color: str = 'bracket',
last_bar_color: str = 'bracket',
name: Optional[str] = None,
) -> None: ) -> None:
super().__init__()
self.linked = linked
# XXX: for the mega-lulz increasing width here increases draw
# latency... so probably don't do it until we figure that out.
self._color = pen_color
self.bars_pen = pg.mkPen(hcolor(pen_color), width=1)
self.last_bar_pen = pg.mkPen(hcolor(last_bar_color), width=2)
self._name = name
super().__init__(*args, **kwargs) self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
self._last_bar_lines: tuple[QLineF, ...] | None = None self.path = QPainterPath()
self._last_bar_lines: Optional[tuple[QLineF, ...]] = None
def x_last(self) -> None | float: def x_uppx(self) -> int:
''' # we expect the downsample curve report this.
Return the last most x value of the close line segment return 0
or if not drawn yet, ``None``.
'''
if self._last_bar_lines:
close_arm_line = self._last_bar_lines[-1]
return close_arm_line.x2() if close_arm_line else None
else:
return None
# Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect
def boundingRect(self): def boundingRect(self):
# profiler = Profiler( # Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect
# msg=f'BarItems.boundingRect(): `{self._name}`',
# disabled=not pg_profile_enabled(),
# ms_threshold=ms_slower_then,
# )
# TODO: Can we do rect caching to make this faster # TODO: Can we do rect caching to make this faster
# like `pg.PlotCurveItem` does? In theory it's just # like `pg.PlotCurveItem` does? In theory it's just
@ -143,37 +134,32 @@ class BarItems(FlowGraphic):
hb.topLeft(), hb.topLeft(),
hb.bottomRight(), hb.bottomRight(),
) )
mn_y = hb_tl.y()
mx_y = hb_br.y()
most_left = hb_tl.x()
most_right = hb_br.x()
# profiler('calc path vertices')
# need to include last bar height or BR will be off # need to include last bar height or BR will be off
# OHLC line segments: [hl, o, c] mx_y = hb_br.y()
last_lines: tuple[QLineF] | None = self._last_bar_lines mn_y = hb_tl.y()
last_lines = self._last_bar_lines
if last_lines: if last_lines:
( body_line = self._last_bar_lines[0]
hl, if body_line:
o, mx_y = max(mx_y, max(body_line.y1(), body_line.y2()))
c, mn_y = min(mn_y, min(body_line.y1(), body_line.y2()))
) = last_lines
most_right = c.x2() + 1
ymx = ymn = c.y2()
if hl: return QtCore.QRectF(
y1, y2 = hl.y1(), hl.y2()
ymn = min(y1, y2) # top left
ymx = max(y1, y2) QPointF(
mx_y = max(ymx, mx_y) hb_tl.x(),
mn_y = min(ymn, mn_y) mn_y,
# profiler('calc last bar vertices') ),
# bottom right
QPointF(
hb_br.x() + 1,
mx_y,
)
return QRectF(
most_left,
mn_y,
most_right - most_left + 1,
mx_y - mn_y,
) )
def paint( def paint(
@ -184,7 +170,7 @@ class BarItems(FlowGraphic):
) -> None: ) -> None:
profiler = Profiler( profiler = pg.debug.Profiler(
disabled=not pg_profile_enabled(), disabled=not pg_profile_enabled(),
ms_threshold=ms_slower_then, ms_threshold=ms_slower_then,
) )
@ -197,12 +183,12 @@ class BarItems(FlowGraphic):
# as is necessary for what's in "view". Not sure if this will # as is necessary for what's in "view". Not sure if this will
# lead to any perf gains other then when zoomed in to less bars # lead to any perf gains other then when zoomed in to less bars
# in view. # in view.
p.setPen(self.last_step_pen) p.setPen(self.last_bar_pen)
if self._last_bar_lines: if self._last_bar_lines:
p.drawLines(*tuple(filter(bool, self._last_bar_lines))) p.drawLines(*tuple(filter(bool, self._last_bar_lines)))
profiler('draw last bar') profiler('draw last bar')
p.setPen(self._pen) p.setPen(self.bars_pen)
p.drawPath(self.path) p.drawPath(self.path)
profiler(f'draw history path: {self.path.capacity()}') profiler(f'draw history path: {self.path.capacity()}')
@ -210,40 +196,29 @@ class BarItems(FlowGraphic):
self, self,
path: QPainterPath, path: QPainterPath,
src_data: np.ndarray, src_data: np.ndarray,
render_data: np.ndarray,
reset: bool, reset: bool,
array_key: str, array_key: str,
index_field: str,
) -> None:
# relevant fields
fields: list[str] = [ fields: list[str] = [
'index',
'open', 'open',
'high', 'high',
'low', 'low',
'close', 'close',
index_field, ],
]
) -> None:
# relevant fields
ohlc = src_data[fields] ohlc = src_data[fields]
# last_row = ohlc[-1:] last_row = ohlc[-1:]
# individual values # individual values
last_row = o, h, l, last, i = ohlc[-1] last_row = i, o, h, l, last = ohlc[-1]
# times = src_data['time']
# if times[-1] - times[-2]:
# breakpoint()
index = src_data[index_field]
step_size = index[-1] - index[-2]
# generate new lines objects for updatable "current bar" # generate new lines objects for updatable "current bar"
bg: float = 0.16 * step_size self._last_bar_lines = bar_from_ohlc_row(last_row)
self._last_bar_lines = bar_from_ohlc_row(
last_row,
bar_w=step_size,
bar_gap=bg,
)
# assert i == graphics.start_index - 1 # assert i == graphics.start_index - 1
# assert i == last_index # assert i == last_index
@ -258,16 +233,10 @@ class BarItems(FlowGraphic):
if l != h: # noqa if l != h: # noqa
if body is None: if body is None:
body = self._last_bar_lines[0] = QLineF( body = self._last_bar_lines[0] = QLineF(i, l, i, h)
i + bg, l,
i + step_size - bg, h,
)
else: else:
# update body # update body
body.setLine( body.setLine(i, l, i, h)
body.x1(), l,
body.x2(), h,
)
# XXX: pretty sure this is causing an issue where the # XXX: pretty sure this is causing an issue where the
# bar has a large upward move right before the next # bar has a large upward move right before the next
@ -278,4 +247,4 @@ class BarItems(FlowGraphic):
# date / from some previous sample. It's weird though # date / from some previous sample. It's weird though
# because i've seen it do this to bars i - 3 back? # because i've seen it do this to bars i - 3 back?
return ohlc[index_field], ohlc['close'] return ohlc['index'], ohlc['close']

View File

@ -22,9 +22,12 @@ from __future__ import annotations
from typing import ( from typing import (
Optional, Generic, Optional, Generic,
TypeVar, Callable, TypeVar, Callable,
Literal,
) )
import enum
import sys
# from pydantic import BaseModel, validator from pydantic import BaseModel, validator
from pydantic.generics import GenericModel from pydantic.generics import GenericModel
from PyQt5.QtWidgets import ( from PyQt5.QtWidgets import (
QWidget, QWidget,
@ -35,7 +38,6 @@ from ._forms import (
# FontScaledDelegate, # FontScaledDelegate,
Edit, Edit,
) )
from ..data.types import Struct
DataType = TypeVar('DataType') DataType = TypeVar('DataType')
@ -60,7 +62,7 @@ class Selection(Field[DataType], Generic[DataType]):
options: dict[str, DataType] options: dict[str, DataType]
# value: DataType = None # value: DataType = None
# @validator('value') # , always=True) @validator('value') # , always=True)
def set_value_first( def set_value_first(
cls, cls,
@ -98,7 +100,7 @@ class Edit(Field[DataType], Generic[DataType]):
widget_factory = Edit widget_factory = Edit
class AllocatorPane(Struct): class AllocatorPane(BaseModel):
account = Selection[str]( account = Selection[str](
options=dict.fromkeys( options=dict.fromkeys(

View File

@ -18,27 +18,23 @@
Charting overlay helpers. Charting overlay helpers.
''' '''
from collections import defaultdict from typing import Callable, Optional
from functools import partial
from typing import ( from pyqtgraph.Qt.QtCore import (
Callable, # QObject,
Optional, # Signal,
Qt,
# QEvent,
) )
from pyqtgraph.graphicsItems.AxisItem import AxisItem from pyqtgraph.graphicsItems.AxisItem import AxisItem
from pyqtgraph.graphicsItems.ViewBox import ViewBox from pyqtgraph.graphicsItems.ViewBox import ViewBox
# from pyqtgraph.graphicsItems.GraphicsWidget import GraphicsWidget from pyqtgraph.graphicsItems.GraphicsWidget import GraphicsWidget
from pyqtgraph.graphicsItems.PlotItem.PlotItem import PlotItem from pyqtgraph.graphicsItems.PlotItem.PlotItem import PlotItem
from pyqtgraph.Qt.QtCore import ( from pyqtgraph.Qt.QtCore import QObject, Signal, QEvent
QObject, from pyqtgraph.Qt.QtWidgets import QGraphicsGridLayout, QGraphicsLinearLayout
Signal,
QEvent, from ._interaction import ChartView
Qt,
)
from pyqtgraph.Qt.QtWidgets import (
# QGraphicsGridLayout,
QGraphicsLinearLayout,
)
__all__ = ["PlotItemOverlay"] __all__ = ["PlotItemOverlay"]
@ -84,20 +80,25 @@ class ComposedGridLayout:
``<axis_name>i`` in the layout. ``<axis_name>i`` in the layout.
The ``item: PlotItem`` passed to the constructor's grid layout is The ``item: PlotItem`` passed to the constructor's grid layout is
used verbatim as the "main plot" whose view box is given precedence used verbatim as the "main plot" whose view box is give precedence
for input handling. The main plot's axes are removed from its for input handling. The main plot's axes are removed from it's
layout and placed in the surrounding exterior layouts to allow for layout and placed in the surrounding exterior layouts to allow for
re-ordering if desired. re-ordering if desired.
''' '''
def __init__( def __init__(
self, self,
pi: PlotItem, item: PlotItem,
grid: QGraphicsGridLayout,
reverse: bool = False, # insert items to the "center"
) -> None: ) -> None:
self.items: list[PlotItem] = []
# self.grid = grid
self.reverse = reverse
self.pitems: list[PlotItem] = [] # TODO: use a ``bidict`` here?
self._pi2axes: dict[ # TODO: use a ``bidict`` here? self._pi2axes: dict[
int, int,
dict[str, AxisItem], dict[str, AxisItem],
] = {} ] = {}
@ -119,13 +120,12 @@ class ComposedGridLayout:
if name in ('top', 'bottom'): if name in ('top', 'bottom'):
orient = Qt.Vertical orient = Qt.Vertical
elif name in ('left', 'right'): elif name in ('left', 'right'):
orient = Qt.Horizontal orient = Qt.Horizontal
layout.setOrientation(orient) layout.setOrientation(orient)
self.insert_plotitem(0, pi) self.insert(0, item)
# insert surrounding linear layouts into the parent pi's layout # insert surrounding linear layouts into the parent pi's layout
# such that additional axes can be appended arbitrarily without # such that additional axes can be appended arbitrarily without
@ -135,14 +135,13 @@ class ComposedGridLayout:
# TODO: do we need this? # TODO: do we need this?
# axis should have been removed during insert above # axis should have been removed during insert above
index = _axes_layout_indices[name] index = _axes_layout_indices[name]
axis = pi.layout.itemAt(*index) axis = item.layout.itemAt(*index)
if axis and axis.isVisible(): if axis and axis.isVisible():
assert linlayout.itemAt(0) is axis assert linlayout.itemAt(0) is axis
# XXX: see comment in ``.insert_plotitem()``... # item.layout.removeItem(axis)
# pi.layout.removeItem(axis) item.layout.addItem(linlayout, *index)
pi.layout.addItem(linlayout, *index) layout = item.layout.itemAt(*index)
layout = pi.layout.itemAt(*index)
assert layout is linlayout assert layout is linlayout
def _register_item( def _register_item(
@ -158,32 +157,27 @@ class ComposedGridLayout:
self._pi2axes.setdefault(name, {})[index] = axis self._pi2axes.setdefault(name, {})[index] = axis
# enter plot into list for index tracking # enter plot into list for index tracking
self.pitems.insert(index, plotitem) self.items.insert(index, plotitem)
def insert_plotitem( def insert(
self, self,
index: int, index: int,
plotitem: PlotItem, plotitem: PlotItem,
) -> tuple[int, list[AxisItem]]: ) -> (int, int):
''' '''
Place item at index by inserting all axes into the grid Place item at index by inserting all axes into the grid
at list-order appropriate position. at list-order appropriate position.
''' '''
if index < 0: if index < 0:
raise ValueError( raise ValueError('`insert()` only supports an index >= 0')
'`.insert_plotitem()` only supports an index >= 0'
)
inserted_axes: list[AxisItem] = []
# add plot's axes in sequence to the embedded linear layouts # add plot's axes in sequence to the embedded linear layouts
# for each "side" thus avoiding graphics collisions. # for each "side" thus avoiding graphics collisions.
for name, axis_info in plotitem.axes.copy().items(): for name, axis_info in plotitem.axes.copy().items():
linlayout, axes = self.sides[name] linlayout, axes = self.sides[name]
axis = axis_info['item'] axis = axis_info['item']
inserted_axes.append(axis)
if axis in axes: if axis in axes:
# TODO: re-order using ``.pop()`` ? # TODO: re-order using ``.pop()`` ?
@ -196,20 +190,19 @@ class ComposedGridLayout:
if ( if (
not axis.isVisible() not axis.isVisible()
# XXX: we never skip moving the axes for the *root* # XXX: we never skip moving the axes for the *first*
# plotitem inserted (even if not shown) since we need to # plotitem inserted (even if not shown) since we need to
# move all the hidden axes into linear sub-layouts for # move all the hidden axes into linear sub-layouts for
# that "central" plot in the overlay. Also if we don't # that "central" plot in the overlay. Also if we don't
# do it there's weird geometry calc offsets that make # do it there's weird geometry calc offsets that make
# view coords slightly off somehow .. smh # view coords slightly off somehow .. smh
and not len(self.pitems) == 0 and not len(self.items) == 0
): ):
continue continue
# XXX: Remove old axis? # XXX: Remove old axis? No, turns out we don't need this?
# No, turns out we don't need this? # DON'T unlink it since we the original ``ViewBox``
# DON'T UNLINK IT since we need the original ``ViewBox`` to # to still drive it B)
# still drive it with events/handlers B)
# popped = plotitem.removeAxis(name, unlink=False) # popped = plotitem.removeAxis(name, unlink=False)
# assert axis is popped # assert axis is popped
@ -225,9 +218,9 @@ class ComposedGridLayout:
self._register_item(index, plotitem) self._register_item(index, plotitem)
return (index, inserted_axes) return index
def append_plotitem( def append(
self, self,
item: PlotItem, item: PlotItem,
@ -239,7 +232,7 @@ class ComposedGridLayout:
''' '''
# for left and bottom axes we have to first remove # for left and bottom axes we have to first remove
# items and re-insert to maintain a list-order. # items and re-insert to maintain a list-order.
return self.insert_plotitem(len(self.pitems), item) return self.insert(len(self.items), item)
def get_axis( def get_axis(
self, self,
@ -252,20 +245,20 @@ class ComposedGridLayout:
if axis for that name is not shown. if axis for that name is not shown.
''' '''
index = self.pitems.index(plot) index = self.items.index(plot)
named = self._pi2axes[name] named = self._pi2axes[name]
return named.get(index) return named.get(index)
# def pop( def pop(
# self, self,
# item: PlotItem, item: PlotItem,
# ) -> PlotItem: ) -> PlotItem:
# ''' '''
# Remove item and restack all axes in list-order. Remove item and restack all axes in list-order.
# ''' '''
# raise NotImplementedError raise NotImplementedError
# Unimplemented features TODO: # Unimplemented features TODO:
@ -286,6 +279,194 @@ class ComposedGridLayout:
# axis? # axis?
# TODO: we might want to enabled some kind of manual flag to disable
# this method wrapping during type creation? As example a user could
# definitively decide **not** to enable broadcasting support by
# setting something like ``ViewBox.disable_relays = True``?
def mk_relay_method(
signame: str,
slot: Callable[
[ViewBox,
'QEvent',
Optional[AxisItem]],
None,
],
) -> Callable[
[
ViewBox,
# lol, there isn't really a generic type thanks
# to the rewrite of Qt's event system XD
'QEvent',
'Optional[AxisItem]',
'Optional[ViewBox]', # the ``relayed_from`` arg we provide
],
None,
]:
def maybe_broadcast(
vb: 'ViewBox',
ev: 'QEvent',
axis: 'Optional[int]' = None,
relayed_from: 'ViewBox' = None,
) -> None:
'''
(soon to be) Decorator which makes an event handler
"broadcastable" to overlayed ``GraphicsWidget``s.
Adds relay signals based on the decorated handler's name
and conducts a signal broadcast of the relay signal if there
are consumers registered.
'''
# When no relay source has been set just bypass all
# the broadcast machinery.
if vb.event_relay_source is None:
ev.accept()
return slot(
vb,
ev,
axis=axis,
)
if relayed_from:
assert axis is None
# this is a relayed event and should be ignored (so it does not
# halt/short circuit the graphics scene loop). Further the
# surrounding handler for this signal must be allowed to execute
# and get processed by **this consumer**.
# print(f'{vb.name} rx relayed from {relayed_from.name}')
ev.ignore()
return slot(
vb,
ev,
axis=axis,
)
if axis is not None:
# print(f'{vb.name} handling axis event:\n{str(ev)}')
ev.accept()
return slot(
vb,
ev,
axis=axis,
)
elif (
relayed_from is None
and vb.event_relay_source is vb # we are the broadcaster
and axis is None
):
# Broadcast case: this is a source event which will be
# relayed to attached consumers and accepted after all
# consumers complete their own handling followed by this
# routine's processing. Sequence is,
# - pre-relay to all consumers *first* - ``.emit()`` blocks
# until all downstream relay handlers have run.
# - run the source handler for **this** event and accept
# the event
# Access the "bound signal" that is created
# on the widget type as part of instantiation.
signal = getattr(vb, signame)
# print(f'{vb.name} emitting {signame}')
# TODO/NOTE: we could also just bypass a "relay" signal
# entirely and instead call the handlers manually in
# a loop? This probably is a lot simpler and also doesn't
# have any downside, and allows not touching target widget
# internals.
signal.emit(
ev,
axis,
# passing this demarks a broadcasted/relayed event
vb,
)
# accept event so no more relays are fired.
ev.accept()
# call underlying wrapped method with an extra
# ``relayed_from`` value to denote that this is a relayed
# event handling case.
return slot(
vb,
ev,
axis=axis,
)
return maybe_broadcast
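A simplified, non-Qt sketch of the relay idea that `maybe_broadcast()` implements: the source handler fans the event out to registered consumers first, passing a `relayed_from` marker so consumers never re-broadcast, and only then runs its own processing (class and event names here are invented for illustration):

class View:
    def __init__(self, name: str):
        self.name = name
        self.consumers: list['View'] = []

    def on_event(self, ev, relayed_from=None) -> None:
        if relayed_from is not None:
            # relayed case: handle locally only, no further fan-out
            print(f'{self.name} handling relayed {ev} from {relayed_from.name}')
            return
        # broadcast case: relay to all consumers *before* local handling
        for consumer in self.consumers:
            consumer.on_event(ev, relayed_from=self)
        print(f'{self.name} handling source event {ev}')

root, overlay = View('root'), View('overlay')
root.consumers.append(overlay)
root.on_event('wheel')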
# XXX: :( can't define signals **after** class compile time
# so this is not really useful.
# def mk_relay_signal(
# func,
# name: str = None,
# ) -> Signal:
# (
# args,
# varargs,
# varkw,
# defaults,
# kwonlyargs,
# kwonlydefaults,
# annotations
# ) = inspect.getfullargspec(func)
# # XXX: generate a relay signal with 1 extra
# # argument for a ``relayed_from`` kwarg. Since
# # ``'self'`` is already ignored by signals we just need
# # to count the arguments since we're adding only 1 (and
# # ``args`` will capture that).
# numargs = len(args + list(defaults))
# signal = Signal(*tuple(numargs * [object]))
# signame = name or func.__name__ + 'Relay'
# return signame, signal
def enable_relays(
widget: GraphicsWidget,
handler_names: list[str],
) -> list[Signal]:
'''
Method override helper which enables relay of a particular
``Signal`` from some chosen broadcaster widget to a set of
consumer widgets which should operate their event handlers normally
but be driven by signals "relayed" from the broadcaster instead.
Mostly useful for overlaying widgets that handle user input
that you want to overlay graphically. The target ``widget`` type must
define ``QtCore.Signal``s each with a `'Relay'` suffix for each
name provided in ``handler_names: list[str]``.
'''
signals = []
for name in handler_names:
handler = getattr(widget, name)
signame = name + 'Relay'
# ensure the target widget defines a relay signal
relay = getattr(widget, signame)
widget.relays[signame] = name
signals.append(relay)
method = mk_relay_method(signame, handler)
setattr(widget, name, method)
return signals
enable_relays(
ChartView,
['wheelEvent', 'mouseDragEvent']
)
class PlotItemOverlay: class PlotItemOverlay:
''' '''
A composite for managing overlaid ``PlotItem`` instances such that A composite for managing overlaid ``PlotItem`` instances such that
@ -301,191 +482,86 @@ class PlotItemOverlay:
) -> None: ) -> None:
self.root_plotitem: PlotItem = root_plotitem self.root_plotitem: PlotItem = root_plotitem
self.relay_handlers: defaultdict[
str,
list[Callable],
] = defaultdict(list)
# NOTE: required for scene layering/relaying; this guarantees vb = root_plotitem.vb
# the "root" plot receives priority for interaction vb.event_relay_source = vb # TODO: maybe change name?
# events/signals. vb.setZValue(1000) # XXX: critical for scene layering/relaying
root_plotitem.vb.setZValue(10)
self.layout = ComposedGridLayout(root_plotitem) self.overlays: list[PlotItem] = []
self.layout = ComposedGridLayout(
root_plotitem,
root_plotitem.layout,
)
self._relays: dict[str, Signal] = {} self._relays: dict[str, Signal] = {}
@property
def overlays(self) -> list[PlotItem]:
return self.layout.pitems
def add_plotitem( def add_plotitem(
self, self,
plotitem: PlotItem, plotitem: PlotItem,
index: Optional[int] = None, index: Optional[int] = None,
# event/signal names which will be broadcasted to all added # TODO: we could also put the ``ViewBox.XAxis``
# (relayee) ``PlotItem``s (eg. ``ViewBox.mouseDragEvent``). # style enum here?
relay_events: list[str] = [],
# (0,), # link x # (0,), # link x
# (1,), # link y # (1,), # link y
# (0, 1), # link both # (0, 1), # link both
link_axes: tuple[int] = (), link_axes: tuple[int] = (),
) -> tuple[int, list[AxisItem]]: ) -> None:
index = index or len(self.overlays)
root = self.root_plotitem root = self.root_plotitem
# layout: QGraphicsGridLayout = root.layout
self.overlays.insert(index, plotitem)
vb: ViewBox = plotitem.vb vb: ViewBox = plotitem.vb
# mark this consumer overlay as ready to expect relayed events
# from the root plotitem.
vb.event_relay_source = root.vb
# TODO: some sane way to allow menu event broadcast XD # TODO: some sane way to allow menu event broadcast XD
# vb.setMenuEnabled(False) # vb.setMenuEnabled(False)
# wire up any relay signal(s) from the source plot to added # TODO: inside the `maybe_broadcast()` (soon to be) decorator
# "overlays". We use a plain loop instead of mucking with # we need have checks that consumers have been attached to
# re-connecting signal/slots which tends to be more invasive and # these relay signals.
# harder to implement and provides no measurable performance if link_axes != (0, 1):
# gain.
if relay_events:
for ev_name in relay_events:
relayee_handler: Callable[
[
ViewBox,
# lol, there isn't really a generic type thanks
# to the rewrite of Qt's event system XD
QEvent,
AxisItem | None, # wire up relay signals
], for relay_signal_name, handler_name in vb.relays.items():
None, # print(handler_name)
] = getattr(vb, ev_name) # XXX: Signal class attrs are bound after instantiation
# of the defining type, so we need to access that bound
sub_handlers: list[Callable] = self.relay_handlers[ev_name] # version here.
signal = getattr(root.vb, relay_signal_name)
# on the first registry of a relayed event we pop the handler = getattr(vb, handler_name)
# root's handler and override it to a custom broadcaster signal.connect(handler)
# routine.
if not sub_handlers:
src_handler = getattr(
root.vb,
ev_name,
)
def broadcast(
ev: 'QEvent',
# TODO: drop this viewbox specific input and
# allow a predicate to be passed in by user.
axis: 'Optional[int]' = None,
*,
# these are bound in by the ``partial`` below
# and ensure a unique broadcaster per event.
ev_name: str = None,
src_handler: Callable = None,
relayed_from: 'ViewBox' = None,
# remaining inputs the source handler expects
**kwargs,
) -> None:
'''
Broadcast signal or event: this is a source
event which will be relayed to attached
"relayee" plot item consumers.
The event is accepted halting any further
handlers from being triggered.
Sequence is,
- pre-relay to all consumers *first* - exactly
like how a ``Signal.emit()`` blocks until all
downstream relay handlers have run.
- run the event's source handler.
'''
ev.accept()
# broadcast first to relayees *first*. trigger
# relay of event to all consumers **before**
# processing/consumption in the source handler.
relayed_handlers = self.relay_handlers[ev_name]
assert getattr(vb, ev_name).__name__ == ev_name
# TODO: generalize as an input predicate
if axis is None:
for handler in relayed_handlers:
handler(
ev,
axis=axis,
**kwargs,
)
# run "source" widget's handler last
src_handler(
ev,
axis=axis,
)
# dynamic handler override on the publisher plot
setattr(
root.vb,
ev_name,
partial(
broadcast,
ev_name=ev_name,
src_handler=src_handler
),
)
else:
assert getattr(root.vb, ev_name)
assert relayee_handler not in sub_handlers
# append relayed-to widget's handler to relay table
sub_handlers.append(relayee_handler)
# link dim-axes to root if requested by user. # link dim-axes to root if requested by user.
# TODO: solve more-than-wanted scaled panning on click drag
# which seems to be due to broadcast. So we probably need to
# disable broadcast when axes are linked in a particular
# dimension?
for dim in link_axes: for dim in link_axes:
# link x and y axes to new view box such that the top level # link x and y axes to new view box such that the top level
# viewbox propagates to the root (and whatever other # viewbox propagates to the root (and whatever other
# plotitem overlays that have been added). # plotitem overlays that have been added).
vb.linkView(dim, root.vb) vb.linkView(dim, root.vb)
# => NOTE: in order to prevent "more-than-linear" scaled # make overlaid viewbox impossible to focus since the top
# panning moves on (for eg. click-drag) certain range change # level should handle all input and relay to overlays.
# signals (i.e. ``.sigXRangeChanged``), the user needs to be # NOTE: this was solved with the `setZValue()` above!
# careful that any broadcasted ``relay_events`` are short # level should handle all input and relay to overlays.
# circuited in sub-handlers (aka relayee's) implementations. As
# an example if a ``ViewBox.mouseDragEvent`` is broadcasted, the
# overlayed implementations need to be sure they either don't
# also link the x-axes (by not providing ``link_axes=(0,)``
# above) or that the relayee ``.mouseDragEvent()`` handlers are
# ready to "``return`` early" in the case that
# ``.sigXRangeChanged`` is emitted as part of linked axes.
# For more details on such signalling mechanics peek in
# ``ViewBox.linkView()``.
# make overlaid viewbox impossible to focus since the top level # TODO: we will probably want to add a "focus" api such that
# should handle all input and relay to overlays. Note that the # a new "top level" ``PlotItem`` can be selected dynamically
# "root" plot item gettingn interaction priority is configured # (and presumably the axes dynamically sorted to match).
# with the ``.setZValue()`` during init.
vb.setFlag( vb.setFlag(
vb.GraphicsItemFlag.ItemIsFocusable, vb.GraphicsItemFlag.ItemIsFocusable,
False False
) )
vb.setFocusPolicy(Qt.NoFocus) vb.setFocusPolicy(Qt.NoFocus)
# => TODO: add a "focus" api for switching the "top level"
# ``PlotItem`` dynamically.
# append-compose into the layout all axes from this plot # append-compose into the layout all axes from this plot
if index is None: self.layout.insert(index, plotitem)
insert_index, axes = self.layout.append_plotitem(plotitem)
else:
insert_index, axes = self.layout.insert_plotitem(index, plotitem)
plotitem.setGeometry(root.vb.sceneBoundingRect()) plotitem.setGeometry(root.vb.sceneBoundingRect())
@ -503,12 +579,24 @@ class PlotItemOverlay:
root.vb.setFocus() root.vb.setFocus()
assert root.vb.focusWidget() assert root.vb.focusWidget()
vb.setZValue(100) # XXX: do we need this? Why would you build then destroy?
def remove_plotitem(self, plotItem: PlotItem) -> None:
'''
Remove this ``PlotItem`` from the overlayed set making not shown
and unable to accept input.
return ( '''
index, ...
axes,
) # TODO: i think this would be super hot B)
def focus_item(self, plotitem: PlotItem) -> PlotItem:
'''
Apply focus to a contained PlotItem thus making it the "top level"
item in the overlay able to accept peripheral's input from the user
and responsible for zoom and panning control via its ``ViewBox``.
'''
...
def get_axis( def get_axis(
self, self,
@ -542,9 +630,8 @@ class PlotItemOverlay:
return axes return axes
# XXX: untested as of now. # TODO: i guess we need this if you want to detach existing plots
# TODO: need this as part of selecting a different root/source # dynamically? XXX: untested as of now.
# plot to rewire interaction event broadcast dynamically.
def _disconnect_all( def _disconnect_all(
self, self,
plotitem: PlotItem, plotitem: PlotItem,
@ -559,22 +646,3 @@ class PlotItemOverlay:
disconnected.append(sig) disconnected.append(sig)
return disconnected return disconnected
# XXX: do we need this? Why would you build then destroy?
# def remove_plotitem(self, plotItem: PlotItem) -> None:
# '''
# Remove this ``PlotItem`` from the overlayed set making not shown
# and unable to accept input.
# '''
# ...
# TODO: i think this would be super hot B)
# def focus_plotitem(self, plotitem: PlotItem) -> PlotItem:
# '''
# Apply focus to a contained PlotItem thus making it the "top level"
# item in the overlay able to accept peripheral's input from the user
# and responsible for zoom and panning control via its ``ViewBox``.
# '''
# ...

View File

@ -0,0 +1,236 @@
# piker: trading gear for hackers
# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Super fast ``QPainterPath`` generation related operator routines.
"""
from __future__ import annotations
from typing import (
# Optional,
TYPE_CHECKING,
)
import numpy as np
from numpy.lib import recfunctions as rfn
from numba import njit, float64, int64 # , optional
# import pyqtgraph as pg
from PyQt5 import QtGui
# from PyQt5.QtCore import QLineF, QPointF
from ..data._sharedmem import (
ShmArray,
)
# from .._profile import pg_profile_enabled, ms_slower_then
from ._compression import (
ds_m4,
)
if TYPE_CHECKING:
from ._flows import Renderer
def xy_downsample(
x,
y,
uppx,
x_spacer: float = 0.5,
) -> tuple[np.ndarray, np.ndarray]:
# downsample whenever more then 1 pixels per datum can be shown.
# always refresh data bounds until we get diffing
# working properly, see above..
bins, x, y = ds_m4(
x,
y,
uppx,
)
# flatten output to 1d arrays suitable for path-graphics generation.
x = np.broadcast_to(x[:, None], y.shape)
x = (x + np.array(
[-x_spacer, 0, 0, x_spacer]
)).flatten()
y = y.flatten()
return x, y
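A sketch of just this flattening step on made-up, already-binned data: each bin contributes one x value and four y values (whatever per-bin ordering `ds_m4()` emits), so x is broadcast to the y shape, offset by the spacer, and flattened so both arrays line up:

import numpy as np

x = np.array([0., 1., 2.])          # one x per bin
y = np.array([                      # 4 y's per bin (as returned by ds_m4)
    [1.0, 0.5, 1.5, 1.2],
    [1.2, 1.0, 2.0, 1.8],
    [1.8, 1.4, 1.9, 1.5],
])
x_spacer = 0.5

x_flat = (
    np.broadcast_to(x[:, None], y.shape)
    + np.array([-x_spacer, 0, 0, x_spacer])
).flatten()
y_flat = y.flatten()
print(x_flat)   # [-0.5  0.  0.  0.5  0.5  1.  1.  1.5  1.5  2.  2.  2.5]
print(y_flat)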
@njit(
# TODO: for now need to construct this manually for readonly arrays, see
# https://github.com/numba/numba/issues/4511
# ntypes.tuple((float64[:], float64[:], float64[:]))(
# numba_ohlc_dtype[::1], # contiguous
# int64,
# optional(float64),
# ),
nogil=True
)
def path_arrays_from_ohlc(
data: np.ndarray,
start: int64,
bar_gap: float64 = 0.43,
) -> np.ndarray:
'''
Generate an array of lines objects from input ohlc data.
'''
size = int(data.shape[0] * 6)
x = np.zeros(
# data,
shape=size,
dtype=float64,
)
y, c = x.copy(), x.copy()
# TODO: report bug for assert @
# /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991
for i, q in enumerate(data[start:], start):
# TODO: ask numba why this doesn't work..
# open, high, low, close, index = q[
# ['open', 'high', 'low', 'close', 'index']]
open = q['open']
high = q['high']
low = q['low']
close = q['close']
index = float64(q['index'])
istart = i * 6
istop = istart + 6
# x,y detail the 6 points which connect all vertices of an OHLC bar
x[istart:istop] = (
index - bar_gap,
index,
index,
index,
index,
index + bar_gap,
)
y[istart:istop] = (
open,
open,
low,
high,
close,
close,
)
# specifies that the first edge is never connected to the
# prior bars last edge thus providing a small "gap"/"space"
# between bars determined by ``bar_gap``.
c[istart:istop] = (1, 1, 1, 1, 1, 0)
return x, y, c
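A pure-numpy sketch (no numba) of the same 6-vertex-per-bar layout for two made-up bars; `c` is the connect mask whose trailing 0 breaks the line before the next bar starts:

import numpy as np

bars = [
    # open, high, low, close, index
    (10.0, 12.0, 9.0, 11.0, 0.0),
    (11.0, 13.0, 10.5, 12.5, 1.0),
]
bar_gap = 0.43
n = len(bars) * 6
x, y, c = np.zeros(n), np.zeros(n), np.zeros(n)

for i, (open_, high, low, close, index) in enumerate(bars):
    s = i * 6
    x[s:s + 6] = (index - bar_gap, index, index, index, index, index + bar_gap)
    y[s:s + 6] = (open_, open_, low, high, close, close)
    c[s:s + 6] = (1, 1, 1, 1, 1, 0)

print(np.column_stack((x, y, c)))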
def gen_ohlc_qpath(
r: Renderer,
data: np.ndarray,
array_key: str, # we ignore this
vr: tuple[int, int],
start: int = 0, # XXX: do we need this?
# 0.5 is no overlap between arms, 1.0 is full overlap
w: float = 0.43,
) -> QtGui.QPainterPath:
'''
More or less direct proxy to ``path_arrays_from_ohlc()``
but with closed-in kwargs for line spacing.
'''
x, y, c = path_arrays_from_ohlc(
data,
start,
bar_gap=w,
)
return x, y, c
def ohlc_to_line(
ohlc_shm: ShmArray,
data_field: str,
fields: list[str] = ['open', 'high', 'low', 'close']
) -> tuple[
np.ndarray,
np.ndarray,
]:
'''
Convert an input struct-array holding OHLC samples into a pair of
flattened x, y arrays with the same size (datum-wise) as the source
data.
'''
y_out = ohlc_shm.ustruct(fields)
first = ohlc_shm._first.value
last = ohlc_shm._last.value
# write pushed data to flattened copy
y_out[first:last] = rfn.structured_to_unstructured(
ohlc_shm.array[fields]
)
# generate a flat-interpolated x-domain
x_out = (
np.broadcast_to(
ohlc_shm._array['index'][:, None],
(
ohlc_shm._array.size,
# 4, # only ohlc
y_out.shape[1],
),
) + np.array([-0.5, 0, 0, 0.5])
)
assert y_out.any()
return (
x_out,
y_out,
)
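A sketch of the same flatten step on a tiny structured array (the shm machinery is omitted): `structured_to_unstructured()` turns each OHLC row into 4 y-samples while the index column is broadcast into 4 offset x-samples so both arrays line up:

import numpy as np
from numpy.lib import recfunctions as rfn

ohlc = np.array(
    [(0, 10.0, 12.0, 9.0, 11.0), (1, 11.0, 13.0, 10.5, 12.5)],
    dtype=[('index', 'i8'), ('open', 'f8'), ('high', 'f8'),
           ('low', 'f8'), ('close', 'f8')],
)
fields = ['open', 'high', 'low', 'close']

y_out = rfn.structured_to_unstructured(ohlc[fields])   # shape (N, 4)
x_out = (
    np.broadcast_to(ohlc['index'][:, None], y_out.shape)
    + np.array([-0.5, 0, 0, 0.5])
)
print(x_out)
print(y_out)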
def to_step_format(
shm: ShmArray,
data_field: str,
index_field: str = 'index',
) -> tuple[int, np.ndarray, np.ndarray]:
'''
Convert an input 1d shm array to a "step array" format
for use by path graphics generation.
'''
i = shm._array['index'].copy()
out = shm._array[data_field].copy()
x_out = np.broadcast_to(
i[:, None],
(i.size, 2),
) + np.array([-0.5, 0.5])
y_out = np.empty((len(out), 2), dtype=out.dtype)
y_out[:] = out[:, np.newaxis]
# start y at origin level
y_out[0, 0] = 0
return x_out, y_out
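And the step-format conversion on a plain 1d series (again without shm): each datum is duplicated over a [-0.5, +0.5] index window so the rendered path reads as flat steps:

import numpy as np

index = np.arange(5)
data = np.array([0., 1., 3., 2., 4.])

x_out = np.broadcast_to(index[:, None], (index.size, 2)) + np.array([-0.5, 0.5])
y_out = np.empty((len(data), 2), dtype=data.dtype)
y_out[:] = data[:, np.newaxis]
y_out[0, 0] = 0   # start y at origin level

print(x_out)
print(y_out)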

View File

@ -15,19 +15,13 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>. # along with this program. If not, see <https://www.gnu.org/licenses/>.
""" """
Customization of ``pyqtgraph`` core routines and various types normally Customization of ``pyqtgraph`` core routines to speed up our use mostly
for speedups. based on not requiring "scientific precision" for pixel perfect view
transforms.
Generally, ours does not require "scientific precision" for pixel perfect
view transforms.
""" """
from typing import Optional
import pyqtgraph as pg import pyqtgraph as pg
from ._axes import Axis
def invertQTransform(tr): def invertQTransform(tr):
"""Return a QTransform that is the inverse of *tr*. """Return a QTransform that is the inverse of *tr*.
@ -52,236 +46,3 @@ def _do_overrides() -> None:
""" """
# we don't care about potential fp issues inside Qt # we don't care about potential fp issues inside Qt
pg.functions.invertQTransform = invertQTransform pg.functions.invertQTransform = invertQTransform
pg.PlotItem = PlotItem
# enable "QPainterPathPrivate for faster arrayToQPath" from
# https://github.com/pyqtgraph/pyqtgraph/pull/2324
pg.setConfigOption('enableExperimental', True)
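Only the docstring of the `invertQTransform()` override is visible in this hunk; a hedged sketch of what a "good enough" version could look like is to lean on Qt's own float inversion rather than pyqtgraph's numpy-based high-precision path (this is not necessarily piker's exact body):

from PyQt5.QtGui import QTransform

def invertQTransform(tr: QTransform) -> QTransform:
    # accept Qt's float inversion; precision is fine for view transforms
    inverted, invertible = tr.inverted()
    if not invertible:
        raise Exception('Transform is not invertible.')
    return inverted

# then monkey-patched as in `_do_overrides()`:
# pg.functions.invertQTransform = invertQTransform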
# NOTE: the below customized type contains all our changes on a method
# by method basis as per the diff:
# https://github.com/pyqtgraph/pyqtgraph/commit/8e60bc14234b6bec1369ff4192dbfb82f8682920#diff-a2b5865955d2ba703dbc4c35ff01aa761aa28d2aeaac5e68d24e338bc82fb5b1R500
class PlotItem(pg.PlotItem):
'''
Overrides for the core plot object mostly pertaining to overlayed
multi-view management as it relates to multi-axis managment.
This object is the combination of a ``ViewBox`` and multiple
``AxisItem``s and so far we've added additional functionality and
APIs for:
- removal of axes
---
From ``pyqtgraph`` super type docs:
- Manage placement of ViewBox, AxisItems, and LabelItems
- Create and manage a list of PlotDataItems displayed inside the
ViewBox
- Implement a context menu with commonly used display and analysis
options
'''
def __init__(
self,
parent=None,
name=None,
labels=None,
title=None,
viewBox=None,
axisItems=None,
default_axes=['left', 'bottom'],
enableMenu=True,
**kargs
):
super().__init__(
parent=parent,
name=name,
labels=labels,
title=title,
viewBox=viewBox,
axisItems=axisItems,
# default_axes=default_axes,
enableMenu=enableMenu,
kargs=kargs,
)
self.name = name
self.chart_widget = None
# self.setAxisItems(
# axisItems,
# default_axes=default_axes,
# )
# NOTE: this is an entirely new method not in upstream.
def removeAxis(
self,
name: str,
unlink: bool = True,
) -> Optional[pg.AxisItem]:
"""
Remove an axis from the contained axis items
by ```name: str```.
This means the axis graphics object will be removed
from the ``.layout: QGraphicsGridLayout`` as well as unlinked
from the underlying associated ``ViewBox``.
If the ``unlink: bool`` is set to ``False`` then the axis will
stay linked to its view and will only be removed from the
layout.
If no axis with ``name: str`` is found then this is a noop.
Return the axis instance that was removed.
"""
entry = self.axes.pop(name, None)
if not entry:
return
axis = entry['item']
self.layout.removeItem(axis)
axis.scene().removeItem(axis)
if unlink:
axis.unlinkFromView()
self.update()
return axis
# Why do we need to always have all axes created?
#
# I don't understand this at all.
#
# Everything seems to work if you just always apply the
# set passed to this method **EXCEPT** for some super weird reason
# the view box geometry still computes as though the space for the
# `'bottom'` axis is always there **UNLESS** you always add that
# axis but hide it?
#
# Why in tf would this be the case!?!?
def setAxisItems(
self,
# XXX: yeah yeah, i know we can't use type annots like this yet.
axisItems: Optional[dict[str, pg.AxisItem]] = None,
add_to_layout: bool = True,
default_axes: list[str] = ['left', 'bottom'],
):
"""
Override axis item setting to only
"""
axisItems = axisItems or {}
# XXX: wth is this even saying?!?
# Array containing visible axis items
# Also containing potentially hidden axes, but they are not
# touched so it does not matter
# visibleAxes = ['left', 'bottom']
# Note that it does not matter that this adds
# some values to visibleAxes a second time
# XXX: uhhh wat^ ..?
visibleAxes = list(default_axes) + list(axisItems.keys())
# TODO: we should probably invert the loop here to not loop the
# predefined "axis name set" and instead loop the `axisItems`
# input and lookup indices from a predefined map.
for name, pos in (
('top', (1, 1)),
('bottom', (3, 1)),
('left', (2, 0)),
('right', (2, 2))
):
if (
name in self.axes and
name in axisItems
):
# we already have an axis entry for this name
# so remove the existing entry.
self.removeAxis(name)
# elif name not in axisItems:
# # this axis entry is not provided in this call
# # so remove any old/existing entry.
# self.removeAxis(name)
# Create new axis
if name in axisItems:
axis = axisItems[name]
if axis.scene() is not None:
if (
name not in self.axes
or axis != self.axes[name]["item"]
):
raise RuntimeError(
"Can't add an axis to multiple plots. Shared axes"
" can be achieved with multiple AxisItem instances"
" and set[X/Y]Link.")
else:
# Set up new axis
# XXX: ok but why do we want to add axes for all entries
# if not desired by the user? The only reason I can see
# adding this is without it there's some weird
# ``ViewBox`` geometry bug.. where a gap for the
# 'bottom' axis is somehow left in?
# axis = pg.AxisItem(orientation=name, parent=self)
axis = Axis(
self,
orientation=name,
parent=self,
)
axis.linkToView(self.vb)
# XXX: shouldn't you already know the ``pos`` from the name?
# Oh right instead of a global map that would let you
# easily look that up, it's redefined over and over and over
# again in methods..
self.axes[name] = {'item': axis, 'pos': pos}
# NOTE: in the overlay case the axis may be added to some
# other layout and should not be added here.
if add_to_layout:
self.layout.addItem(axis, *pos)
# place axis above images at z=0, items that want to draw
# over the axes should be placed at z>=1:
axis.setZValue(0.5)
axis.setFlag(
axis.GraphicsItemFlag.ItemNegativeZStacksBehindParent
)
if name in visibleAxes:
self.showAxis(name, True)
else:
# why do we need to insert all axes to ``.axes`` and
# only hide the ones the user doesn't specify? It all
# seems to work fine without doing this except for this
# weird gap for the 'bottom' axis that always shows up
# in the view box geometry??
self.hideAxis(name)
def updateGrid(
self,
*args,
):
alpha = self.ctrl.gridAlphaSlider.value()
x = alpha if self.ctrl.xGridCheck.isChecked() else False
y = alpha if self.ctrl.yGridCheck.isChecked() else False
for name, dim in (
('top', x),
('bottom', x),
('left', y),
('right', y)
):
if name in self.axes:
self.getAxis(name).setGrid(dim)
# self.getAxis('bottom').setGrid(x)
# self.getAxis('left').setGrid(y)
# self.getAxis('right').setGrid(y)

File diff suppressed because it is too large

View File

@ -1,320 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
'''
High level streaming graphics primitives.
This is an intermediate layer which associates real-time low latency
graphics primitives with underlying stream/flow related data structures
for fast incremental update.
'''
from __future__ import annotations
from typing import (
TYPE_CHECKING,
)
import msgspec
import numpy as np
import pyqtgraph as pg
from PyQt5.QtGui import QPainterPath
from ..data._formatters import (
IncrementalFormatter,
)
from ..data._pathops import (
xy_downsample,
)
from ..log import get_logger
from .._profile import (
Profiler,
)
if TYPE_CHECKING:
from ._dataviz import Viz
log = get_logger(__name__)
class Renderer(msgspec.Struct):
viz: Viz
fmtr: IncrementalFormatter
# output graphics rendering, the main object
# processed in ``QGraphicsObject.paint()``
path: QPainterPath | None = None
fast_path: QPainterPath | None = None
# downsampling state
_last_uppx: float = 0
_in_ds: bool = False
def draw_path(
self,
x: np.ndarray,
y: np.ndarray,
connect: str | np.ndarray = 'all',
path: QPainterPath | None = None,
redraw: bool = False,
) -> QPainterPath:
path_was_none = path is None
if redraw and path:
path.clear()
# TODO: avoid this?
if self.fast_path:
self.fast_path.clear()
path = pg.functions.arrayToQPath(
x,
y,
connect=connect,
finiteCheck=False,
# reserve mem allocs see:
# - https://doc.qt.io/qt-5/qpainterpath.html#reserve
# - https://doc.qt.io/qt-5/qpainterpath.html#capacity
# - https://doc.qt.io/qt-5/qpainterpath.html#clear
# XXX: right now this is based on ad-hoc checks on a
# hidpi 3840x2160 4k monitor but we should optimize for
# the target display(s) on the sys.
# if no_path_yet:
# graphics.path.reserve(int(500e3))
# path=path, # path re-use / reserving
)
# avoid mem allocs if possible
if path_was_none:
path.reserve(path.capacity())
return path
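For context, `draw_path()` is a thin wrapper over `pg.functions.arrayToQPath()`; a small sketch of the underlying call with made-up arrays and an explicit connect mask (the `finiteCheck` kwarg assumes a pyqtgraph version recent enough to expose it, as the code above does):

import numpy as np
import pyqtgraph as pg

x = np.array([0., 1., 2., 3.])
y = np.array([1., 2., 1.5, 3.])
connect = np.array([1, 1, 0, 0])   # break the path after the 3rd point

path = pg.functions.arrayToQPath(
    x, y,
    connect=connect,
    finiteCheck=False,
)
print(path.elementCount())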
def render(
self,
new_read,
array_key: str,
profiler: Profiler,
uppx: float = 1,
# redraw and ds flags
should_redraw: bool = False,
new_sample_rate: bool = False,
should_ds: bool = False,
showing_src_data: bool = True,
do_append: bool = True,
use_fpath: bool = True,
# only render datums "in view" of the ``ChartView``
use_vr: bool = True,
) -> tuple[QPainterPath, bool]:
'''
Render the current graphics path(s)
There are (at least) 3 stages from source data to graphics data:
- a data transform (which can be stored in additional shm)
- a graphics transform which converts discrete basis data to
a `float`-basis view-coords graphics basis. (eg. ``ohlc_flatten()``,
``step_path_arrays_from_1d()``, etc.)
- blah blah blah (from notes)
'''
# TODO: can the renderer just call ``Viz.read()`` directly?
# unpack latest source data read
fmtr = self.fmtr
(
_,
_,
array,
ivl,
ivr,
in_view,
) = new_read
# xy-path data transform: convert source data to a format
# able to be passed to a `QPainterPath` rendering routine.
fmt_out = fmtr.format_to_1d(
new_read,
array_key,
profiler,
slice_to_inview=use_vr,
)
# no history in view case
if not fmt_out:
# XXX: this might be why the profiler only has exits?
return
(
x_1d,
y_1d,
connect,
prepend_length,
append_length,
view_changed,
# append_tres,
) = fmt_out
# redraw conditions
if (
prepend_length > 0
or new_sample_rate
or view_changed
# NOTE: comment this to try and make "append paths"
# work below..
or append_length > 0
):
should_redraw = True
path: QPainterPath = self.path
fast_path: QPainterPath = self.fast_path
reset: bool = False
self.viz.yrange = None
# redraw the entire source data if we have either of:
# - no prior path graphic rendered or,
# - we always intend to re-render the data only in view
if (
path is None
or should_redraw
):
# print(f"{self.viz.name} -> REDRAWING BRUH")
if new_sample_rate and showing_src_data:
log.info(f'DE-downsampling -> {array_key}')
self._in_ds = False
elif should_ds and uppx > 1:
ds_out = xy_downsample(
x_1d,
y_1d,
uppx,
)
if ds_out is not None:
x_1d, y_1d, ymn, ymx = ds_out
self.viz.yrange = ymn, ymx
# print(f'{self.viz.name} post ds: ymn, ymx: {ymn},{ymx}')
reset = True
profiler(f'FULL PATH downsample redraw={should_ds}')
self._in_ds = True
path = self.draw_path(
x=x_1d,
y=y_1d,
connect=connect,
path=path,
redraw=True,
)
profiler(
'generated fresh path. '
f'(should_redraw: {should_redraw} '
f'should_ds: {should_ds} new_sample_rate: {new_sample_rate})'
)
# TODO: get this piecewise prepend working - right now it's
# giving heck on vwap...
# elif prepend_length:
# prepend_path = pg.functions.arrayToQPath(
# x[0:prepend_length],
# y[0:prepend_length],
# connect='all'
# )
# # swap prepend path in "front"
# old_path = graphics.path
# graphics.path = prepend_path
# # graphics.path.moveTo(new_x[0], new_y[0])
# graphics.path.connectPath(old_path)
elif (
append_length > 0
and do_append
):
profiler(f'sliced append path {append_length}')
# (
# x_1d,
# y_1d,
# connect,
# ) = append_tres
profiler(
f'diffed array input, append_length={append_length}'
)
# if should_ds and uppx > 1:
# new_x, new_y = xy_downsample(
# new_x,
# new_y,
# uppx,
# )
# profiler(f'fast path downsample redraw={should_ds}')
append_path = self.draw_path(
x=x_1d,
y=y_1d,
connect=connect,
path=fast_path,
)
profiler('generated append qpath')
if use_fpath:
# an attempt at trying to make append-updates faster..
if fast_path is None:
fast_path = append_path
# fast_path.reserve(int(6e3))
else:
# print(
# f'{self.viz.name}: FAST PATH\n'
# f"append_path br: {append_path.boundingRect()}\n"
# f"path size: {size}\n"
# f"append_path len: {append_path.length()}\n"
# f"fast_path len: {fast_path.length()}\n"
# )
fast_path.connectPath(append_path)
size = fast_path.capacity()
profiler(f'connected fast path w size: {size}')
# graphics.path.moveTo(new_x[0], new_y[0])
# path.connectPath(append_path)
# XXX: lol this causes a hang..
# graphics.path = graphics.path.simplified()
else:
size = path.capacity()
profiler(f'connected history path w size: {size}')
path.connectPath(append_path)
self.path = path
self.fast_path = fast_path
return self.path, reset

View File

@ -35,13 +35,9 @@ from collections import defaultdict
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from functools import partial from functools import partial
from typing import ( from typing import (
Optional, Optional, Callable,
Callable, Awaitable, Sequence,
Awaitable, Any, AsyncIterator
Sequence,
Any,
AsyncIterator,
Iterator,
) )
import time import time
# from pprint import pformat # from pprint import pformat
@ -123,7 +119,7 @@ class CompleterView(QTreeView):
# TODO: size this based on DPI font # TODO: size this based on DPI font
self.setIndentation(_font.px_size) self.setIndentation(_font.px_size)
self.setUniformRowHeights(True) # self.setUniformRowHeights(True)
# self.setColumnWidth(0, 3) # self.setColumnWidth(0, 3)
# self.setVerticalBarPolicy(Qt.ScrollBarAlwaysOff) # self.setVerticalBarPolicy(Qt.ScrollBarAlwaysOff)
# self.setSizeAdjustPolicy(QAbstractScrollArea.AdjustIgnored) # self.setSizeAdjustPolicy(QAbstractScrollArea.AdjustIgnored)
@ -142,31 +138,15 @@ class CompleterView(QTreeView):
model.setHorizontalHeaderLabels(labels) model.setHorizontalHeaderLabels(labels)
self._font_size: int = 0 # pixels self._font_size: int = 0 # pixels
self._init: bool = False
async def on_pressed( async def on_pressed(self, idx: QModelIndex) -> None:
self, '''Mouse pressed on view handler.
idx: QModelIndex,
) -> None:
'''
Mouse pressed on view handler.
''' '''
search = self.parent() search = self.parent()
await search.chart_current_item(clear_to_cache=False)
await search.chart_current_item(
clear_to_cache=True,
)
# XXX: this causes Qt to hang and segfault..lovely
# self.show_cache_entries(
# only=True,
# keep_current_item_selected=True,
# )
search.focus() search.focus()
def set_font_size(self, size: int = 18): def set_font_size(self, size: int = 18):
# print(size) # print(size)
if size < 0: if size < 0:
@ -176,64 +156,56 @@ class CompleterView(QTreeView):
self.setStyleSheet(f"font: {size}px") self.setStyleSheet(f"font: {size}px")
def resize_to_results( # def resizeEvent(self, event: 'QEvent') -> None:
self, # event.accept()
w: Optional[float] = 0, # super().resizeEvent(event)
h: Optional[float] = None,
) -> None: def on_resize(self) -> None:
'''
Resize relay event from god.
'''
self.resize_to_results()
def resize_to_results(self):
model = self.model() model = self.model()
cols = model.columnCount() cols = model.columnCount()
cidx = self.selectionModel().currentIndex() # rows = model.rowCount()
rows = model.rowCount()
self.expandAll()
# compute the approx height in pixels needed to include
# all result rows in view.
row_h = rows_h = self.rowHeight(cidx) * (rows + 1)
for idx, item in self.iter_df_rows():
row_h = self.rowHeight(idx)
rows_h += row_h
# print(f'row_h: {row_h}\nrows_h: {rows_h}')
# TODO: could we just break early here on detection
# of ``rows_h >= h``?
col_w_tot = 0 col_w_tot = 0
for i in range(cols): for i in range(cols):
# only slap in a rows's height's worth
# of padding once at startup.. no idea
if (
not self._init
and row_h
):
col_w_tot = row_h
self._init = True
self.resizeColumnToContents(i) self.resizeColumnToContents(i)
col_w_tot += self.columnWidth(i) col_w_tot += self.columnWidth(i)
# NOTE: if the height `h` set here is **too large** then the win = self.window()
# resize event will perpetually trigger as the window causes win_h = win.height()
# some kind of recompute of callbacks.. so we have to ensure edit_h = self.parent().bar.height()
# it's limited. sb_h = win.statusBar().height()
if h:
h: int = round(h)
abs_mx = round(0.91 * h)
self.setMaximumHeight(abs_mx)
if rows_h <= abs_mx: # TODO: probably make this more general / less hacky
# self.setMinimumHeight(rows_h) # we should figure out the exact number of rows to allow
self.setMinimumHeight(rows_h) # inclusive of search bar and header "rows", in pixel terms.
# self.setFixedHeight(rows_h) # Eventually when we have an "info" widget below the results we
# will want space for it and likely terminating the results-view
# space **exactly on a row** would be ideal.
# if row_px > 0:
# rows = ceil(window_h / row_px) - 4
# else:
# rows = 16
# self.setFixedHeight(rows * row_px)
# self.resize(self.width(), rows * row_px)
else: # NOTE: if the height set here is **too large** then the resize
self.setMinimumHeight(abs_mx) # event will perpetually trigger as the window causes some kind
# of recompute of callbacks.. so we have to ensure it's limited.
h = win_h - (edit_h + 1.666*sb_h)
assert h > 0
self.setFixedHeight(round(h))
# dynamically size to width of longest result seen # size to width of longest result seen thus far
curr_w = self.width() # TODO: should we always dynamically scale to longest result?
if curr_w < col_w_tot: if self.width() < col_w_tot:
self.setMinimumWidth(col_w_tot) self.setFixedWidth(col_w_tot)
self.update() self.update()
@ -302,7 +274,7 @@ class CompleterView(QTreeView):
def select_first(self) -> QStandardItem: def select_first(self) -> QStandardItem:
''' '''
Select the first depth >= 2 entry from the completer tree and Select the first depth >= 2 entry from the completer tree and
return its item. return it's item.
''' '''
# ensure we're **not** selecting the first level parent node and # ensure we're **not** selecting the first level parent node and
@ -359,23 +331,6 @@ class CompleterView(QTreeView):
item = model.itemFromIndex(idx) item = model.itemFromIndex(idx)
yield idx, item yield idx, item
def iter_df_rows(
self,
iparent: QModelIndex = QModelIndex(),
) -> Iterator[tuple[QModelIndex, QStandardItem]]:
model = self.model()
isections = model.rowCount(iparent)
for i in range(isections):
idx = model.index(i, 0, iparent)
item = model.itemFromIndex(idx)
yield idx, item
if model.hasChildren(idx):
# recursively yield child items depth-first
yield from self.iter_df_rows(idx)
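A standalone sketch of the same depth-first traversal over a tiny `QStandardItemModel` (the symbol names are illustrative; a `QApplication` is created only so the Qt types can be instantiated safely):

from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import QModelIndex

app = QApplication([])

model = QStandardItemModel()
cache = QStandardItem('cache')
cache.appendRow(QStandardItem('xbtusdt.kraken'))
cache.appendRow(QStandardItem('eurusd.ib'))
model.appendRow(cache)

def iter_df_rows(model, iparent=QModelIndex()):
    # yield (index, item) pairs depth-first, recursing into children
    for i in range(model.rowCount(iparent)):
        idx = model.index(i, 0, iparent)
        yield idx, model.itemFromIndex(idx)
        if model.hasChildren(idx):
            yield from iter_df_rows(model, idx)

for idx, item in iter_df_rows(model):
    print('  ' * int(idx.parent().isValid()) + item.text())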
def find_section( def find_section(
self, self,
section: str, section: str,
@ -399,8 +354,7 @@ class CompleterView(QTreeView):
status_field: str = None, status_field: str = None,
) -> None: ) -> None:
''' '''Clear all result-rows from under the depth = 1 section.
Clear all result-rows from under the depth = 1 section.
''' '''
idx = self.find_section(section) idx = self.find_section(section)
@ -421,6 +375,8 @@ class CompleterView(QTreeView):
else: else:
model.setItem(idx.row(), 1, QStandardItem()) model.setItem(idx.row(), 1, QStandardItem())
self.resize_to_results()
return idx return idx
else: else:
return None return None
@ -430,26 +386,12 @@ class CompleterView(QTreeView):
section: str, section: str,
values: Sequence[str], values: Sequence[str],
clear_all: bool = False, clear_all: bool = False,
reverse: bool = False,
) -> None: ) -> None:
''' '''
Set result-rows for depth = 1 tree section ``section``. Set result-rows for depth = 1 tree section ``section``.
''' '''
if (
values
and not isinstance(values[0], str)
):
flattened: list[str] = []
for val in values:
flattened.extend(val)
values = flattened
if reverse:
values = reversed(values)
model = self.model() model = self.model()
if clear_all: if clear_all:
# XXX: rewrite the model from scratch if caller requests it # XXX: rewrite the model from scratch if caller requests it
@ -502,22 +444,9 @@ class CompleterView(QTreeView):
self.show_matches() self.show_matches()
def show_matches( def show_matches(self) -> None:
self,
wh: Optional[tuple[float, float]] = None,
) -> None:
if wh:
self.resize_to_results(*wh)
else:
# case where it's just an update from results and *NOT*
# a resize of some higher level parent-container widget.
search = self.parent()
w, h = search.space_dims()
self.resize_to_results(w=w, h=h)
self.show() self.show()
self.resize_to_results()
class SearchBar(Edit): class SearchBar(Edit):
@ -537,15 +466,18 @@ class SearchBar(Edit):
self.godwidget = godwidget self.godwidget = godwidget
super().__init__(parent, **kwargs) super().__init__(parent, **kwargs)
self.view: CompleterView = view self.view: CompleterView = view
godwidget._widgets[view.mode_name] = view
def show(self) -> None:
super().show()
self.view.show_matches()
def unfocus(self) -> None: def unfocus(self) -> None:
self.parent().hide() self.parent().hide()
self.clearFocus() self.clearFocus()
def hide(self) -> None:
if self.view: if self.view:
self.view.hide() self.view.hide()
super().hide()
class SearchWidget(QtWidgets.QWidget): class SearchWidget(QtWidgets.QWidget):
@ -564,16 +496,15 @@ class SearchWidget(QtWidgets.QWidget):
parent=None, parent=None,
) -> None: ) -> None:
super().__init__(parent) super().__init__(parent or godwidget)
# size it as we specify # size it as we specify
self.setSizePolicy( self.setSizePolicy(
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed,
QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding,
) )
self.godwidget = godwidget self.godwidget = godwidget
godwidget.reg_for_resize(self)
self.vbox = QtWidgets.QVBoxLayout(self) self.vbox = QtWidgets.QVBoxLayout(self)
self.vbox.setContentsMargins(0, 4, 4, 0) self.vbox.setContentsMargins(0, 4, 4, 0)
@ -623,53 +554,20 @@ class SearchWidget(QtWidgets.QWidget):
self.vbox.setAlignment(self.view, Qt.AlignTop | Qt.AlignLeft) self.vbox.setAlignment(self.view, Qt.AlignTop | Qt.AlignLeft)
def focus(self) -> None: def focus(self) -> None:
self.show()
if self.view.model().rowCount(QModelIndex()) == 0:
# fill cache list if nothing existing
self.view.set_section_entries(
'cache',
list(reversed(self.godwidget._chart_cache)),
clear_all=True,
)
self.bar.focus() self.bar.focus()
self.show()
def show_cache_entries( def get_current_item(self) -> Optional[tuple[str, str]]:
self, '''Return the current completer tree selection as
only: bool = False,
keep_current_item_selected: bool = False,
) -> None:
'''
Clear the search results view and show only cached (aka recently
loaded with active data) feeds in the results section.
'''
godw = self.godwidget
# first entry in the cache is the current symbol(s)
fqsns = set()
for multi_fqsns in list(godw._chart_cache):
for fqsn in set(multi_fqsns):
fqsns.add(fqsn)
if keep_current_item_selected:
sel = self.view.selectionModel()
cidx = sel.currentIndex()
self.view.set_section_entries(
'cache',
list(fqsns),
# remove all other completion results except for cache
clear_all=only,
reverse=True,
)
if (
keep_current_item_selected
and cidx.isValid()
):
# set current selection back to what it was before filling out
# the view results.
self.view.select_from_idx(cidx)
else:
self.view.select_first()
def get_current_item(self) -> tuple[QModelIndex, str, str] | None:
'''
Return the current completer tree selection as
a tuple ``(parent: str, child: str)`` if valid, else ``None``. a tuple ``(parent: str, child: str)`` if valid, else ``None``.
''' '''
@ -695,11 +593,7 @@ class SearchWidget(QtWidgets.QWidget):
if provider == 'cache': if provider == 'cache':
symbol, _, provider = symbol.rpartition('.') symbol, _, provider = symbol.rpartition('.')
return ( return provider, symbol
cidx,
provider,
symbol,
)
else: else:
return None return None
@ -709,8 +603,7 @@ class SearchWidget(QtWidgets.QWidget):
clear_to_cache: bool = True, clear_to_cache: bool = True,
) -> Optional[str]: ) -> Optional[str]:
''' '''Attempt to load and switch the currently selected
Attempt to load and switch the currently selected
completion result to the affiliated chart app. completion result to the affiliated chart app.
Return any loaded symbol. Return any loaded symbol.
@ -720,16 +613,15 @@ class SearchWidget(QtWidgets.QWidget):
if value is None: if value is None:
return None return None
cidx, provider, symbol = value provider, symbol = value
godw = self.godwidget chart = self.godwidget
fqsn = f'{symbol}.{provider}' log.info(f'Requesting symbol: {symbol}.{provider}')
log.info(f'Requesting symbol: {fqsn}')
# assert provider in symbol await chart.load_symbol(
await godw.load_symbols( provider,
fqsns=[fqsn], symbol,
loglevel='info', 'info',
) )
# fully qualified symbol name (SNS i guess is what we're # fully qualified symbol name (SNS i guess is what we're
@ -743,48 +635,18 @@ class SearchWidget(QtWidgets.QWidget):
# Re-order the symbol cache on the chart to display in # Re-order the symbol cache on the chart to display in
# LIFO order. this is normally only done internally by # LIFO order. this is normally only done internally by
# the chart on new symbols being loaded into memory # the chart on new symbols being loaded into memory
godw.set_chart_symbols( chart.set_chart_symbol(fqsn, chart.linkedsplits)
(fqsn,), (
godw.hist_linked, self.view.set_section_entries(
godw.rt_linked, 'cache',
) values=list(reversed(chart._chart_cache)),
)
self.show_cache_entries( # remove all other completion results except for cache
only=True, clear_all=True,
) )
self.bar.focus()
return fqsn return fqsn
def space_dims(self) -> tuple[float, float]:
'''
Compute and return the "available space dimentions" for this
search widget in terms of px space for results by return the
pair of width and height.
'''
# XXX: dun need dis rite?
# win = self.window()
# win_h = win.height()
# sb_h = win.statusBar().height()
godw = self.godwidget
hl = godw.hist_linked
edit_h = self.bar.height()
h = hl.height() - edit_h
w = hl.width()
return w, h
def on_resize(self) -> None:
'''
Resize relay event from god; resize all child widgets.
Right now this just resizes the view to its contents and/or the
fast chart height.
'''
w, h = self.space_dims()
self.bar.view.show_matches(wh=(w, h))
_search_active: trio.Event = trio.Event() _search_active: trio.Event = trio.Event()
_search_enabled: bool = False _search_enabled: bool = False
@ -820,10 +682,9 @@ async def pack_matches(
with trio.CancelScope() as cs: with trio.CancelScope() as cs:
task_status.started(cs) task_status.started(cs)
# ensure ^ status is updated # ensure ^ status is updated
results = list(await search(pattern)) results = await search(pattern)
# XXX: don't cache the cache results xD if provider != 'cache': # XXX: don't cache the cache results xD
if provider != 'cache':
matches[(provider, pattern)] = results matches[(provider, pattern)] = results
# print(f'results from {provider}: {results}') # print(f'results from {provider}: {results}')
@ -851,11 +712,10 @@ async def fill_results(
max_pause_time: float = 6/16 + 0.001, max_pause_time: float = 6/16 + 0.001,
) -> None: ) -> None:
''' """Task to search through providers and fill in possible
Task to search through providers and fill in possible
completion results. completion results.
''' """
global _search_active, _search_enabled, _searcher_cache global _search_active, _search_enabled, _searcher_cache
bar = search.bar bar = search.bar
@ -869,10 +729,6 @@ async def fill_results(
matches = defaultdict(list) matches = defaultdict(list)
has_results: defaultdict[str, set[str]] = defaultdict(set) has_results: defaultdict[str, set[str]] = defaultdict(set)
# show cached feed list at startup
search.show_cache_entries()
search.on_resize()
while True: while True:
await _search_active.wait() await _search_active.wait()
period = None period = None
@ -886,7 +742,7 @@ async def fill_results(
pattern = await recv_chan.receive() pattern = await recv_chan.receive()
period = time.time() - wait_start period = time.time() - wait_start
log.debug(f'{pattern} after {period}') print(f'{pattern} after {period}')
# during fast multiple key inputs, wait until a pause # during fast multiple key inputs, wait until a pause
# (in typing) to initiate search # (in typing) to initiate search
@ -924,9 +780,8 @@ async def fill_results(
# it hasn't already been searched with the current # it hasn't already been searched with the current
# input pattern (in which case just look up the old # input pattern (in which case just look up the old
# results). # results).
if ( if (period >= pause) and (
period >= pause provider not in already_has_results
and provider not in already_has_results
): ):
# TODO: it may make more sense TO NOT search the # TODO: it may make more sense TO NOT search the
@ -934,9 +789,7 @@ async def fill_results(
# cpu-bound. # cpu-bound.
if provider != 'cache': if provider != 'cache':
view.clear_section( view.clear_section(
provider, provider, status_field='-> searchin..')
status_field='-> searchin..',
)
await n.start( await n.start(
pack_matches, pack_matches,
@ -957,20 +810,11 @@ async def fill_results(
# re-searching its ``dict`` since it's easier # re-searching its ``dict`` since it's easier
# but it also causes it to be slower than cached # but it also causes it to be slower than cached
# results from other providers on occasion. # results from other providers on occasion.
if ( if results and provider != 'cache':
results view.set_section_entries(
): section=provider,
if provider != 'cache': values=results,
view.set_section_entries( )
section=provider,
values=results,
)
else:
# if provider == 'cache':
# for the cache just show what we got
# that matches
search.show_cache_entries()
else: else:
view.clear_section(provider) view.clear_section(provider)
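For reference, the debounce noted above (wait for a pause in typing before kicking off provider searches) reduces to draining the pattern channel until nothing new arrives for roughly ``max_pause_time`` seconds. A minimal ``trio`` sketch of that pattern, with illustrative names only (this is not the module's actual helper)::

    import trio

    async def wait_for_typing_pause(
        recv_chan: trio.MemoryReceiveChannel,
        min_pause: float = 6/16,  # roughly the ``max_pause_time`` above
    ) -> str:
        # block until at least one pattern has been typed
        pattern: str = await recv_chan.receive()
        while True:
            # if nothing newer arrives within ``min_pause`` seconds,
            # treat typing as "paused" and return the latest pattern
            with trio.move_on_after(min_pause) as cs:
                pattern = await recv_chan.receive()
            if cs.cancelled_caught:
                return pattern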
@ -992,11 +836,13 @@ async def handle_keyboard_input(
global _search_active, _search_enabled global _search_active, _search_enabled
# startup # startup
searchw = searchbar.parent() bar = searchbar
godwidget = searchw.godwidget search = searchbar.parent()
view = searchbar.view godwidget = search.godwidget
view.set_font_size(searchbar.dpi_font.px_size) view = bar.view
send, recv = trio.open_memory_channel(616) view.set_font_size(bar.dpi_font.px_size)
send, recv = trio.open_memory_channel(16)
async with trio.open_nursery() as n: async with trio.open_nursery() as n:
@ -1006,15 +852,11 @@ async def handle_keyboard_input(
n.start_soon( n.start_soon(
partial( partial(
fill_results, fill_results,
searchw, search,
recv, recv,
) )
) )
searchbar.focus()
searchw.show_cache_entries()
await trio.sleep(0)
async for kbmsg in recv_chan: async for kbmsg in recv_chan:
event, etype, key, mods, txt = kbmsg.to_tuple() event, etype, key, mods, txt = kbmsg.to_tuple()
@ -1024,29 +866,19 @@ async def handle_keyboard_input(
if mods == Qt.ControlModifier: if mods == Qt.ControlModifier:
ctl = True ctl = True
if key in ( if key in (Qt.Key_Enter, Qt.Key_Return):
Qt.Key_Enter,
Qt.Key_Return await search.chart_current_item(clear_to_cache=True)
):
_search_enabled = False _search_enabled = False
await searchw.chart_current_item(clear_to_cache=True) continue
# XXX: causes hang and segfault.. elif not ctl and not bar.text():
# searchw.show_cache_entries( # if nothing in search text show the cache
# only=True, view.set_section_entries(
# keep_current_item_selected=True, 'cache',
# ) list(reversed(godwidget._chart_cache)),
clear_all=True,
view.show_matches() )
searchw.focus()
elif (
not ctl
and not searchbar.text()
):
# TODO: really should factor this somewhere..bc
# we're doin it in another spot as well..
searchw.show_cache_entries(only=True)
continue continue
# cancel and close # cancel and close
@ -1055,7 +887,7 @@ async def handle_keyboard_input(
Qt.Key_Space, # i feel like this is the "native" one Qt.Key_Space, # i feel like this is the "native" one
Qt.Key_Alt, Qt.Key_Alt,
}: }:
searchbar.unfocus() search.bar.unfocus()
# kill the search and focus back on main chart # kill the search and focus back on main chart
if godwidget: if godwidget:
@ -1063,95 +895,68 @@ async def handle_keyboard_input(
continue continue
if ( if ctl and key in {
ctl Qt.Key_L,
and key in {Qt.Key_L} }:
):
# like url (link) highlight in a web browser # like url (link) highlight in a web browser
searchbar.focus() bar.focus()
# selection navigation controls # selection navigation controls
elif ( elif ctl and key in {
ctl Qt.Key_D,
and key in {Qt.Key_D} }:
):
view.next_section(direction='down') view.next_section(direction='down')
_search_enabled = False _search_enabled = False
elif ( elif ctl and key in {
ctl Qt.Key_U,
and key in {Qt.Key_U} }:
):
view.next_section(direction='up') view.next_section(direction='up')
_search_enabled = False _search_enabled = False
# selection navigation controls # selection navigation controls
elif ( elif (ctl and key in {
ctl and (
key in {
Qt.Key_K,
Qt.Key_J,
}
or key in { Qt.Key_K,
Qt.Key_Up, Qt.Key_J,
Qt.Key_Down,
} }) or key in {
)
): Qt.Key_Up,
Qt.Key_Down,
}:
_search_enabled = False _search_enabled = False
if key in {Qt.Key_K, Qt.Key_Up}:
if key in {
Qt.Key_K,
Qt.Key_Up
}:
item = view.select_previous() item = view.select_previous()
elif key in { elif key in {Qt.Key_J, Qt.Key_Down}:
Qt.Key_J,
Qt.Key_Down,
}:
item = view.select_next() item = view.select_next()
if item: if item:
parent_item = item.parent() parent_item = item.parent()
# if we're in the cache section and thus the next if parent_item and parent_item.text() == 'cache':
# selection is a cache item, switch and show it
# immediately since it should be very fast. # if it's a cache item, switch and show it immediately
if ( await search.chart_current_item(clear_to_cache=False)
parent_item
and parent_item.text() == 'cache'
):
await searchw.chart_current_item(clear_to_cache=False)
# ACTUAL SEARCH BLOCK #
# where we fuzzy complete and fill out sections.
elif not ctl: elif not ctl:
# relay to completer task # relay to completer task
_search_enabled = True _search_enabled = True
send.send_nowait(searchw.bar.text()) send.send_nowait(search.bar.text())
_search_active.set() _search_active.set()
async def search_simple_dict( async def search_simple_dict(
text: str, text: str,
source: dict, source: dict,
) -> dict[str, Any]: ) -> dict[str, Any]:
tokens = []
for key in source:
if not isinstance(key, str):
tokens.extend(key)
else:
tokens.append(key)
# search routine can be specified as a function such # search routine can be specified as a function such
# as in the case of the current app's local symbol cache # as in the case of the current app's local symbol cache
matches = fuzzy.extractBests( matches = fuzzy.extractBests(
text, text,
tokens, source.keys(),
score_cutoff=90, score_cutoff=90,
) )
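The cache search above is just a fuzzy string match over the flattened key tokens. A minimal sketch, assuming ``fuzzy`` refers to ``fuzzywuzzy.process`` (the cache contents and cutoff here are illustrative)::

    from fuzzywuzzy import process as fuzzy

    cache = {
        'btcusdt.binance': None,
        'xbtusd.kraken': None,
        'ethusdt.binance': None,
    }

    # best-first list of (token, score) pairs above the cutoff
    matches = fuzzy.extractBests(
        'btc',
        list(cache.keys()),
        score_cutoff=50,
    )
    print(matches)  # e.g. [('btcusdt.binance', <score>), ...]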

View File

@ -240,12 +240,12 @@ def hcolor(name: str) -> str:
'gunmetal': '#91A3B0', 'gunmetal': '#91A3B0',
'battleship': '#848482', 'battleship': '#848482',
# default ohlc-bars/curve gray
'bracket': '#666666', # like the logo
# bluish # bluish
'charcoal': '#36454F', 'charcoal': '#36454F',
# default bars
'bracket': '#666666', # like the logo
# work well for filled polygons which want a 'bracket' feel # work well for filled polygons which want a 'bracket' feel
# going light to dark # going light to dark
'davies': '#555555', 'davies': '#555555',

View File

@ -21,29 +21,15 @@ Qt main window singletons and stuff.
import os import os
import signal import signal
import time import time
from typing import ( from typing import Callable, Optional, Union
Callable,
Optional,
Union,
)
import uuid import uuid
from pyqtgraph import QtGui
from PyQt5 import QtCore from PyQt5 import QtCore
from PyQt5.QtWidgets import ( from PyQt5.QtWidgets import QLabel, QStatusBar
QWidget,
QMainWindow,
QApplication,
QLabel,
QStatusBar,
)
from PyQt5.QtGui import (
QScreen,
QCloseEvent,
)
from ..log import get_logger from ..log import get_logger
from ._style import _font_small, hcolor from ._style import _font_small, hcolor
from ._chart import GodWidget
log = get_logger(__name__) log = get_logger(__name__)
@ -162,13 +148,12 @@ class MultiStatus:
self.bar.clearMessage() self.bar.clearMessage()
class MainWindow(QMainWindow): class MainWindow(QtGui.QMainWindow):
# XXX: for tiling wms this should scale # XXX: for tiling wms this should scale
# with the allotted window size. # with the allotted window size.
# TODO: detect for tiling and if untrue set some size? # TODO: detect for tiling and if untrue set some size?
# size = (300, 500) size = (300, 500)
godwidget: GodWidget
title = 'piker chart (ur symbol is loading bby)' title = 'piker chart (ur symbol is loading bby)'
@ -177,20 +162,17 @@ class MainWindow(QMainWindow):
# self.setMinimumSize(*self.size) # self.setMinimumSize(*self.size)
self.setWindowTitle(self.title) self.setWindowTitle(self.title)
# set by runtime after `trio` is engaged.
self.godwidget: Optional[GodWidget] = None
self._status_bar: QStatusBar = None self._status_bar: QStatusBar = None
self._status_label: QLabel = None self._status_label: QLabel = None
self._size: Optional[tuple[int, int]] = None self._size: Optional[tuple[int, int]] = None
@property @property
def mode_label(self) -> QLabel: def mode_label(self) -> QtGui.QLabel:
# init mode label # init mode label
if not self._status_label: if not self._status_label:
self._status_label = label = QLabel() self._status_label = label = QtGui.QLabel()
label.setStyleSheet( label.setStyleSheet(
f"""QLabel {{ f"""QLabel {{
color : {hcolor('gunmetal')}; color : {hcolor('gunmetal')};
@ -212,7 +194,8 @@ class MainWindow(QMainWindow):
def closeEvent( def closeEvent(
self, self,
event: QCloseEvent,
event: QtGui.QCloseEvent,
) -> None: ) -> None:
'''Cancel the root actor asap. '''Cancel the root actor asap.
@ -252,8 +235,8 @@ class MainWindow(QMainWindow):
def on_focus_change( def on_focus_change(
self, self,
last: QWidget, last: QtGui.QWidget,
current: QWidget, current: QtGui.QWidget,
) -> None: ) -> None:
@ -264,12 +247,11 @@ class MainWindow(QMainWindow):
name = getattr(current, 'mode_name', '') name = getattr(current, 'mode_name', '')
self.set_mode_name(name) self.set_mode_name(name)
def current_screen(self) -> QScreen: def current_screen(self) -> QtGui.QScreen:
''' """Get a frickin screen (if we can, gawd).
Get a frickin screen (if we can, gawd).
''' """
app = QApplication.instance() app = QtGui.QApplication.instance()
for _ in range(3): for _ in range(3):
screen = app.screenAt(self.pos()) screen = app.screenAt(self.pos())
@ -302,7 +284,7 @@ class MainWindow(QMainWindow):
''' '''
# https://stackoverflow.com/a/18975846 # https://stackoverflow.com/a/18975846
if not size and not self._size: if not size and not self._size:
# app = QApplication.instance() app = QtGui.QApplication.instance()
geo = self.current_screen().geometry() geo = self.current_screen().geometry()
h, w = geo.height(), geo.width() h, w = geo.height(), geo.width()
# use approx 1/3 of the area of the screen by default # use approx 1/3 of the area of the screen by default
@ -310,36 +292,9 @@ class MainWindow(QMainWindow):
self.resize(*size or self._size) self.resize(*size or self._size)
def resizeEvent(self, event: QtCore.QEvent) -> None:
if (
# event.spontaneous()
event.oldSize().height == event.size().height
):
event.ignore()
return
# XXX: uncomment for debugging..
# attrs = {}
# for key in dir(event):
# if key == '__dir__':
# continue
# attr = getattr(event, key)
# try:
# attrs[key] = attr()
# except TypeError:
# attrs[key] = attr
# from pprint import pformat
# print(
# f'{pformat(attrs)}\n'
# f'WINDOW RESIZE: {self.size()}\n\n'
# )
self.godwidget.on_win_resize(event)
event.accept()
# singleton app per actor # singleton app per actor
_qt_win: QMainWindow = None _qt_win: QtGui.QMainWindow = None
def main_window() -> MainWindow: def main_window() -> MainWindow:
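The module-level ``_qt_win`` above carries the per-actor singleton: the window is stashed once at startup and retrieved everywhere else through ``main_window()``. A rough sketch of what such an accessor pair typically looks like (the setter and guard shown here are illustrative, not necessarily the exact bodies in this diff)::

    _qt_win: MainWindow | None = None

    def set_main_window(win: MainWindow) -> None:
        # called once during Qt/app startup
        global _qt_win
        _qt_win = win

    def main_window() -> MainWindow:
        # actor-global accessor used by the rest of the UI code
        if _qt_win is None:
            raise RuntimeError('No main window exists yet!')
        return _qt_win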

View File

@ -46,10 +46,8 @@ def _kivy_import_hack():
@click.argument('name', nargs=1, required=True) @click.argument('name', nargs=1, required=True)
@click.pass_obj @click.pass_obj
def monitor(config, rate, name, dhost, test, tl): def monitor(config, rate, name, dhost, test, tl):
''' """Start a real-time watchlist UI
Start a real-time watchlist UI """
'''
# global opts # global opts
brokermod = config['brokermods'][0] brokermod = config['brokermods'][0]
loglevel = config['loglevel'] loglevel = config['loglevel']
@ -72,12 +70,8 @@ def monitor(config, rate, name, dhost, test, tl):
) as portal: ) as portal:
# run app "main" # run app "main"
await _async_main( await _async_main(
name, name, portal, tickers,
portal, brokermod, rate, test=test,
tickers,
brokermod,
rate,
test=test,
) )
tractor.run( tractor.run(
@ -128,7 +122,7 @@ def optschain(config, symbol, date, rate, test):
@cli.command() @cli.command()
@click.option( @click.option(
'--profile', '--profile',
# '-p', '-p',
default=None, default=None,
help='Enable pyqtgraph profiling' help='Enable pyqtgraph profiling'
) )
@ -137,14 +131,9 @@ def optschain(config, symbol, date, rate, test):
is_flag=True, is_flag=True,
help='Enable tractor debug mode' help='Enable tractor debug mode'
) )
@click.argument('symbols', nargs=-1, required=True) @click.argument('symbol', required=True)
@click.pass_obj @click.pass_obj
def chart( def chart(config, symbol, profile, pdb):
config,
symbols: list[str],
profile,
pdb: bool,
):
''' '''
Start a real-time charting UI Start a real-time charting UI
@ -155,27 +144,24 @@ def chart(
_profile._pg_profile = True _profile._pg_profile = True
_profile.ms_slower_then = float(profile) _profile.ms_slower_then = float(profile)
# Qt UI entrypoint
from ._app import _main from ._app import _main
for symbol in symbols: if '.' not in symbol:
if '.' not in symbol: click.echo(click.style(
click.echo(click.style( f'symbol: {symbol} must have a {symbol}.<provider> suffix',
f'symbol: {symbol} must have a {symbol}.<provider> suffix', fg='red',
fg='red', ))
)) return
return
# global opts # global opts
brokernames = config['brokers'] brokernames = config['brokers']
brokermods = config['brokermods']
assert brokermods
tractorloglevel = config['tractorloglevel'] tractorloglevel = config['tractorloglevel']
pikerloglevel = config['loglevel'] pikerloglevel = config['loglevel']
_main( _main(
syms=symbols, sym=symbol,
brokermods=brokermods, brokernames=brokernames,
piker_loglevel=pikerloglevel, piker_loglevel=pikerloglevel,
tractor_kwargs={ tractor_kwargs={
'debug_mode': pdb, 'debug_mode': pdb,
@ -184,6 +170,5 @@ def chart(
'enable_modules': [ 'enable_modules': [
'piker.clearing._client' 'piker.clearing._client'
], ],
'registry_addr': config.get('registry_addr'),
}, },
) )

File diff suppressed because it is too large

View File

@ -0,0 +1,3 @@
"""
Super hawt Qt UI components
"""

View File

@ -0,0 +1,67 @@
import sys
from PySide2.QtCharts import QtCharts
from PySide2.QtWidgets import QApplication, QMainWindow
from PySide2.QtCore import Qt, QPointF
from PySide2 import QtGui
import qdarkstyle
data = ((1, 7380, 7520, 7380, 7510, 7324),
(2, 7520, 7580, 7410, 7440, 7372),
(3, 7440, 7650, 7310, 7520, 7434),
(4, 7450, 7640, 7450, 7550, 7480),
(5, 7510, 7590, 7460, 7490, 7502),
(6, 7500, 7590, 7480, 7560, 7512),
(7, 7560, 7830, 7540, 7800, 7584))
app = QApplication([])
# set dark stylesheet
# import pdb; pdb.set_trace()
app.setStyleSheet(qdarkstyle.load_stylesheet_pyside())
series = QtCharts.QCandlestickSeries()
series.setDecreasingColor(Qt.darkRed)
series.setIncreasingColor(Qt.darkGreen)
ma5 = QtCharts.QLineSeries() # 5-day moving-average data line
tm = [] # stores str type data
# in a loop, series and ma5 append corresponding data
for num, o, h, l, c, m in data:
candle = QtCharts.QCandlestickSet(o, h, l, c)
series.append(candle)
ma5.append(QPointF(num, m))
tm.append(str(num))
pen = candle.pen()
# import pdb; pdb.set_trace()
chart = QtCharts.QChart()
# import pdb; pdb.set_trace()
series.setBodyOutlineVisible(False)
series.setCapsVisible(False)
# brush = QtGui.QBrush()
# brush.setColor(Qt.green)
# series.setBrush(brush)
chart.addSeries(series) # candle
chart.addSeries(ma5) # ma5 line
chart.setAnimationOptions(QtCharts.QChart.SeriesAnimations)
chart.createDefaultAxes()
chart.legend().hide()
chart.axisX(series).setCategories(tm)
chart.axisX(ma5).setVisible(False)
view = QtCharts.QChartView(chart)
view.chart().setTheme(QtCharts.QChart.ChartTheme.ChartThemeDark)
view.setRubberBand(QtCharts.QChartView.HorizontalRubberBand)
# chartview.chart().setTheme(QtCharts.QChart.ChartTheme.ChartThemeBlueCerulean)
ui = QMainWindow()
# ui.setGeometry(50, 50, 500, 300)
ui.setCentralWidget(view)
ui.show()
sys.exit(app.exec_())

View File

@ -1,12 +1,13 @@
# we require a pinned dev branch to get some edge features that # we require a pinned dev branch to get some edge features that
# are often untested in tractor's CI and/or being tested by us # are often untested in tractor's CI and/or being tested by us
# first before committing as core features in tractor's base. # first before committing as core features in tractor's base.
-e git+https://github.com/goodboy/tractor.git@piker_pin#egg=tractor -e git+https://github.com/goodboy/tractor.git@master#egg=tractor
# `pyqtgraph` peeps keep breaking, fixing, improving so might as well # `pyqtgraph` peeps keep breaking, fixing, improving so might as well
# pin this to a dev branch that we have more control over especially # pin this to a dev branch that we have more control over especially
# as more graphics stuff gets hashed out. # as more graphics stuff gets hashed out.
-e git+https://github.com/pikers/pyqtgraph.git@master#egg=pyqtgraph -e git+https://github.com/pikers/pyqtgraph.git@piker_pin#egg=pyqtgraph
# our async client for ``marketstore`` (the tsdb) # our async client for ``marketstore`` (the tsdb)
-e git+https://github.com/pikers/anyio-marketstore.git@master#egg=anyio-marketstore -e git+https://github.com/pikers/anyio-marketstore.git@master#egg=anyio-marketstore
@ -17,7 +18,4 @@
# ``asyncvnc`` for sending interactions to ib-gw inside docker # ``asyncvnc`` for sending interactions to ib-gw inside docker
-e git+https://github.com/pikers/asyncvnc.git@main#egg=asyncvnc -e git+https://github.com/pikers/asyncvnc.git@vid_passthrough#egg=asyncvnc
# ``cryptofeed`` for connecting to various crypto exchanges + custom fixes
-e git+https://github.com/pikers/cryptofeed.git@date_parsing#egg=cryptofeed

View File

@ -41,28 +41,26 @@ setup(
}, },
install_requires=[ install_requires=[
'toml', 'toml',
'tomli', # fastest pure py reader
'click', 'click',
'colorlog', 'colorlog',
'attrs', 'attrs',
'pygments', 'pygments',
'colorama', # numba traceback coloring 'colorama', # numba traceback coloring
'msgspec', # performant IPC messaging and structs 'pydantic', # structured data
'protobuf',
# async # async
'trio', 'trio',
'trio-websocket', 'trio-websocket',
'msgspec', # performant IPC messaging
'async_generator', 'async_generator',
# from github currently (see requirements.txt) # from github currently (see requirements.txt)
# 'trimeter', # not released yet.. # 'trimeter', # not released yet..
# 'tractor', # 'tractor',
# asyncvnc, # asyncvnc,
# 'cryptofeed',
# brokers # brokers
'asks', 'asks==2.4.8',
'ib_insync', 'ib_insync',
# numerics # numerics

View File

@ -1,16 +1,10 @@
from contextlib import asynccontextmanager as acm
from functools import partial
import os import os
import pytest import pytest
import tractor import tractor
from piker import ( import trio
# log, from piker import log, config
config, from piker.brokers import questrade
)
from piker._daemon import (
Services,
)
def pytest_addoption(parser): def pytest_addoption(parser):
@ -20,9 +14,13 @@ def pytest_addoption(parser):
help="Use a practice API account") help="Use a practice API account")
@pytest.fixture(scope='session') @pytest.fixture(scope='session', autouse=True)
def loglevel(request) -> str: def loglevel(request):
return request.config.option.loglevel orig = tractor.log._default_loglevel
level = tractor.log._default_loglevel = request.config.option.loglevel
log.get_console_log(level)
yield level
tractor.log._default_loglevel = orig
@pytest.fixture(scope='session') @pytest.fixture(scope='session')
@ -38,15 +36,10 @@ def test_config():
@pytest.fixture(scope='session', autouse=True) @pytest.fixture(scope='session', autouse=True)
def confdir( def confdir(request, test_config):
request, """If the `--confdir` flag is not passed use the
test_config: str,
):
'''
If the `--confdir` flag is not passed use the
broker config file found in that dir. broker config file found in that dir.
"""
'''
confdir = request.config.option.confdir confdir = request.config.option.confdir
if confdir is not None: if confdir is not None:
config._override_config_dir(confdir) config._override_config_dir(confdir)
@ -54,81 +47,61 @@ def confdir(
return confdir return confdir
_ci_env: bool = os.environ.get('CI', False) @pytest.fixture(scope='session', autouse=True)
def travis(confdir):
is_travis = os.environ.get('TRAVIS', False)
if is_travis:
# this directory is cached, see .travis.yaml
conf_file = config.get_broker_conf_path()
refresh_token = os.environ['QT_REFRESH_TOKEN']
def write_with_token(token):
@pytest.fixture(scope='session') # XXX don't pass the dir path here since may be
def ci_env() -> bool: # written behind the scenes in the `confdir fixture`
''' if not os.path.isfile(conf_file):
Detect CI environment. open(conf_file, 'w').close()
conf, path = config.load()
''' conf.setdefault('questrade', {}).update(
return _ci_env {'refresh_token': token,
'is_practice': 'True'}
@acm
async def _open_test_pikerd(
reg_addr: tuple[str, int] | None = None,
loglevel: str = 'warning',
**kwargs,
) -> tuple[
str,
int,
tractor.Portal
]:
'''
Testing helper to start up the service tree and runtime on
a different port than the default to allow testing alongside
a running stack.
'''
import random
from piker._daemon import maybe_open_pikerd
if reg_addr is None:
port = random.randint(6e3, 7e3)
reg_addr = ('127.0.0.1', port)
async with (
maybe_open_pikerd(
registry_addr=reg_addr,
loglevel=loglevel,
**kwargs,
) as service_manager,
):
# this proc/actor is the pikerd
assert service_manager is Services
async with tractor.wait_for_actor(
'pikerd',
arbiter_sockaddr=reg_addr,
) as portal:
raddr = portal.channel.raddr
assert raddr == reg_addr
yield (
raddr[0],
raddr[1],
portal,
service_manager,
) )
config.write(conf, path)
async def ensure_config():
# try to refresh current token using cached brokers config
# if it fails fail try using the refresh token provided by the
# env var and if that fails stop the test run here.
try:
async with questrade.get_client(ask_user=False):
pass
except (
FileNotFoundError, ValueError,
questrade.BrokerError, questrade.QuestradeError,
trio.MultiError,
):
# 3 cases:
# - config doesn't have a ``refresh_token`` k/v
# - cache dir does not exist yet
# - current token is expired; take it from env var
write_with_token(refresh_token)
async with questrade.get_client(ask_user=False):
pass
# XXX ``pytest_trio`` doesn't support scope or autouse
trio.run(ensure_config)
@pytest.fixture @pytest.fixture
def open_test_pikerd( def us_symbols():
request, return ['TSLA', 'AAPL', 'CGC', 'CRON']
loglevel: str,
):
yield partial(
_open_test_pikerd,
# bind in level from fixture, which is itself set by @pytest.fixture
# `--ll <value>` cli flag. def tmx_symbols():
loglevel=loglevel, return ['APHA.TO', 'WEED.TO', 'ACB.TO']
)
# TODO: teardown checks such as,
# - no leaked subprocs or shm buffers @pytest.fixture
# - all requested container services are torn down def cse_symbols():
# - certain ``tractor`` runtime state? return ['TRUL.CN', 'CWEB.CN', 'SNN.CN']
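Since ``open_test_pikerd`` yields a ``partial`` with the log level already bound, a test only needs to call it and enter the returned context manager. A minimal usage sketch (the test name and assertion are illustrative)::

    import trio

    def test_pikerd_comes_up(open_test_pikerd):

        async def main():
            async with open_test_pikerd() as (
                host,
                port,
                portal,
                services,
            ):
                # the registry was bound to a random localhost port
                assert host == '127.0.0.1'

        trio.run(main)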

View File

@ -1,129 +0,0 @@
'''
Data feed layer APIs, performance, msg throttling.
'''
from collections import Counter
from pprint import pprint
from typing import AsyncContextManager
import pytest
# import tractor
import trio
from piker.data import (
ShmArray,
open_feed,
)
from piker.data._source import (
unpack_fqsn,
)
@pytest.mark.parametrize(
'fqsns',
[
# binance
(100, {'btcusdt.binance', 'ethusdt.binance'}, False),
# kraken
(20, {'ethusdt.kraken', 'xbtusd.kraken'}, True),
# binance + kraken
(100, {'btcusdt.binance', 'xbtusd.kraken'}, False),
],
ids=lambda param: f'quotes={param[0]}@fqsns={param[1]}',
)
def test_multi_fqsn_feed(
open_test_pikerd: AsyncContextManager,
fqsns: set[str],
loglevel: str,
ci_env: bool
):
'''
Start a real-time data feed for each provided fqsn and pull
a few quotes then simply shut down.
'''
max_quotes, fqsns, run_in_ci = fqsns
if (
ci_env
and not run_in_ci
):
pytest.skip('Skipping CI disabled test due to feed restrictions')
brokers = set()
for fqsn in fqsns:
brokername, key, suffix = unpack_fqsn(fqsn)
brokers.add(brokername)
async def main():
async with (
open_test_pikerd(),
open_feed(
fqsns,
loglevel=loglevel,
# TODO: ensure throttle rate is applied
# limit to at least display's FPS
# avoiding needless Qt-in-guest-mode context switches
# tick_throttle=_quote_throttle_rate,
) as feed
):
# verify shm buffers exist
for fqin in fqsns:
flume = feed.flumes[fqin]
ohlcv: ShmArray = flume.rt_shm
hist_ohlcv: ShmArray = flume.hist_shm
async with feed.open_multi_stream(brokers) as stream:
# pull the first startup quotes, one for each fqsn, and
# ensure they match each flume's startup quote value.
fqsns_copy = fqsns.copy()
with trio.fail_after(0.5):
for _ in range(1):
first_quotes = await stream.receive()
for fqsn, quote in first_quotes.items():
# XXX: TODO: WTF apparently this error will get
# suppressed and only show up in the teardown
# excgroup if we don't have the fix from
# <tractorbugurl>
# assert 0
fqsns_copy.remove(fqsn)
flume = feed.flumes[fqsn]
assert quote['last'] == flume.first_quote['last']
cntr = Counter()
with trio.fail_after(6):
async for quotes in stream:
for fqsn, quote in quotes.items():
cntr[fqsn] += 1
# await tractor.breakpoint()
flume = feed.flumes[fqsn]
ohlcv: ShmArray = flume.rt_shm
hist_ohlcv: ShmArray = flume.hist_shm
# print quote msg, rt and history
# buffer values on console.
rt_row = ohlcv.array[-1]
hist_row = hist_ohlcv.array[-1]
# last = quote['last']
# assert last == rt_row['close']
# assert last == hist_row['close']
pprint(
f'{fqsn}: {quote}\n'
f'rt_ohlc: {rt_row}\n'
f'hist_ohlc: {hist_row}\n'
)
if cntr.total() >= max_quotes:
break
assert set(cntr.keys()) == fqsns
trio.run(main)

View File

@ -8,6 +8,7 @@ from trio.testing import trio_test
from piker.brokers import questrade as qt from piker.brokers import questrade as qt
import pytest import pytest
import tractor import tractor
from tractor.testing import tractor_test
import piker import piker
from piker.brokers import get_brokermod from piker.brokers import get_brokermod
@ -22,12 +23,6 @@ pytestmark = pytest.mark.skipif(
reason="questrade tests can only be run locally with an API key", reason="questrade tests can only be run locally with an API key",
) )
# TODO: this module was removed from tractor into its
# tests/conftest.py, we need to rewrite the below tests
# to use the `open_pikerd_runtime()` to make these work again
# (if we're not just gonna junk em).
# from tractor.testing import tractor_test
# stock quote # stock quote
_ex_quotes = { _ex_quotes = {
@ -111,22 +106,7 @@ def match_packet(symbols, quotes, feed_type='stock'):
assert not quotes assert not quotes
@pytest.fixture @tractor_test
def us_symbols():
return ['TSLA', 'AAPL', 'CGC', 'CRON']
@pytest.fixture
def tmx_symbols():
return ['APHA.TO', 'WEED.TO', 'ACB.TO']
@pytest.fixture
def cse_symbols():
return ['TRUL.CN', 'CWEB.CN', 'SNN.CN']
# @tractor_test
async def test_concurrent_tokens_refresh(us_symbols, loglevel): async def test_concurrent_tokens_refresh(us_symbols, loglevel):
"""Verify that concurrent requests from mulitple tasks work alongside """Verify that concurrent requests from mulitple tasks work alongside
random token refreshing which simulates an access token expiry + refresh random token refreshing which simulates an access token expiry + refresh
@ -273,8 +253,8 @@ async def stream_option_chain(feed, symbols):
# latency arithmetic # latency arithmetic
loops = 8 loops = 8
# period = 1/3. # 3 rps period = 1/3. # 3 rps
timeout = float('inf') # loops / period timeout = float('inf') #loops / period
try: try:
# it'd sure be nice to have an asyncitertools here... # it'd sure be nice to have an asyncitertools here...
@ -322,8 +302,8 @@ async def stream_stocks(feed, symbols):
symbols, 'stock', rate=3, diff_cached=False) symbols, 'stock', rate=3, diff_cached=False)
# latency arithmetic # latency arithmetic
loops = 8 loops = 8
# period = 1/3. # 3 rps period = 1/3. # 3 rps
# timeout = loops / period timeout = loops / period
try: try:
# it'd sure be nice to have an asyncitertools here... # it'd sure be nice to have an asyncitertools here...
@ -357,7 +337,7 @@ async def stream_stocks(feed, symbols):
'options_and_options', 'options_and_options',
], ],
) )
# @tractor_test @tractor_test
async def test_quote_streaming(tmx_symbols, loglevel, stream_what): async def test_quote_streaming(tmx_symbols, loglevel, stream_what):
"""Set up option streaming using the broker daemon. """Set up option streaming using the broker daemon.
""" """

Some files were not shown because too many files have changed in this diff