Compare commits
130 commits, comparing ``310_plus`` ... ``incr_updat``
Author | SHA1 | Date |
---|---|---|
Tyler Goodlet | 8c1905e35a | |
Tyler Goodlet | d9e2666e80 | |
Tyler Goodlet | 1abe7d87a5 | |
Tyler Goodlet | 2f9d199a7f | |
Tyler Goodlet | 2f3418546f | |
Tyler Goodlet | 729c72a48f | |
Tyler Goodlet | 324dcbbfb0 | |
Tyler Goodlet | 112cba43e5 | |
Tyler Goodlet | 468cd3a381 | |
Tyler Goodlet | e8aaf42cc6 | |
Tyler Goodlet | 1967bc7973 | |
Tyler Goodlet | 303a5cc66c | |
Tyler Goodlet | 2e6b7da4bc | |
Tyler Goodlet | 69d0555180 | |
Tyler Goodlet | 8915f0c0c9 | |
Tyler Goodlet | 946d554716 | |
Tyler Goodlet | 8711465d96 | |
Tyler Goodlet | 0f53450c68 | |
Tyler Goodlet | 1c91cf56de | |
Tyler Goodlet | 4129677ec5 | |
Tyler Goodlet | aa8f9c02f2 | |
Tyler Goodlet | c282555c2f | |
Tyler Goodlet | f4f6ffe819 | |
Tyler Goodlet | 3b15f83e70 | |
Tyler Goodlet | d968f4897f | |
Tyler Goodlet | ac57396acd | |
Tyler Goodlet | 9df96cd182 | |
Tyler Goodlet | bc2ebbc457 | |
Tyler Goodlet | ecefc74d07 | |
Tyler Goodlet | 9505350ff0 | |
Tyler Goodlet | 15fe46091b | |
Tyler Goodlet | e726a3ebef | |
Tyler Goodlet | ba30b5d9bf | |
Tyler Goodlet | 447549e1af | |
Tyler Goodlet | 5ca0d1a19d | |
Tyler Goodlet | 2c3da825e3 | |
Tyler Goodlet | a752a22a4b | |
Tyler Goodlet | 55606cffbb | |
Tyler Goodlet | d3251a2922 | |
Tyler Goodlet | dc4a0a6fd2 | |
Tyler Goodlet | 04c12a756c | |
Tyler Goodlet | cde23361a4 | |
Tyler Goodlet | 3c58847595 | |
Tyler Goodlet | fae249dd2f | |
Tyler Goodlet | 6f5bb9cbe0 | |
Tyler Goodlet | b4c7d02fcb | |
Tyler Goodlet | b2697bfd13 | |
Tyler Goodlet | 14037cd1dc | |
Tyler Goodlet | a935245360 | |
Tyler Goodlet | 8a5538f490 | |
Tyler Goodlet | 90b9c12d25 | |
Tyler Goodlet | aab1a3c565 | |
Tyler Goodlet | 6c136e82b4 | |
Tyler Goodlet | 8388918af0 | |
Tyler Goodlet | 82dbdd6148 | |
Tyler Goodlet | 79eff13e76 | |
Tyler Goodlet | 8e5f5b6be6 | |
Tyler Goodlet | 8110c4c70d | |
Tyler Goodlet | 6f64ff5842 | |
Tyler Goodlet | bd23b6e8f2 | |
Tyler Goodlet | 49f3e15a3c | |
Tyler Goodlet | c985c01c62 | |
Tyler Goodlet | 8e11d79712 | |
Tyler Goodlet | 48cce42c77 | |
Tyler Goodlet | 1fd3513689 | |
Tyler Goodlet | 9d375a0ce5 | |
Tyler Goodlet | c18795e454 | |
Tyler Goodlet | 76287a7523 | |
Tyler Goodlet | b0f659a66b | |
Tyler Goodlet | f7b3215aa4 | |
Tyler Goodlet | c5ed9b5955 | |
Tyler Goodlet | ea9d76ffce | |
Tyler Goodlet | 16f2f6ff94 | |
Tyler Goodlet | 4a383795bf | |
Tyler Goodlet | ef8e71f628 | |
Tyler Goodlet | 21d0d551d3 | |
Tyler Goodlet | 32c3f63cfd | |
Tyler Goodlet | 894dcc2de4 | |
Tyler Goodlet | 8b8ffe78af | |
Tyler Goodlet | 99a37f504f | |
Tyler Goodlet | d3ddcc8206 | |
Tyler Goodlet | 9b1491efc9 | |
Tyler Goodlet | 7175901d0d | |
Tyler Goodlet | bb13f76375 | |
Tyler Goodlet | a682887e63 | |
Tyler Goodlet | 1837e467be | |
Tyler Goodlet | 7df795435e | |
Tyler Goodlet | 8421422768 | |
Tyler Goodlet | 23d386e0f5 | |
Tyler Goodlet | f0c4261aa4 | |
Tyler Goodlet | 2be8f63487 | |
Tyler Goodlet | 24a07fd5e5 | |
Tyler Goodlet | a893537ade | |
Tyler Goodlet | 15d15fdfbf | |
Tyler Goodlet | d1b05246f8 | |
Tyler Goodlet | 96ec4ba28b | |
Tyler Goodlet | fa0be47d66 | |
Tyler Goodlet | 65609a35dc | |
Tyler Goodlet | d8d7757e88 | |
Tyler Goodlet | c439e99f8a | |
Tyler Goodlet | 5c2b9a01e9 | |
Tyler Goodlet | 15d3f99410 | |
Tyler Goodlet | ae8170204f | |
Tyler Goodlet | 7d628c4059 | |
Tyler Goodlet | 6dfe59cce6 | |
Tyler Goodlet | a465a11782 | |
Tyler Goodlet | 807685d27e | |
Tyler Goodlet | aea42ccbd9 | |
Tyler Goodlet | 8fb9308e21 | |
Tyler Goodlet | bbae8ad426 | |
Tyler Goodlet | 893ac7a986 | |
Tyler Goodlet | 91856ddda8 | |
Tyler Goodlet | 25e2e13bd7 | |
Tyler Goodlet | 2427c96336 | |
Tyler Goodlet | 8732b2bd5e | |
Tyler Goodlet | ff9208c15b | |
Tyler Goodlet | a2547a548f | |
Tyler Goodlet | 3873b8619e | |
Tyler Goodlet | 326d05ac82 | |
Tyler Goodlet | dcab99e3d2 | |
Tyler Goodlet | 5a5df21f94 | |
Tyler Goodlet | d7c1286e5d | |
Tyler Goodlet | 8acaa28df0 | |
Tyler Goodlet | bb45100168 | |
Tyler Goodlet | d9ded54e10 | |
Tyler Goodlet | 1bde86a7b2 | |
Guillermo Rodriguez | 5031892dcf | |
Guillermo Rodriguez | afe6f0b42b | |
Guillermo Rodriguez | 5d539b7c49 | |
Tyler Goodlet | e2ce341f93 |
README.rst

@@ -138,7 +138,7 @@ provider support
 ****************
 
 for live data feeds the in-progress set of supported brokers is:
 
-- IB_ via ``ib_insync``, also see our `container docs`_
+- IB_ via ``ib_insync``
 - binance_ and kraken_ for crypto over their public websocket API
 - questrade_ (ish) which comes with effectively free L1

@@ -150,7 +150,6 @@ coming soon...
 if you want your broker supported and they have an API let us know.
 
 .. _IB: https://interactivebrokers.github.io/tws-api/index.html
-.. _container docs: https://github.com/pikers/piker/tree/master/dockering/ib
 .. _questrade: https://www.questrade.com/api/documentation
 .. _kraken: https://www.kraken.com/features/api#public-market-data
 .. _binance: https://github.com/pikers/piker/pull/182
brokers.toml

@@ -12,41 +12,16 @@ api_key = ""
 secret = ""
 
 [ib]
-hosts = [
-    "127.0.0.1",
-]
-# XXX: the order in which ports will be scanned
-# (by the `brokerd` daemon-actor)
-# is determined by the line order here.
-# TODO: when we eventually spawn gateways in our
-# container, we can just dynamically allocate these
-# using IBC.
-ports = [
-    4002,  # gw
-    7497,  # tws
-]
-
-# XXX: for a paper account the flex web query service
-# is not supported so you have to manually download
-# an XML report and put it in a location that can be
-# accessed by the ``brokerd.ib`` backend code for parsing.
-flex_token = '666666666666666666666666'
-flex_trades_query_id = '666666'  # live account
-
-# when clients are being scanned this determines
-# which clients are preferred to be used for data
-# feeds based on the order of account names, if
-# detected as active on an API client.
-prefer_data_account = [
-    'paper',
-    'margin',
-    'ira',
-]
-
-[ib.accounts]
-# the order in which accounts will be selectable
-# in the order mode UI (if found via clients during
-# API-app scanning) when a new symbol is loaded.
-paper = "XX0000000"
-margin = "X0000000"
-ira = "X0000000"
+host = "127.0.0.1"
+
+ports.gw = 4002
+ports.tws = 7497
+ports.order = ["gw", "tws",]
+
+accounts.margin = "X0000000"
+accounts.ira = "X0000000"
+accounts.paper = "XX0000000"
+
+# the order in which accounts will be selected (if found through
+# `brokerd`) when a new symbol is loaded
+accounts_order = ['paper', 'margin', 'ira']
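Note that the two layouts above are equivalent reads for consuming code: TOML
dotted keys such as ``ports.gw = 4002`` parse to the same nested mapping as an
explicit ``[ib.accounts]``-style table. A minimal sketch using the stdlib
``tomllib`` parser (Python 3.11+; the third-party ``toml`` package behaves the
same for this case)::

    import tomllib  # stdlib, read-only TOML parser

    doc = tomllib.loads('''
    [ib]
    host = "127.0.0.1"
    ports.gw = 4002
    ports.tws = 7497
    ''')
    # dotted keys nest under their parent table:
    assert doc['ib']['ports'] == {'gw': 4002, 'tws': 7497}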
dockering/ib/README.rst (deleted)

@@ -1,30 +0,0 @@
-running ``ib`` gateway in ``docker``
-------------------------------------
-We have a config based on the (now defunct)
-image from "waytrade":
-
-https://github.com/waytrade/ib-gateway-docker
-
-To startup this image with our custom settings
-simply run the command::
-
-    docker compose up
-
-And you should have the following socket-available services:
-
-- ``x11vnc1@127.0.0.1:3003``
-- ``ib-gw@127.0.0.1:4002``
-
-You can attach to the container via a VNC client
-without password auth.
-
-SECURITY STUFF!?!?!
--------------------
-Though "``ib``" claims they host filter connections outside
-localhost (aka ``127.0.0.1``) it's probably better if you filter
-the socket at the OS level using a stateless firewall rule::
-
-    ip rule add not unicast iif lo to 0.0.0.0/0 dport 4002
-
-We will soon have this baked into our own custom image but for
-now you'll have to do it urself dawgy.
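For readers without ``ip rule`` policy-routing support, a sketch of one common
alternative filter, assuming ``iptables`` with a default-ACCEPT INPUT chain
(this variant is illustrative only and not from the deleted README)::

    # drop any TCP traffic to the ib-gw API port not arriving on loopback
    iptables -I INPUT -p tcp --dport 4002 ! -i lo -j DROP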
dockering/ib/docker-compose.yml (deleted)

@@ -1,64 +0,0 @@
-# rework from the original @
-# https://github.com/waytrade/ib-gateway-docker/blob/master/docker-compose.yml
-version: "3.5"
-
-services:
-
-  ib-gateway:
-    # other image tags available:
-    # https://github.com/waytrade/ib-gateway-docker#supported-tags
-    image: waytrade/ib-gateway:981.3j
-    restart: always
-    network_mode: 'host'
-
-    volumes:
-      - type: bind
-        source: ./jts.ini
-        target: /root/Jts/jts.ini
-        # don't let IBC clobber this file for
-        # the main reason of not having a stupid
-        # timezone set..
-        read_only: true
-
-      # force our own IBC config
-      - type: bind
-        source: ./ibc.ini
-        target: /root/ibc/config.ini
-
-      # force our noop script - socat isn't needed in host mode.
-      - type: bind
-        source: ./fork_ports_delayed.sh
-        target: /root/scripts/fork_ports_delayed.sh
-
-      # force our noop script - socat isn't needed in host mode.
-      - type: bind
-        source: ./run_x11_vnc.sh
-        target: /root/scripts/run_x11_vnc.sh
-        read_only: true
-
-    # NOTE: to fill these out, define an `.env` file in the same dir as
-    # this compose file which looks something like:
-    # TWS_USERID='myuser'
-    # TWS_PASSWORD='guest'
-    # TRADING_MODE=paper (or live)
-    # VNC_SERVER_PASSWORD='diggity'
-
-    environment:
-      TWS_USERID: ${TWS_USERID}
-      TWS_PASSWORD: ${TWS_PASSWORD}
-      TRADING_MODE: ${TRADING_MODE:-paper}
-      VNC_SERVER_PASSWORD: ${VNC_SERVER_PASSWORD:-}
-
-    # ports:
-    #   - target: 4002
-    #     host_ip: 127.0.0.1
-    #     published: 4002
-    #     protocol: tcp
-
-    # original mappings for use in non-host-mode
-    # which we won't really need going forward since
-    # ideally we just pick the port to have ib-gw listen
-    # on **when** we spawn the container - i.e. everything
-    # will be driven by a ``brokers.toml`` def.
-    # - "127.0.0.1:4001:4001"
-    # - "127.0.0.1:4002:4002"
-    # - "127.0.0.1:5900:5900"
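The trailing comment block sketches the eventual goal: pick the gateway's
listen port at container-spawn time from the user's ``brokers.toml``. A
hypothetical reading of the dotted ``ports.*`` layout shown in the
``brokers.toml`` hunk above (not code from this changeset)::

    import tomllib

    with open('brokers.toml', 'rb') as f:
        conf = tomllib.load(f)

    gw_port = conf['ib']['ports']['gw']  # -> 4002, the port to publish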
dockering/ib/fork_ports_delayed.sh (deleted)

@@ -1,6 +0,0 @@
-#!/bin/sh
-
-# we now just set this to a noop script
-# since we can just run the container in
-# `network_mode: 'host'` and get literally
-# the exact same behaviour XD
dockering/ib/ibc.ini (deleted)

@@ -1,711 +0,0 @@
-# Note that in the comments in this file, TWS refers to both the Trader
-# Workstation and the IB Gateway, unless explicitly stated otherwise.
-#
-# When referred to below, the default value for a setting is the value
-# assumed if either the setting is included but no value is specified, or
-# the setting is not included at all.
-#
-# IBC may also be used to start the FIX CTCI Gateway. All settings
-# relating to this have names prefixed with FIX.
-#
-# The IB API Gateway and the FIX CTCI Gateway share the same code. Which
-# gateway actually runs is governed by an option on the initial gateway
-# login screen. The FIX setting described under IBC Startup
-# Settings below controls this.
-
-
-# =============================================================================
-# 1. IBC Startup Settings
-# =============================================================================
-
-# IBC may be used to start the IB Gateway for the FIX CTCI. This
-# setting must be set to 'yes' if you want to run the FIX CTCI gateway. The
-# default is 'no'.
-
-FIX=no
-
-
-# =============================================================================
-# 2. Authentication Settings
-# =============================================================================
-
-# TWS and the IB API gateway require a single username and password.
-# You may specify the username and password using the following settings:
-#
-#   IbLoginId
-#   IbPassword
-#
-# Alternatively, you can specify the username and password in the command
-# files used to start TWS or the Gateway, but this is not recommended for
-# security reasons.
-#
-# If you don't specify them, you will be prompted for them in the usual
-# login dialog when TWS starts (but whatever you have specified will be
-# included in the dialog automatically: for example you may specify the
-# username but not the password, and then you will be prompted for the
-# password via the login dialog). Note that if you specify either
-# the username or the password (or both) in the command file, then
-# IbLoginId and IbPassword settings defined in this file are ignored.
-#
-#
-# The FIX CTCI gateway requires one username and password for FIX order
-# routing, and optionally a separate username and password for market
-# data connections. You may specify the usernames and passwords using
-# the following settings:
-#
-#   FIXLoginId
-#   FIXPassword
-#   IbLoginId   (optional - for market data connections)
-#   IbPassword  (optional - for market data connections)
-#
-# Alternatively you can specify the FIX username and password in the
-# command file used to start the FIX CTCI Gateway, but this is not
-# recommended for security reasons.
-#
-# If you don't specify them, you will be prompted for them in the usual
-# login dialog when FIX CTCI gateway starts (but whatever you have
-# specified will be included in the dialog automatically: for example
-# you may specify the usernames but not the passwords, and then you will
-# be prompted for the passwords via the login dialog). Note that if you
-# specify either the FIX username or the FIX password (or both) on the
-# command line, then FIXLoginId and FIXPassword settings defined in this
-# file are ignored; the same applies to the market data username and
-# password.
-
-# IB API Authentication Settings
-# ------------------------------
-
-# Your TWS username:
-
-IbLoginId=
-
-# Your TWS password:
-
-IbPassword=
-
-# FIX CTCI Authentication Settings
-# --------------------------------
-
-# Your FIX CTCI username:
-
-FIXLoginId=
-
-# Your FIX CTCI password:
-
-FIXPassword=
-
-# Second Factor Authentication Settings
-# -------------------------------------
-
-# If you have enabled more than one second factor authentication
-# device, TWS presents a list from which you must select the device
-# you want to use for this login. You can use this setting to
-# instruct IBC to select a particular item in the list on your
-# behalf. Note that you must spell this value exactly as it appears
-# in the list. If no value is set, you must manually select the
-# relevant list entry.
-
-SecondFactorDevice=
-
-# If you use the IBKR Mobile app for second factor authentication,
-# and you fail to complete the process before the time limit imposed
-# by IBKR, you can use this setting to tell IBC to exit: arrangements
-# can then be made to automatically restart IBC in order to initiate
-# the login sequence afresh. Otherwise, manual intervention at TWS's
-# Second Factor Authentication dialog is needed to complete the
-# login.
-#
-# Permitted values are 'yes' and 'no'. The default is 'no'.
-#
-# Note that the scripts provided with the IBC zips for Windows and
-# Linux provide options to automatically restart in these
-# circumstances, but only if this setting is also set to 'yes'.
-
-ExitAfterSecondFactorAuthenticationTimeout=no
-
-# This setting is only relevant if
-# ExitAfterSecondFactorAuthenticationTimeout is set to 'yes'.
-#
-# It controls how long (in seconds) IBC waits for login to complete
-# after the user acknowledges the second factor authentication
-# alert at the IBKR Mobile app. If login has not completed after
-# this time, IBC terminates.
-# The default value is 40.
-
-SecondFactorAuthenticationExitInterval=
-
-# Trading Mode
-# ------------
-#
-# TWS 955 introduced a new Trading Mode combo box on its login
-# dialog. This indicates whether the live account or the paper
-# trading account corresponding to the supplied credentials is
-# to be used. The allowed values are 'live' (the default) and
-# 'paper'. For earlier versions of TWS this setting has no
-# effect.
-
-TradingMode=
-
-# Paper-trading Account Warning
-# -----------------------------
-#
-# Logging in to a paper-trading account results in TWS displaying
-# a dialog asking the user to confirm that they are aware that this
-# is not a brokerage account. Until this dialog has been accepted,
-# TWS will not allow API connections to succeed. Setting this
-# to 'yes' (the default) will cause IBC to automatically
-# confirm acceptance. Setting it to 'no' will leave the dialog
-# on display, and the user will have to deal with it manually.
-
-AcceptNonBrokerageAccountWarning=yes
-
-# Login Dialog Display Timeout
-# ----------------------------
-#
-# In some circumstances, starting TWS may result in failure to display
-# the login dialog. Restarting TWS may help to resolve this situation,
-# and IBC does this automatically.
-#
-# This setting controls how long (in seconds) IBC waits for the login
-# dialog to appear before restarting TWS.
-#
-# Note that in normal circumstances with a reasonably specified
-# computer the time to displaying the login dialog is typically less
-# than 20 seconds, and frequently much less. However many factors can
-# influence this, and it is unwise to set this value too low.
-#
-# The default value is 60.
-
-LoginDialogDisplayTimeout = 60
-
-
-# =============================================================================
-# 3. TWS Startup Settings
-# =============================================================================
-
-# Path to settings store
-# ----------------------
-#
-# Path to the directory where TWS should store its settings. This is
-# normally the folder in which TWS is installed. However you may set
-# it to some other location if you wish (for example if you want to
-# run multiple instances of TWS with different settings).
-#
-# It is recommended for clarity that you use an absolute path. The
-# effect of using a relative path is undefined.
-#
-# Linux and macOS users should use the appropriate path syntax.
-#
-# Note that, for Windows users, you MUST use double separator
-# characters to separate the elements of the folder path: for
-# example, IbDir=C:\\IBLiveSettings is valid, but
-# IbDir=C:\IBLiveSettings is NOT valid and will give unexpected
-# results. Linux and macOS users need not use double separators,
-# but they are acceptable.
-#
-# The default is the current working directory when IBC is
-# started.
-
-IbDir=/root/Jts
-
-# Store settings on server
-# ------------------------
-#
-# If you wish to store a copy of your TWS settings on IB's
-# servers as well as locally on your computer, set this to
-# 'yes': this enables you to run TWS on different computers
-# with the same configuration, market data lines, etc. If set
-# to 'no', running TWS on different computers will not share the
-# same settings. If no value is specified, TWS will obtain its
-# settings from the same place as the last time this user logged
-# in (whether manually or using IBC).
-
-StoreSettingsOnServer=
-
-# Minimize TWS on startup
-# -----------------------
-#
-# Set to 'yes' to minimize TWS when it starts:
-
-MinimizeMainWindow=no
-
-# Existing Session Detected Action
-# --------------------------------
-#
-# When a user logs on to an IBKR account for trading purposes by any means, the
-# IBKR account server checks to see whether the account is already logged in
-# elsewhere. If so, a dialog is displayed to both the users that enables them
-# to determine what happens next. The 'ExistingSessionDetectedAction' setting
-# instructs TWS how to proceed when it displays this dialog:
-#
-#   * If the new TWS session is set to 'secondary', the existing session continues
-#     and the new session terminates. Thus a secondary TWS session can never
-#     override any other session.
-#
-#   * If the existing TWS session is set to 'primary', the existing session
-#     continues and the new session terminates (even if the new session is also
-#     set to primary). Thus a primary TWS session can never be overridden by
-#     any new session.
-#
-#   * If both the existing and the new TWS sessions are set to 'primaryoverride',
-#     the existing session terminates and the new session proceeds.
-#
-#   * If the existing TWS session is set to 'manual', the user must handle the
-#     dialog.
-#
-# The difference between 'primary' and 'primaryoverride' is that a
-# 'primaryoverride' session can be overridden by a new 'primary' session,
-# but a 'primary' session cannot be overridden by any other session.
-#
-# When set to 'primary', if another TWS session is started and manually told to
-# end the 'primary' session, the 'primary' session is automatically reconnected.
-#
-# The default is 'manual'.
-
-ExistingSessionDetectedAction=primary
-
-# Override TWS API Port Number
-# ----------------------------
-#
-# If OverrideTwsApiPort is set to an integer, IBC changes the
-# 'Socket port' in TWS's API configuration to that number shortly
-# after startup. Leaving the setting blank will make no change to
-# the current setting. This setting is only intended for use in
-# certain specialized situations where the port number needs to
-# be set dynamically at run-time: most users will never need it,
-# so don't use it unless you know you need it.
-
-OverrideTwsApiPort=4002
-
-# Read-only Login
-# ---------------
-#
-# If ReadOnlyLogin is set to 'yes', and the user is enrolled in IB's
-# account security programme, the user will not be asked to perform
-# the second factor authentication action, and login to TWS will
-# occur automatically in read-only mode: in this mode, placing or
-# managing orders is not allowed. If set to 'no', and the user is
-# enrolled in IB's account security programme, the user must perform
-# the relevant second factor authentication action to complete the
-# login.
-
-# If the user is not enrolled in IB's account security programme,
-# this setting is ignored. The default is 'no'.
-
-ReadOnlyLogin=no
-
-# Read-only API
-# -------------
-#
-# If ReadOnlyApi is set to 'yes', API programs cannot submit, modify
-# or cancel orders. If set to 'no', API programs can do these things.
-# If not set, the existing TWS/Gateway configuration is unchanged.
-# NB: this setting is really only supplied for the benefit of new TWS
-# or Gateway instances that are being automatically installed and
-# started without user intervention (eg Docker containers). Where
-# a user is involved, they should use the Global Configuration to
-# set the relevant checkbox (this only needs to be done once) and
-# not provide a value for this setting.
-
-ReadOnlyApi=no
-
-# Market data size for US stocks - lots or shares
-# -----------------------------------------------
-#
-# Since IB introduced the option of market data for US stocks showing
-# bid, ask and last sizes in shares rather than lots, TWS and Gateway
-# display a dialog immediately after login notifying the user about
-# this and requiring user input before allowing market data to be
-# accessed. The user can request that the dialog not be shown again.
-#
-# It is recommended that the user should handle this dialog manually
-# rather than using these settings, which are provided for situations
-# where the user interface is not easily accessible, or where user
-# settings are not preserved between sessions (eg some Docker images).
-#
-# - If this setting is set to 'accept', the dialog will be handled
-#   automatically and the option to not show it again will be
-#   selected.
-#
-#   Note that in this case, the only way to allow the dialog to be
-#   displayed again is to manually enable the 'Bid, Ask and Last
-#   Size Display Update' message in the 'Messages' section of the TWS
-#   configuration dialog. So you should only use 'Accept' if you are
-#   sure you really don't want the dialog to be displayed again, or
-#   you have easy access to the user interface.
-#
-# - If set to 'defer', the dialog will be handled automatically (so
-#   that market data will start), but the option to not show it again
-#   will not be selected, and it will be shown again after the next
-#   login.
-#
-# - If set to 'ignore', the user has to deal with the dialog manually.
-#
-# The default value is 'ignore'.
-#
-# Note if set to 'accept' or 'defer', TWS also automatically sets
-# the API settings checkbox labelled 'Send market data in lots for
-# US stocks for dual-mode API clients'. IBC cannot prevent this.
-# However you can change this immediately by setting
-# SendMarketDataInLotsForUSstocks (see below) to 'no'.
-
-AcceptBidAskLastSizeDisplayUpdateNotification=accept
-
-# This setting determines whether the API settings checkbox labelled
-# 'Send market data in lots for US stocks for dual-mode API clients'
-# is set or cleared. If set to 'yes', the checkbox is set. If set to
-# 'no' the checkbox is cleared. If defaulted, the checkbox is
-# unchanged.
-
-SendMarketDataInLotsForUSstocks=
-
-
-# =============================================================================
-# 4. TWS Auto-Closedown
-# =============================================================================
-#
-# IMPORTANT NOTE: Starting with TWS 974, this setting no longer
-# works properly, because IB have changed the way TWS handles its
-# autologoff mechanism.
-#
-# You should now configure the TWS autologoff time to something
-# convenient for you, and restart IBC each day.
-#
-# Alternatively, discontinue use of IBC and use the auto-relogin
-# mechanism within TWS 974 and later versions (note that the
-# auto-relogin mechanism provided by IB is not available if you
-# use IBC).
-
-# Set to yes or no (lower case).
-#
-# yes means allow TWS to shut down automatically at its
-# specified shutdown time, which is set via the TWS
-# configuration menu.
-#
-# no means TWS never shuts down automatically.
-#
-# NB: IB recommends that you do not keep TWS running
-# continuously. If you set this setting to 'no', you may
-# experience incorrect TWS operation.
-#
-# NB: the default for this setting is 'no'. Since this will
-# only work properly with TWS versions earlier than 974, you
-# should explicitly set this to 'yes' for version 974 and later.
-
-IbAutoClosedown=yes
-
-
-# =============================================================================
-# 5. TWS Tidy Closedown Time
-# =============================================================================
-#
-# NB: starting with TWS 974 this is no longer a useful option
-# because both TWS and Gateway now have the same auto-logoff
-# mechanism, and IBC can no longer avoid this.
-#
-# Note that giving this setting a value does not change TWS's
-# auto-logoff in any way: any setting will be additional to the
-# TWS auto-logoff.
-#
-# To tell IBC to tidily close TWS at a specified time every
-# day, set this value to <hh:mm>, for example:
-# ClosedownAt=22:00
-#
-# To tell IBC to tidily close TWS at a specified day and time
-# each week, set this value to <dayOfWeek hh:mm>, for example:
-# ClosedownAt=Friday 22:00
-#
-# Note that the day of the week must be specified using your
-# default locale. Also note that Java will only accept
-# characters encoded to ISO 8859-1 (Latin-1). This means that
-# if the day name in your default locale uses any non-Latin-1
-# characters you need to encode them using Unicode escapes
-# (see http://java.sun.com/docs/books/jls/third_edition/html/lexical.html#3.3
-# for details). For example, to tidily close TWS at 12:00 on
-# Saturday where the default locale is Simplified Chinese,
-# use the following:
-# #ClosedownAt=\u661F\u671F\u516D 12:00
-
-ClosedownAt=
-
-
-# =============================================================================
-# 6. Other TWS Settings
-# =============================================================================
-
-# Accept Incoming Connection
-# --------------------------
-#
-# If set to 'accept', IBC automatically accepts incoming
-# API connection dialogs. If set to 'reject', IBC
-# automatically rejects incoming API connection dialogs. If
-# set to 'manual', the user must decide whether to accept or reject
-# incoming API connection dialogs. The default is 'manual'.
-# NB: it is recommended to set this to 'reject', and to explicitly
-# configure which IP addresses can connect to the API in TWS's API
-# configuration page, as this is much more secure (in this case, no
-# incoming API connection dialogs will occur for those IP addresses).
-
-AcceptIncomingConnectionAction=reject
-
-# Allow Blind Trading
-# -------------------
-#
-# If you attempt to place an order for a contract for which
-# you have no market data subscription, TWS displays a dialog
-# to warn you against such blind trading.
-#
-# yes means the dialog is dismissed as though the user had
-# clicked the 'Ok' button: this means that you accept
-# the risk and want the order to be submitted.
-#
-# no means the dialog remains on display and must be
-# handled by the user.
-
-AllowBlindTrading=yes
-
-# Save Settings on a Schedule
-# ---------------------------
-#
-# You can tell TWS to automatically save its settings on a schedule
-# of your choosing. You can specify one or more specific times,
-# like this:
-#
-# SaveTwsSettingsAt=HH:MM [ HH:MM]...
-#
-# for example:
-# SaveTwsSettingsAt=08:00 12:30 17:30
-#
-# Or you can specify an interval at which settings are to be saved,
-# optionally starting at a specific time and continuing until another
-# time, like this:
-#
-#SaveTwsSettingsAt=Every n [{mins | hours}] [hh:mm] [hh:mm]
-#
-# where the first hh:mm is the start time and the second is the end
-# time. If you don't specify the end time, settings are saved regularly
-# from the start time till midnight. If you don't specify the start time,
-# settings are saved regularly all day, beginning at 00:00. Note that
-# settings will always be saved at the end time, even if that is not
-# exactly one interval later than the previous time. If neither 'mins'
-# nor 'hours' is specified, 'mins' is assumed. Examples:
-#
-# To save every 30 minutes all day starting at 00:00
-#SaveTwsSettingsAt=Every 30
-#SaveTwsSettingsAt=Every 30 mins
-#
-# To save every hour starting at 08:00 and ending at midnight
-#SaveTwsSettingsAt=Every 1 hours 08:00
-#SaveTwsSettingsAt=Every 1 hours 08:00 00:00
-#
-# To save every 90 minutes starting at 08:00 up to and including 17:43
-#SaveTwsSettingsAt=Every 90 08:00 17:43
-
-SaveTwsSettingsAt=
-
-
-# =============================================================================
-# 7. Settings Specific to Indian Versions of TWS
-# =============================================================================
-
-# Indian versions of TWS may display a password expiry
-# notification dialog and a NSE Compliance dialog. These can be
-# dismissed by setting the following to yes. By default the
-# password expiry notice is not dismissed, but the NSE Compliance
-# notice is dismissed.
-
-# Warning: setting DismissPasswordExpiryWarning=yes will mean
-# you will not be notified when your password is about to expire.
-# You must then take other measures to ensure that your password
-# is changed within the expiry period, otherwise IBC will
-# not be able to login successfully.
-
-DismissPasswordExpiryWarning=no
-DismissNSEComplianceNotice=yes
-
-
-# =============================================================================
-# 8. IBC Command Server Settings
-# =============================================================================
-
-# Do NOT CHANGE THE FOLLOWING SETTINGS unless you
-# intend to issue commands to IBC (for example
-# using telnet). Note that these settings have nothing to
-# do with running programs that use the TWS API.
-
-# Command Server Port Number
-# --------------------------
-#
-# The port number that IBC listens on for commands
-# such as "STOP". DO NOT set this to the port number
-# used for TWS API connections. There is no good reason
-# to change this setting unless the port is used by
-# some other application (typically another instance of
-# IBC). The default value is 0, which tells IBC not to
-# start the command server.
-
-#CommandServerPort=7462
-
-# Permitted Command Sources
-# -------------------------
-#
-# A comma separated list of IP addresses, or host names,
-# which are allowed addresses for sending commands to
-# IBC. Commands can always be sent from the
-# same host as IBC is running on.
-
-ControlFrom=127.0.0.1
-
-# Address for Receiving Commands
-# ------------------------------
-#
-# Specifies the IP address on which the Command Server
-# is to listen. For a multi-homed host, this can be used
-# to specify that connection requests are only to be
-# accepted on the specified address. The default is to
-# accept connection requests on all local addresses.
-
-BindAddress=127.0.0.1
-
-# Command Prompt
-# --------------
-#
-# The specified string is output by the server when
-# the connection is first opened and after the completion
-# of each command. This can be useful if sending commands
-# using an interactive program such as telnet. The default
-# is that no prompt is output.
-# For example:
-#
-# CommandPrompt=>
-
-CommandPrompt=
-
-# Suppress Command Server Info Messages
-# -------------------------------------
-#
-# Some commands can return intermediate information about
-# their progress. This setting controls whether such
-# information is sent. The default is that such information
-# is not sent.
-
-SuppressInfoMessages=no
-
-
-# =============================================================================
-# 9. Diagnostic Settings
-# =============================================================================
-#
-# IBC can log information about the structure of windows
-# displayed by TWS. This information is useful when adding
-# new features to IBC or when behaviour is not as expected.
-#
-# The logged information shows the hierarchical organisation
-# of all the components of the window, and includes the
-# current values of text boxes and labels.
-#
-# Note that this structure logging has a small performance
-# impact, and depending on the settings can cause the logfile
-# size to be significantly increased. It is therefore
-# recommended that the LogStructureWhen setting be set to
-# 'never' (the default) unless there is a specific reason
-# that this information is needed.
-
-# Scope of Structure Logging
-# --------------------------
-#
-# The LogStructureScope setting indicates which windows are
-# eligible for structure logging:
-#
-# - if set to 'known', only windows that IBC recognizes
-#   are eligible - these are windows that IBC has some
-#   interest in monitoring, usually to take some action
-#   on the user's behalf;
-#
-# - if set to 'unknown', only windows that IBC does not
-#   recognize are eligible. Most windows displayed by
-#   TWS fall into this category;
-#
-# - if set to 'untitled', only windows that IBC does not
-#   recognize and that have no title are eligible. These
-#   are usually message boxes or similar small windows;
-#
-# - if set to 'all', then every window displayed by TWS
-#   is eligible.
-#
-# The default value is 'known'.
-
-LogStructureScope=all
-
-# When to Log Window Structure
-# ----------------------------
-#
-# The LogStructureWhen setting specifies the circumstances
-# when eligible TWS windows have their structure logged:
-#
-# - if set to 'open' or 'yes' or 'true', IBC logs the
-#   structure of an eligible window the first time it
-#   is encountered;
-#
-# - if set to 'activate', the structure is logged every
-#   time an eligible window is made active;
-#
-# - if set to 'never' or 'no' or 'false', structure
-#   information is never logged.
-#
-# The default value is 'never'.
-
-LogStructureWhen=never
-
-# DEPRECATED SETTING
-# ------------------
-#
-# LogComponents - THIS SETTING WILL BE REMOVED IN A FUTURE
-# RELEASE
-#
-# If LogComponents is set to any value, this is equivalent
-# to setting LogStructureWhen to that same value and
-# LogStructureScope to 'all': the actual values of those
-# settings are ignored. The default is that the values
-# of LogStructureScope and LogStructureWhen are honoured.
-
-#LogComponents=
dockering/ib/jts.ini (deleted)

@@ -1,33 +0,0 @@
-[IBGateway]
-ApiOnly=true
-LocalServerPort=4002
-# NOTE: must be set if using IBC's "reject" mode
-TrustedIPs=127.0.0.1
-; RemoteHostOrderRouting=ndc1.ibllc.com
-; WriteDebug=true
-; RemotePortOrderRouting=4001
-; useRemoteSettings=false
-; tradingMode=p
-; Steps=8
-; colorPalletName=dark
-
-# window geo, this may be useful for sending `xdotool` commands?
-; MainWindow.Width=1986
-; screenHeight=3960
-
-
-[Logon]
-Locale=en
-# most markets are oriented around this zone
-# so might as well hard code it.
-TimeZone=America/New_York
-UseSSL=true
-displayedproxymsg=1
-os_titlebar=true
-s3store=true
-useRemoteSettings=false
-
-[Communication]
-ctciAutoEncrypt=true
-Region=usr
-; Peer=cdc1.ibllc.com:4001
dockering/ib/run_x11_vnc.sh (deleted)

@@ -1,16 +0,0 @@
-#!/bin/sh
-
-# start VNC server
-x11vnc \
-    -ncache_cr \
-    -listen localhost \
-    -display :1 \
-    -forever \
-    -shared \
-    -logappend /var/log/x11vnc.log \
-    -bg \
-    -noipv6 \
-    -autoport 3003 \
-    # can't use this because of ``asyncvnc`` issue:
-    # https://github.com/barneygale/asyncvnc/issues/1
-    # -passwd 'ibcansmbz'
piker/_daemon.py

@@ -35,7 +35,7 @@ log = get_logger(__name__)
 
 _root_dname = 'pikerd'
 
-_registry_addr = ('127.0.0.1', 6116)
+_registry_addr = ('127.0.0.1', 1616)
 _tractor_kwargs: dict[str, Any] = {
     # use a different registry addr than tractor's default
     'arbiter_addr': _registry_addr
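Every piker process discovers ``pikerd`` through this one hard-coded registry
socket, so the literal matters: a client built against one port will silently
fail to find a daemon bound on the other. A hypothetical liveness probe (not
part of the codebase) makes the coupling concrete::

    import socket

    def pikerd_up(addr: tuple[str, int] = ('127.0.0.1', 6116)) -> bool:
        # plain TCP connect against the tractor registry socket
        try:
            with socket.create_connection(addr, timeout=0.5):
                return True
        except OSError:
            return False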
@@ -426,19 +426,9 @@ async def spawn_brokerd(
 
     # ask `pikerd` to spawn a new sub-actor and manage it under its
     # actor nursery
-    modpath = brokermod.__name__
-    broker_enable = [modpath]
-    for submodname in getattr(
-        brokermod,
-        '__enable_modules__',
-        [],
-    ):
-        subpath = f'{modpath}.{submodname}'
-        broker_enable.append(subpath)
-
     portal = await _services.actor_n.start_actor(
         dname,
-        enable_modules=_data_mods + broker_enable,
+        enable_modules=_data_mods + [brokermod.__name__],
         loglevel=loglevel,
         debug_mode=_services.debug_mode,
         **tractor_kwargs
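The removed loop is the generic half of the backend "annotation" protocol: a
backend package lists its RPC-servable submodules in ``__enable_modules__``
(see the ``ib/__init__.py`` diff further down) and the spawner expands them to
full dotted paths. A condensed sketch of exactly what the deleted lines
computed::

    modpath = 'piker.brokers.ib'          # brokermod.__name__
    submods = ['api', 'feed', 'broker']   # brokermod.__enable_modules__
    broker_enable = [modpath] + [f'{modpath}.{sub}' for sub in submods]
    # -> ['piker.brokers.ib', 'piker.brokers.ib.api', ...]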
piker/brokers/binance.py

@@ -402,7 +402,7 @@ async def open_history_client(
         end_dt = pendulum.from_timestamp(array[-1]['time'])
         return array, start_dt, end_dt
 
-    yield get_ohlc, {'erlangs': 3, 'rate': 3}
+    yield get_ohlc, {'erlangs': 4, 'rate': 4}
 
 
 async def backfill_bars(
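The dict yielded alongside ``get_ohlc`` reads as a throttle spec: ``erlangs``
as the max concurrent history requests in flight and ``rate`` as requests per
second (naming by analogy with telecom erlangs; this gloss is inferred, not
stated in the hunk). A rough ``trio`` sketch of honoring such a spec, assuming
``get_ohlc`` accepts an ``end_dt``::

    import trio

    async def backfill(get_ohlc, end_dts, erlangs=3, rate=3):
        limiter = trio.CapacityLimiter(erlangs)  # cap in-flight requests

        async def fetch(end_dt):
            async with limiter:
                return await get_ohlc(end_dt=end_dt)

        async with trio.open_nursery() as n:
            for end_dt in end_dts:
                n.start_soon(fetch, end_dt)
                await trio.sleep(1 / rate)  # crude start-rate pacing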
@@ -452,8 +452,8 @@ async def stream_quotes(
 
         # XXX: after manually inspecting the response format we
         # just directly pick out the info we need
-        si['price_tick_size'] = float(syminfo.filters[0]['tickSize'])
-        si['lot_tick_size'] = float(syminfo.filters[2]['stepSize'])
+        si['price_tick_size'] = syminfo.filters[0]['tickSize']
+        si['lot_tick_size'] = syminfo.filters[2]['stepSize']
         si['asset_type'] = 'crypto'
 
         symbol = symbols[0]
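The only delta here is the eager ``float()`` cast: Binance delivers filter
values as JSON strings, and one side of the hunk keeps them raw. Keeping the
string defers the lossy binary-float conversion, e.g.::

    from decimal import Decimal

    raw = '0.00000100'   # a typical Binance tickSize/stepSize string
    float(raw)           # 1e-06: rounded binary float
    Decimal(raw)         # Decimal('1.00E-6'): exact, precision preserved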
piker/brokers/cli.py

@@ -23,6 +23,7 @@ from operator import attrgetter
 from operator import itemgetter
 
 import click
+import pandas as pd
 import trio
 import tractor
 
@@ -46,10 +47,8 @@ _watchlists_data_path = os.path.join(_config_dir, 'watchlists.json')
 @click.argument('kwargs', nargs=-1)
 @click.pass_obj
 def api(config, meth, kwargs, keys):
-    '''
-    Make a broker-client API method call
-
-    '''
+    """Make a broker-client API method call
+    """
     # global opts
     broker = config['brokers'][0]
 
@@ -80,13 +79,13 @@ def api(config, meth, kwargs, keys):
 
 
 @cli.command()
+@click.option('--df-output', '-df', flag_value=True,
+              help='Output in `pandas.DataFrame` format')
 @click.argument('tickers', nargs=-1, required=True)
 @click.pass_obj
-def quote(config, tickers):
-    '''
-    Print symbol quotes to the console
-
-    '''
+def quote(config, tickers, df_output):
+    """Print symbol quotes to the console
+    """
     # global opts
     brokermod = config['brokermods'][0]
 
@@ -101,19 +100,28 @@ def quote(config, tickers):
         if ticker not in syms:
             brokermod.log.warn(f"Could not find symbol {ticker}?")
 
-    click.echo(colorize_json(quotes))
+    if df_output:
+        cols = next(filter(bool, quotes)).copy()
+        cols.pop('symbol')
+        df = pd.DataFrame(
+            (quote or {} for quote in quotes),
+            columns=cols,
+        )
+        click.echo(df)
+    else:
+        click.echo(colorize_json(quotes))
 
 
 @cli.command()
+@click.option('--df-output', '-df', flag_value=True,
+              help='Output in `pandas.DataFrame` format')
 @click.option('--count', '-c', default=1000,
               help='Number of bars to retrieve')
 @click.argument('symbol', required=True)
 @click.pass_obj
-def bars(config, symbol, count):
-    '''
-    Retrieve 1m bars for symbol and print on the console
-
-    '''
+def bars(config, symbol, count, df_output):
+    """Retrieve 1m bars for symbol and print on the console
+    """
     # global opts
     brokermod = config['brokermods'][0]
 
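The ``cols`` trick above leans on two plain-Python facts: ``filter(bool, ...)``
skips empty or ``None`` quotes, and iterating a dict yields its keys, which is
effectively all the ``columns=`` argument consumes. Illustrated on toy data::

    quotes = [None, {'symbol': 'XYZ', 'last': 1.23, 'size': 10}]
    cols = next(filter(bool, quotes)).copy()  # first non-empty quote, copied
    cols.pop('symbol')                        # drop w/o mutating the original
    list(cols)                                # -> ['last', 'size']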
@@ -125,7 +133,7 @@ def bars(config, symbol, count):
             brokermod,
             symbol,
             count=count,
-            as_np=False,
+            as_np=df_output
         )
     )
 
@@ -133,6 +141,9 @@ def bars(config, symbol, count):
         log.error(f"No quotes could be found for {symbol}?")
         return
 
-    click.echo(colorize_json(bars))
+    if df_output:
+        click.echo(pd.DataFrame(bars))
+    else:
+        click.echo(colorize_json(bars))
 
 
@@ -145,10 +156,8 @@ def bars(config, symbol, count):
 @click.argument('name', nargs=1, required=True)
 @click.pass_obj
 def record(config, rate, name, dhost, filename):
-    '''
-    Record client side quotes to a file on disk
-
-    '''
+    """Record client side quotes to a file on disk
+    """
     # global opts
     brokermod = config['brokermods'][0]
     loglevel = config['loglevel']
@@ -186,10 +195,8 @@ def record(config, rate, name, dhost, filename):
 @click.argument('symbol', required=True)
 @click.pass_context
 def contracts(ctx, loglevel, broker, symbol, ids):
-    '''
-    Get list of all option contracts for symbol
-
-    '''
+    """Get list of all option contracts for symbol
+    """
     brokermod = get_brokermod(broker)
     get_console_log(loglevel)
 
@@ -206,14 +213,14 @@ def contracts(ctx, loglevel, broker, symbol, ids):
 
 
 @cli.command()
+@click.option('--df-output', '-df', flag_value=True,
+              help='Output in `pandas.DataFrame` format')
 @click.option('--date', '-d', help='Contracts expiry date')
 @click.argument('symbol', required=True)
 @click.pass_obj
-def optsquote(config, symbol, date):
-    '''
-    Retrieve symbol option quotes on the console
-
-    '''
+def optsquote(config, symbol, df_output, date):
+    """Retrieve symbol option quotes on the console
+    """
     # global opts
     brokermod = config['brokermods'][0]
 
@@ -226,6 +233,13 @@ def optsquote(config, symbol, date):
         log.error(f"No option quotes could be found for {symbol}?")
         return
 
-    click.echo(colorize_json(quotes))
+    if df_output:
+        df = pd.DataFrame(
+            (quote.values() for quote in quotes),
+            columns=quotes[0].keys(),
+        )
+        click.echo(df)
+    else:
+        click.echo(colorize_json(quotes))
 
 
@@ -233,10 +247,8 @@ def optsquote(config, symbol, date):
 @click.argument('tickers', nargs=-1, required=True)
 @click.pass_obj
 def symbol_info(config, tickers):
-    '''
-    Print symbol quotes to the console
-
-    '''
+    """Print symbol quotes to the console
+    """
     # global opts
     brokermod = config['brokermods'][0]
 
@@ -258,10 +270,8 @@ def symbol_info(config, tickers):
 @click.argument('pattern', required=True)
 @click.pass_obj
 def search(config, pattern):
-    '''
-    Search for symbols from broker backend(s).
-
-    '''
+    """Search for symbols from broker backend(s).
+    """
     # global opts
     brokermods = config['brokermods']
 
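Taken together, these hunks carry a ``--df-output``/``-df`` switch on the
``quote``, ``bars`` and ``optsquote`` subcommands. Assuming the standard
``piker`` entrypoint and a configured backend, usage would look roughly like::

    piker quote -df GOOG AAPL     # quotes as a pandas.DataFrame table
    piker bars -df -c 500 SPY     # 500 1m bars as a DataFrame
    piker optsquote -df SPY       # option quotes as a DataFrame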
File diff suppressed because it is too large
piker/brokers/ib/__init__.py (deleted)

@@ -1,67 +0,0 @@
-# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for pikers)
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-"""
-Interactive Brokers API backend.
-
-Sub-modules within break into the core functionalities:
-
-- ``broker.py`` part for orders / trading endpoints
-- ``data.py`` for real-time data feed endpoints
-- ``client.py`` for the core API machinery which is ``trio``-ized
-  wrapping around ``ib_insync``.
-- ``report.py`` for the hackery to build manual pp calcs
-  to avoid ib's absolute bullshit FIFO style position
-  tracking..
-
-"""
-from .api import (
-    get_client,
-)
-from .feed import (
-    open_history_client,
-    open_symbol_search,
-    stream_quotes,
-)
-from .broker import trades_dialogue
-
-__all__ = [
-    'get_client',
-    'trades_dialogue',
-    'open_history_client',
-    'open_symbol_search',
-    'stream_quotes',
-]
-
-# tractor RPC enable arg
-__enable_modules__: list[str] = [
-    'api',
-    'feed',
-    'broker',
-]
-
-# passed to ``tractor.ActorNursery.start_actor()``
-_spawn_kwargs = {
-    'infect_asyncio': True,
-}
-
-# annotation to let backend agnostic code
-# know if ``brokerd`` should be spawned with
-# ``tractor``'s aio mode.
-_infect_asyncio: bool = True
@@ -1,590 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
Order and trades endpoints for use with ``piker``'s EMS.

"""
from __future__ import annotations
from dataclasses import asdict
from functools import partial
from pprint import pformat
import time
from typing import (
    Any,
    Optional,
    AsyncIterator,
)

import trio
from trio_typing import TaskStatus
import tractor
from ib_insync.contract import (
    Contract,
    Option,
)
from ib_insync.order import (
    Trade,
    OrderStatus,
)
from ib_insync.objects import (
    Fill,
    Execution,
)
from ib_insync.objects import Position

from piker import config
from piker.log import get_console_log
from piker.clearing._messages import (
    BrokerdOrder,
    BrokerdOrderAck,
    BrokerdStatus,
    BrokerdPosition,
    BrokerdCancel,
    BrokerdFill,
    BrokerdError,
)
from .api import (
    _accounts2clients,
    _adhoc_futes_set,
    log,
    get_config,
    open_client_proxies,
    Client,
)


def pack_position(
    pos: Position

) -> dict[str, Any]:
    con = pos.contract

    if isinstance(con, Option):
        # TODO: option symbol parsing and sane display:
        symbol = con.localSymbol.replace(' ', '')

    else:
        # TODO: lookup fqsn even for derivs.
        symbol = con.symbol.lower()

    exch = (con.primaryExchange or con.exchange).lower()
    symkey = '.'.join((symbol, exch))
    if not exch:
        # attempt to lookup the symbol from our
        # hacked set..
        for sym in _adhoc_futes_set:
            if symbol in sym:
                symkey = sym
                break

    expiry = con.lastTradeDateOrContractMonth
    if expiry:
        symkey += f'.{expiry}'

    # TODO: options contracts into a sane format..

    return BrokerdPosition(
        broker='ib',
        account=pos.account,
        symbol=symkey,
        currency=con.currency,
        size=float(pos.position),
        avg_price=float(pos.avgCost) / float(con.multiplier or 1.0),
    )

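The `pack_position()` just above composes its symbol key as `<symbol>.<exchange>[.<expiry>]`, all lowercase. A standalone sketch of that composition (ticker values invented for the example):

def mk_symkey(symbol: str, exch: str, expiry: str = '') -> str:
    # lowercase '<symbol>.<exchange>' plus an optional expiry suffix,
    # mirroring the key built in pack_position()
    symkey = '.'.join((symbol.lower(), exch.lower()))
    if expiry:
        symkey += f'.{expiry}'
    return symkey

assert mk_symkey('MES', 'GLOBEX', '20220318') == 'mes.globex.20220318'
assert mk_symkey('AAPL', 'NASDAQ') == 'aapl.nasdaq'
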
async def handle_order_requests(

    ems_order_stream: tractor.MsgStream,
    accounts_def: dict[str, str],

) -> None:

    request_msg: dict
    async for request_msg in ems_order_stream:
        log.info(f'Received order request {request_msg}')

        action = request_msg['action']
        account = request_msg['account']

        acct_number = accounts_def.get(account)
        if not acct_number:
            log.error(
                f'An IB account number for name {account} is not found?\n'
                'Make sure you have all TWS and GW instances running.'
            )
            await ems_order_stream.send(BrokerdError(
                oid=request_msg['oid'],
                symbol=request_msg['symbol'],
                reason=f'No account found: `{account}` ?',
            ).dict())
            continue

        client = _accounts2clients.get(account)
        if not client:
            log.error(
                f'An IB client for account name {account} is not found.\n'
                'Make sure you have all TWS and GW instances running.'
            )
            await ems_order_stream.send(BrokerdError(
                oid=request_msg['oid'],
                symbol=request_msg['symbol'],
                reason=f'No api client loaded for account: `{account}` ?',
            ).dict())
            continue

        if action in {'buy', 'sell'}:
            # validate
            order = BrokerdOrder(**request_msg)

            # call our client api to submit the order
            reqid = client.submit_limit(
                oid=order.oid,
                symbol=order.symbol,
                price=order.price,
                action=order.action,
                size=order.size,
                account=acct_number,

                # XXX: by default 0 tells ``ib_insync`` methods that
                # there is no existing order so ask the client to create
                # a new one (which it seems to do by allocating an int
                # counter - collision prone..)
                reqid=order.reqid,
            )
            if reqid is None:
                await ems_order_stream.send(BrokerdError(
                    oid=request_msg['oid'],
                    symbol=request_msg['symbol'],
                    reason='Order already active?',
                ).dict())

            # deliver ack that order has been submitted to broker routing
            await ems_order_stream.send(
                BrokerdOrderAck(
                    # ems order request id
                    oid=order.oid,
                    # broker specific request id
                    reqid=reqid,
                    time_ns=time.time_ns(),
                    account=account,
                ).dict()
            )

        elif action == 'cancel':
            msg = BrokerdCancel(**request_msg)
            client.submit_cancel(reqid=msg.reqid)

        else:
            log.error(f'Unknown order command: {request_msg}')


async def recv_trade_updates(

    client: Client,
    to_trio: trio.abc.SendChannel,

) -> None:
    """Stream a ticker using the std L1 api.
    """
    client.inline_errors(to_trio)

    # sync with trio task
    to_trio.send_nowait(None)

    def push_tradesies(eventkit_obj, obj, fill=None):
        """Push events to trio task.

        """
        if fill is not None:
            # execution details event
            item = ('fill', (obj, fill))

        elif eventkit_obj.name() == 'positionEvent':
            item = ('position', obj)

        else:
            item = ('status', obj)

        log.info(f'eventkit event ->\n{pformat(item)}')

        try:
            to_trio.send_nowait(item)
        except trio.BrokenResourceError:
            log.exception(f'Disconnected from {eventkit_obj} updates')
            eventkit_obj.disconnect(push_tradesies)

    # hook up to the weird eventkit object - event stream api
    for ev_name in [
        'orderStatusEvent',  # all order updates
        'execDetailsEvent',  # all "fill" updates
        'positionEvent',  # avg price updates per symbol per account

        # 'commissionReportEvent',
        # XXX: ugh, it is a separate event from IB and it's
        # emitted as follows:
        # self.ib.commissionReportEvent.emit(trade, fill, report)

        # XXX: not sure yet if we need these
        # 'updatePortfolioEvent',

        # XXX: these all seem to be weird ib_insync intrernal
        # events that we probably don't care that much about
        # given the internal design is wonky af..
        # 'newOrderEvent',
        # 'orderModifyEvent',
        # 'cancelOrderEvent',
        # 'openOrderEvent',
    ]:
        eventkit_obj = getattr(client.ib, ev_name)
        handler = partial(push_tradesies, eventkit_obj)
        eventkit_obj.connect(handler)

    # let the engine run and stream
    await client.ib.disconnectedEvent

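`recv_trade_updates()` above registers one callback on several `eventkit` events and uses `functools.partial` to pre-bind the event object, so the shared handler can tell which stream fired. The dispatch pattern in isolation, with a toy event class standing in for `eventkit.Event`:

from functools import partial

class ToyEvent:
    # minimal stand-in for an ``eventkit.Event``
    def __init__(self, name: str):
        self._name = name
        self._handlers = []

    def name(self) -> str:
        return self._name

    def connect(self, handler) -> None:
        self._handlers.append(handler)

    def emit(self, *args) -> None:
        for handler in self._handlers:
            handler(*args)

def push(event, obj, fill=None):
    # route on the originating event, as push_tradesies() does
    kind = 'position' if event.name() == 'positionEvent' else 'status'
    print(kind, obj)

for ev_name in ('orderStatusEvent', 'positionEvent'):
    ev = ToyEvent(ev_name)
    ev.connect(partial(push, ev))
    ev.emit({'demo': 'payload'})
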
@tractor.context
async def trades_dialogue(

    ctx: tractor.Context,
    loglevel: str = None,

) -> AsyncIterator[dict[str, Any]]:

    # XXX: required to propagate ``tractor`` loglevel to piker logging
    get_console_log(loglevel or tractor.current_actor().loglevel)

    accounts_def = config.load_accounts(['ib'])

    global _client_cache

    # deliver positions to subscriber before anything else
    all_positions = []
    accounts = set()
    clients: list[tuple[Client, trio.MemoryReceiveChannel]] = []

    async with (
        trio.open_nursery() as nurse,
        open_client_proxies() as (proxies, aioclients),
    ):
        for account, proxy in proxies.items():

            client = aioclients[account]

            async def open_stream(
                task_status: TaskStatus[
                    trio.abc.ReceiveChannel
                ] = trio.TASK_STATUS_IGNORED,
            ):
                # each api client has a unique event stream
                async with tractor.to_asyncio.open_channel_from(
                    recv_trade_updates,
                    client=client,
                ) as (first, trade_event_stream):

                    task_status.started(trade_event_stream)
                    await trio.sleep_forever()

            trade_event_stream = await nurse.start(open_stream)

            clients.append((client, trade_event_stream))

            assert account in accounts_def
            accounts.add(account)

        for client in aioclients.values():
            for pos in client.positions():

                msg = pack_position(pos)
                msg.account = accounts_def.inverse[msg.account]

                assert msg.account in accounts, (
                    f'Position for unknown account: {msg.account}')

                all_positions.append(msg.dict())

        trades: list[dict] = []
        for proxy in proxies.values():
            trades.append(await proxy.trades())

        log.info(f'Loaded {len(trades)} from this session')
        # TODO: write trades to local ``trades.toml``
        # - use above per-session trades data and write to local file
        # - get the "flex reports" working and pull historical data and
        #   also save locally.

        await ctx.started((
            all_positions,
            tuple(name for name in accounts_def if name in accounts),
        ))

        async with (
            ctx.open_stream() as ems_stream,
            trio.open_nursery() as n,
        ):
            # start order request handler **before** local trades event loop
            n.start_soon(handle_order_requests, ems_stream, accounts_def)

            # allocate event relay tasks for each client connection
            for client, stream in clients:
                n.start_soon(
                    deliver_trade_events,
                    stream,
                    ems_stream,
                    accounts_def
                )

            # block until cancelled
            await trio.sleep_forever()


async def deliver_trade_events(

    trade_event_stream: trio.MemoryReceiveChannel,
    ems_stream: tractor.MsgStream,
    accounts_def: dict[str, str],

) -> None:
    '''Format and relay all trade events for a given client to the EMS.

    '''
    action_map = {'BOT': 'buy', 'SLD': 'sell'}

    # TODO: for some reason we can receive a ``None`` here when the
    # ib-gw goes down? Not sure exactly how that's happening looking
    # at the eventkit code above but we should probably handle it...
    async for event_name, item in trade_event_stream:

        log.info(f'ib sending {event_name}:\n{pformat(item)}')

        # TODO: templating the ib statuses in comparison with other
        # brokers is likely the way to go:
        # https://interactivebrokers.github.io/tws-api/interfaceIBApi_1_1EWrapper.html#a17f2a02d6449710b6394d0266a353313
        # short list:
        # - PendingSubmit
        # - PendingCancel
        # - PreSubmitted (simulated orders)
        # - ApiCancelled (cancelled by client before submission
        #   to routing)
        # - Cancelled
        # - Filled
        # - Inactive (reject or cancelled but not by trader)

        # XXX: here's some other sucky cases from the api
        # - short-sale but securities haven't been located, in this
        #   case we should probably keep the order in some kind of
        #   weird state or cancel it outright?

        # status='PendingSubmit', message=''),
        # status='Cancelled', message='Error 404,
        #   reqId 1550: Order held while securities are located.'),
        # status='PreSubmitted', message='')],

        if event_name == 'status':

            # XXX: begin normalization of nonsense ib_insync internal
            # object-state tracking representations...

            # unwrap needed data from ib_insync internal types
            trade: Trade = item
            status: OrderStatus = trade.orderStatus

            # skip duplicate filled updates - we get the deats
            # from the execution details event
            msg = BrokerdStatus(

                reqid=trade.order.orderId,
                time_ns=time.time_ns(),  # cuz why not
                account=accounts_def.inverse[trade.order.account],

                # everyone doin camel case..
                status=status.status.lower(),  # force lower case

                filled=status.filled,
                reason=status.whyHeld,

                # this seems to not be necessarily up to date in the
                # execDetails event.. so we have to send it here I guess?
                remaining=status.remaining,

                broker_details={'name': 'ib'},
            )

        elif event_name == 'fill':

            # for wtv reason this is a separate event type
            # from IB, not sure why it's needed other then for extra
            # complexity and over-engineering :eyeroll:.
            # we may just end up dropping these events (or
            # translating them to ``Status`` msgs) if we can
            # show the equivalent status events are no more latent.

            # unpack ib_insync types
            # pep-0526 style:
            # https://www.python.org/dev/peps/pep-0526/#global-and-local-variable-annotations
            trade: Trade
            fill: Fill
            trade, fill = item
            execu: Execution = fill.execution

            # TODO: normalize out commissions details?
            details = {
                'contract': asdict(fill.contract),
                'execution': asdict(fill.execution),
                'commissions': asdict(fill.commissionReport),
                'broker_time': execu.time,   # supposedly server fill time
                'name': 'ib',
            }

            msg = BrokerdFill(
                # should match the value returned from `.submit_limit()`
                reqid=execu.orderId,
                time_ns=time.time_ns(),  # cuz why not

                action=action_map[execu.side],
                size=execu.shares,
                price=execu.price,

                broker_details=details,
                # XXX: required by order mode currently
                broker_time=details['broker_time'],

            )

        elif event_name == 'error':

            err: dict = item

            # f$#$% gawd dammit insync..
            con = err['contract']
            if isinstance(con, Contract):
                err['contract'] = asdict(con)

            if err['reqid'] == -1:
                log.error(f'TWS external order error:\n{pformat(err)}')

            # TODO: what schema for this msg if we're going to make it
            # portable across all backends?
            # msg = BrokerdError(**err)
            continue

        elif event_name == 'position':
            msg = pack_position(item)
            msg.account = accounts_def.inverse[msg.account]

        elif event_name == 'event':

            # it's either a general system status event or an external
            # trade event?
            log.info(f"TWS system status: \n{pformat(item)}")

            # TODO: support this again but needs parsing at the callback
            # level...
            # reqid = item.get('reqid', 0)
            # if getattr(msg, 'reqid', 0) < -1:
            #     log.info(f"TWS triggered trade\n{pformat(msg.dict())}")

            continue

            # msg.reqid = 'tws-' + str(-1 * reqid)

            # mark msg as from "external system"
            # TODO: probably something better then this.. and start
            # considering multiplayer/group trades tracking
            # msg.broker_details['external_src'] = 'tws'

        # XXX: we always serialize to a dict for msgpack
        # translations, ideally we can move to an msgspec (or other)
        # encoder # that can be enabled in ``tractor`` ahead of
        # time so we can pass through the message types directly.
        await ems_stream.send(msg.dict())

def load_flex_trades(
    path: Optional[str] = None,

) -> dict[str, str]:

    from pprint import pprint
    from ib_insync import flexreport, util

    conf = get_config()

    if not path:
        # load ``brokers.toml`` and try to get the flex
        # token and query id that must be previously defined
        # by the user.
        token = conf.get('flex_token')
        if not token:
            raise ValueError(
                'You must specify a ``flex_token`` field in your'
                '`brokers.toml` in order load your trade log, see our'
                'intructions for how to set this up here:\n'
                'PUT LINK HERE!'
            )

        qid = conf['flex_trades_query_id']

        # TODO: hack this into our logging
        # system like we do with the API client..
        util.logToConsole()

        # TODO: rewrite the query part of this with async..httpx?
        report = flexreport.FlexReport(
            token=token,
            queryId=qid,
        )

    else:
        # XXX: another project we could potentially look at,
        # https://pypi.org/project/ibflex/
        report = flexreport.FlexReport(path=path)

    trade_entries = report.extract('Trade')
    trades = {
        # XXX: LOL apparently ``toml`` has a bug
        # where a section key error will show up in the write
        # if you leave this as an ``int``?
        str(t.__dict__['tradeID']): t.__dict__
        for t in trade_entries
    }

    ln = len(trades)
    log.info(f'Loaded {ln} trades from flex query')

    trades_by_account = {}
    for tid, trade in trades.items():
        trades_by_account.setdefault(
            # oddly for some so-called "BookTrade" entries
            # this field seems to be blank, no cuckin clue.
            # trade['ibExecID']
            str(trade['accountId']), {}
        )[tid] = trade

    section = {'ib': trades_by_account}
    pprint(section)

    # TODO: load the config first and append in
    # the new trades loaded here..
    try:
        config.write(section, 'trades')
    except KeyError:
        import pdbpp; pdbpp.set_trace()  # noqa


if __name__ == '__main__':
    load_flex_trades()

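The `str(t.__dict__['tradeID'])` cast in `load_flex_trades()` exists because `toml` section keys must be strings (per the comment, integer keys trip up the writer). The keying and per-account grouping scheme, with invented trade rows:

trade_entries = [
    {'tradeID': 101, 'accountId': 'DU123456', 'shares': 10},
    {'tradeID': 102, 'accountId': 'DU123456', 'shares': -10},
]

# key each trade by its stringified id, then group per account,
# mirroring load_flex_trades()
trades = {str(t['tradeID']): t for t in trade_entries}

trades_by_account: dict = {}
for tid, trade in trades.items():
    trades_by_account.setdefault(str(trade['accountId']), {})[tid] = trade

assert list(trades_by_account['DU123456']) == ['101', '102']
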
@@ -1,938 +0,0 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for pikers)

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""
Data feed endpoints pre-wrapped and ready for use with ``tractor``/``trio``.

"""
from __future__ import annotations
import asyncio
from contextlib import asynccontextmanager as acm
from dataclasses import asdict
from datetime import datetime
from math import isnan
import time
from typing import (
    Callable,
    Optional,
    Awaitable,
)

from async_generator import aclosing
from fuzzywuzzy import process as fuzzy
import numpy as np
import pendulum
import tractor
import trio
from trio_typing import TaskStatus

from piker.data._sharedmem import ShmArray
from .._util import SymbolNotFound, NoData
from .api import (
    _adhoc_futes_set,
    log,
    load_aio_clients,
    ibis,
    MethodProxy,
    open_client_proxies,
    get_preferred_data_client,
    Ticker,
    RequestError,
    Contract,
)


# https://interactivebrokers.github.io/tws-api/tick_types.html
tick_types = {
    77: 'trade',

    # a "utrade" aka an off exchange "unreportable" (dark) vlm:
    # https://interactivebrokers.github.io/tws-api/tick_types.html#rt_volume
    48: 'dark_trade',

    # standard L1 ticks
    0: 'bsize',
    1: 'bid',
    2: 'ask',
    3: 'asize',
    4: 'last',
    5: 'size',
    8: 'volume',

    # ``ib_insync`` already packs these into
    # quotes under the following fields.
    # 55: 'trades_per_min',  # `'tradeRate'`
    # 56: 'vlm_per_min',  # `'volumeRate'`
    # 89: 'shortable',  # `'shortableShares'`
}

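The `tick_types` table above is consumed later in `normalize()` via a `.get()` lookup with an `'n/a'` fallback, so unmapped IB codes never raise. In sketch form:

tick_types = {0: 'bsize', 1: 'bid', 2: 'ask', 4: 'last', 77: 'trade'}

def tick_name(code: int) -> str:
    # unknown tick codes degrade to 'n/a' instead of a KeyError
    return tick_types.get(code, 'n/a')

assert tick_name(77) == 'trade'
assert tick_name(999) == 'n/a'
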
@acm
async def open_data_client() -> MethodProxy:
    '''
    Open the first found preferred "data client" as defined in the
    user's ``brokers.toml`` in the ``ib.prefer_data_account`` variable
    and deliver that client wrapped in a ``MethodProxy``.

    '''
    async with (
        open_client_proxies() as (proxies, clients),
    ):
        account_name, client = get_preferred_data_client(clients)
        proxy = proxies.get(f'ib.{account_name}')
        if not proxy:
            raise ValueError(
                f'No preferred data client could be found for {account_name}!'
            )

        yield proxy


@acm
async def open_history_client(
    symbol: str,

) -> tuple[Callable, int]:
    '''
    History retreival endpoint - delivers a historical frame callble
    that takes in ``pendulum.datetime`` and returns ``numpy`` arrays.

    '''
    async with open_data_client() as proxy:

        async def get_hist(
            end_dt: Optional[datetime] = None,
            start_dt: Optional[datetime] = None,

        ) -> tuple[np.ndarray, str]:

            out, fails = await get_bars(proxy, symbol, end_dt=end_dt)

            # TODO: add logic here to handle tradable hours and only grab
            # valid bars in the range
            if out is None:
                # could be trying to retreive bars over weekend
                log.error(f"Can't grab bars starting at {end_dt}!?!?")
                raise NoData(
                    f'{end_dt}',
                    frame_size=2000,
                )

            bars, bars_array, first_dt, last_dt = out

            # volume cleaning since there's -ve entries,
            # wood luv to know what crookery that is..
            vlm = bars_array['volume']
            vlm[vlm < 0] = 0

            return bars_array, first_dt, last_dt

        # TODO: it seems like we can do async queries for ohlc
        # but getting the order right still isn't working and I'm not
        # quite sure why.. needs some tinkering and probably
        # a lookthrough of the ``ib_insync`` machinery, for eg. maybe
        # we have to do the batch queries on the `asyncio` side?
        yield get_hist, {'erlangs': 1, 'rate': 6}


_pacing: str = (
    'Historical Market Data Service error '
    'message:Historical data request pacing violation'
)

async def get_bars(

    proxy: MethodProxy,
    fqsn: str,

    # blank to start which tells ib to look up the latest datum
    end_dt: str = '',

) -> (dict, np.ndarray):
    '''
    Retrieve historical data from a ``trio``-side task using
    a ``MethoProxy``.

    '''
    fails = 0
    bars: Optional[list] = None
    first_dt: datetime = None
    last_dt: datetime = None

    if end_dt:
        last_dt = pendulum.from_timestamp(end_dt.timestamp())

    for _ in range(10):
        try:
            out = await proxy.bars(
                fqsn=fqsn,
                end_dt=end_dt,
            )
            if out:
                bars, bars_array = out

            else:
                await tractor.breakpoint()

            if bars_array is None:
                raise SymbolNotFound(fqsn)

            first_dt = pendulum.from_timestamp(
                bars[0].date.timestamp())

            last_dt = pendulum.from_timestamp(
                bars[-1].date.timestamp())

            time = bars_array['time']
            assert time[-1] == last_dt.timestamp()
            assert time[0] == first_dt.timestamp()
            log.info(
                f'{len(bars)} bars retreived for {first_dt} -> {last_dt}'
            )

            return (bars, bars_array, first_dt, last_dt), fails

        except RequestError as err:
            msg = err.message
            # why do we always need to rebind this?
            # _err = err

            if 'No market data permissions for' in msg:
                # TODO: signalling for no permissions searches
                raise NoData(
                    f'Symbol: {fqsn}',
                )

            elif (
                err.code == 162
                and 'HMDS query returned no data' in err.message
            ):
                # XXX: this is now done in the storage mgmt layer
                # and we shouldn't implicitly decrement the frame dt
                # index since the upper layer may be doing so
                # concurrently and we don't want to be delivering frames
                # that weren't asked for.
                log.warning(
                    f'NO DATA found ending @ {end_dt}\n'
                )

                # try to decrement start point and look further back
                # end_dt = last_dt = last_dt.subtract(seconds=2000)

                raise NoData(
                    f'Symbol: {fqsn}',
                    frame_size=2000,
                )

            elif _pacing in msg:

                log.warning(
                    'History throttle rate reached!\n'
                    'Resetting farms with `ctrl-alt-f` hack\n'
                )
                # TODO: we might have to put a task lock around this
                # method..
                hist_ev = proxy.status_event(
                    'HMDS data farm connection is OK:ushmds'
                )

                # XXX: other event messages we might want to try and
                # wait for but i wasn't able to get any of this
                # reliable..
                # reconnect_start = proxy.status_event(
                #     'Market data farm is connecting:usfuture'
                # )
                # live_ev = proxy.status_event(
                #     'Market data farm connection is OK:usfuture'
                # )

                # try to wait on the reset event(s) to arrive, a timeout
                # will trigger a retry up to 6 times (for now).
                tries: int = 2
                timeout: float = 10

                # try 3 time with a data reset then fail over to
                # a connection reset.
                for i in range(1, tries):

                    log.warning('Sending DATA RESET request')
                    await data_reset_hack(reset_type='data')

                    with trio.move_on_after(timeout) as cs:
                        for name, ev in [
                            # TODO: not sure if waiting on other events
                            # is all that useful here or not. in theory
                            # you could wait on one of the ones above
                            # first to verify the reset request was
                            # sent?
                            ('history', hist_ev),
                        ]:
                            await ev.wait()
                            log.info(f"{name} DATA RESET")
                            break

                    if cs.cancelled_caught:
                        fails += 1
                        log.warning(
                            f'Data reset {name} timeout, retrying {i}.'
                        )

                        continue
                else:

                    log.warning('Sending CONNECTION RESET')
                    await data_reset_hack(reset_type='connection')

                    with trio.move_on_after(timeout) as cs:
                        for name, ev in [
                            # TODO: not sure if waiting on other events
                            # is all that useful here or not. in theory
                            # you could wait on one of the ones above
                            # first to verify the reset request was
                            # sent?
                            ('history', hist_ev),
                        ]:
                            await ev.wait()
                            log.info(f"{name} DATA RESET")

                    if cs.cancelled_caught:
                        fails += 1
                        log.warning('Data CONNECTION RESET timeout!?')

            else:
                raise

    return None, None
    # else:  # throttle wasn't fixed so error out immediately
    #     raise _err

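The pacing-violation branch in `get_bars()` waits on a data-farm status event inside `trio.move_on_after()` and treats a cancelled scope as a timed-out reset. The bare control-flow skeleton, detached from the IB machinery:

import trio

async def wait_reset(ev: trio.Event, timeout: float = 10) -> bool:
    # True if the reset event fired before the deadline
    with trio.move_on_after(timeout) as cs:
        await ev.wait()
    return not cs.cancelled_caught

async def main():
    ev = trio.Event()

    async def fire_soon():
        await trio.sleep(0.1)
        ev.set()

    async with trio.open_nursery() as nursery:
        nursery.start_soon(fire_soon)
        assert await wait_reset(ev)

trio.run(main)
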
async def backfill_bars(

    fqsn: str,
    shm: ShmArray,  # type: ignore # noqa

    # TODO: we want to avoid overrunning the underlying shm array buffer
    # and we should probably calc the number of calls to make depending
    # on that until we have the `marketstore` daemon in place in which
    # case the shm size will be driven by user config and available sys
    # memory.
    count: int = 16,

    task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,

) -> None:
    '''
    Fill historical bars into shared mem / storage afap.

    TODO: avoid pacing constraints:
    https://github.com/pikers/piker/issues/128

    '''
    # last_dt1 = None
    last_dt = None

    with trio.CancelScope() as cs:

        async with open_data_client() as proxy:

            out, fails = await get_bars(proxy, fqsn)

            if out is None:
                raise RuntimeError("Could not pull currrent history?!")

            (first_bars, bars_array, first_dt, last_dt) = out
            vlm = bars_array['volume']
            vlm[vlm < 0] = 0
            last_dt = first_dt

            # write historical data to buffer
            shm.push(bars_array)

            task_status.started(cs)

            i = 0
            while i < count:

                out, fails = await get_bars(proxy, fqsn, end_dt=first_dt)

                if out is None:
                    # could be trying to retreive bars over weekend
                    # TODO: add logic here to handle tradable hours and
                    # only grab valid bars in the range
                    log.error(f"Can't grab bars starting at {first_dt}!?!?")

                    # XXX: get_bars() should internally decrement dt by
                    # 2k seconds and try again.
                    continue

                (first_bars, bars_array, first_dt, last_dt) = out
                # last_dt1 = last_dt
                # last_dt = first_dt

                # volume cleaning since there's -ve entries,
                # wood luv to know what crookery that is..
                vlm = bars_array['volume']
                vlm[vlm < 0] = 0

                # TODO we should probably dig into forums to see what peeps
                # think this data "means" and then use it as an indicator of
                # sorts? dinkus has mentioned that $vlms for the day dont'
                # match other platforms nor the summary stat tws shows in
                # the monitor - it's probably worth investigating.

                shm.push(bars_array, prepend=True)
                i += 1

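`backfill_bars()` writes the most recent frame first and then grows history backwards with `shm.push(bars_array, prepend=True)`. The same growth pattern with a plain `numpy` buffer (1-column integer frames standing in for OHLCV rows):

import numpy as np

buf = np.array([], dtype=np.int64)

def push(frame: np.ndarray, prepend: bool = False) -> None:
    # live data appends; history backfills grow the front
    global buf
    buf = np.concatenate((frame, buf) if prepend else (buf, frame))

push(np.array([4, 5, 6]))                # newest frame first
push(np.array([1, 2, 3]), prepend=True)  # then older history
assert buf.tolist() == [1, 2, 3, 4, 5, 6]
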
asset_type_map = {
    'STK': 'stock',
    'OPT': 'option',
    'FUT': 'future',
    'CONTFUT': 'continuous_future',
    'CASH': 'forex',
    'IND': 'index',
    'CFD': 'cfd',
    'BOND': 'bond',
    'CMDTY': 'commodity',
    'FOP': 'futures_option',
    'FUND': 'mutual_fund',
    'WAR': 'warrant',
    'IOPT': 'warran',
    'BAG': 'bag',
    # 'NEWS': 'news',
}


_quote_streams: dict[str, trio.abc.ReceiveStream] = {}


async def _setup_quote_stream(

    from_trio: asyncio.Queue,
    to_trio: trio.abc.SendChannel,

    symbol: str,
    opts: tuple[int] = (
        '375',  # RT trade volume (excludes utrades)
        '233',  # RT trade volume (includes utrades)
        '236',  # Shortable shares

        # these all appear to only be updated every 25s thus
        # making them mostly useless and explains why the scanner
        # is always slow XD
        # '293',  # Trade count for day
        '294',  # Trade rate / minute
        '295',  # Vlm rate / minute
    ),
    contract: Optional[Contract] = None,

) -> trio.abc.ReceiveChannel:
    '''
    Stream a ticker using the std L1 api.

    This task is ``asyncio``-side and must be called from
    ``tractor.to_asyncio.open_channel_from()``.

    '''
    global _quote_streams

    to_trio.send_nowait(None)

    async with load_aio_clients() as accts2clients:
        caccount_name, client = get_preferred_data_client(accts2clients)
        contract = contract or (await client.find_contract(symbol))
        ticker: Ticker = client.ib.reqMktData(contract, ','.join(opts))

        # NOTE: it's batch-wise and slow af but I guess could
        # be good for backchecking? Seems to be every 5s maybe?
        # ticker: Ticker = client.ib.reqTickByTickData(
        #     contract, 'Last',
        # )

        # # define a simple queue push routine that streams quote packets
        # # to trio over the ``to_trio`` memory channel.
        # to_trio, from_aio = trio.open_memory_channel(2**8)  # type: ignore
        def teardown():
            ticker.updateEvent.disconnect(push)
            log.error(f"Disconnected stream for `{symbol}`")
            client.ib.cancelMktData(contract)

            # decouple broadcast mem chan
            _quote_streams.pop(symbol, None)

        def push(t: Ticker) -> None:
            """
            Push quotes to trio task.

            """
            # log.debug(t)
            try:
                to_trio.send_nowait(t)

            except (
                trio.BrokenResourceError,

                # XXX: HACK, not sure why this gets left stale (probably
                # due to our terrible ``tractor.to_asyncio``
                # implementation for streams.. but if the mem chan
                # gets left here and starts blocking just kill the feed?
                # trio.WouldBlock,
            ):
                # XXX: eventkit's ``Event.emit()`` for whatever redic
                # reason will catch and ignore regular exceptions
                # resulting in tracebacks spammed to console..
                # Manually do the dereg ourselves.
                teardown()
            except trio.WouldBlock:
                log.warning(
                    f'channel is blocking symbol feed for {symbol}?'
                    f'\n{to_trio.statistics}'
                )

            # except trio.WouldBlock:
            #     # for slow debugging purposes to avoid clobbering prompt
            #     # with log msgs
            #     pass

        ticker.updateEvent.connect(push)
        try:
            await asyncio.sleep(float('inf'))
        finally:
            teardown()

    # return from_aio

@acm
async def open_aio_quote_stream(

    symbol: str,
    contract: Optional[Contract] = None,

) -> trio.abc.ReceiveStream:

    from tractor.trionics import broadcast_receiver
    global _quote_streams

    from_aio = _quote_streams.get(symbol)
    if from_aio:

        # if we already have a cached feed deliver a rx side clone to consumer
        async with broadcast_receiver(
            from_aio,
            2**6,
        ) as from_aio:
            yield from_aio
            return

    async with tractor.to_asyncio.open_channel_from(
        _setup_quote_stream,
        symbol=symbol,
        contract=contract,

    ) as (first, from_aio):

        # cache feed for later consumers
        _quote_streams[symbol] = from_aio

        yield from_aio

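`open_aio_quote_stream()` keeps one upstream channel per symbol in `_quote_streams` and hands later subscribers a `broadcast_receiver` clone, so each symbol is only subscribed once with IB. The cache-or-create shape in isolation, using plain `trio` memory channels in place of the tractor plumbing:

import trio

_streams: dict[str, trio.MemoryReceiveChannel] = {}

async def open_stream(symbol: str) -> trio.MemoryReceiveChannel:
    # second and later consumers get the cached channel; the real
    # code wraps it in a broadcast_receiver() for fan-out
    rx = _streams.get(symbol)
    if rx is not None:
        return rx

    tx, rx = trio.open_memory_channel(64)
    _streams[symbol] = rx
    # a real producer task would be spawned here to feed `tx`
    await tx.send({'symbol': symbol, 'last': 0.0})  # demo item
    return rx

async def main():
    rx = await open_stream('mes.globex')
    assert await rx.receive() == {'symbol': 'mes.globex', 'last': 0.0}
    assert _streams['mes.globex'] is rx  # cached for reuse

trio.run(main)
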
# TODO: cython/mypyc/numba this!
def normalize(
    ticker: Ticker,
    calc_price: bool = False

) -> dict:

    # should be real volume for this contract by default
    calc_price = False

    # check for special contract types
    con = ticker.contract
    if type(con) in (
        ibis.Commodity,
        ibis.Forex,
    ):
        # commodities and forex don't have an exchange name and
        # no real volume so we have to calculate the price
        suffix = con.secType
        # no real volume on this tract
        calc_price = True

    else:
        suffix = con.primaryExchange
        if not suffix:
            suffix = con.exchange

        # append a `.<suffix>` to the returned symbol
        # key for derivatives that normally is the expiry
        # date key.
        expiry = con.lastTradeDateOrContractMonth
        if expiry:
            suffix += f'.{expiry}'

    # convert named tuples to dicts so we send usable keys
    new_ticks = []
    for tick in ticker.ticks:
        if tick and not isinstance(tick, dict):
            td = tick._asdict()
            td['type'] = tick_types.get(
                td['tickType'],
                'n/a',
            )

            new_ticks.append(td)

    tbt = ticker.tickByTicks
    if tbt:
        print(f'tickbyticks:\n {ticker.tickByTicks}')

    ticker.ticks = new_ticks

    # some contracts don't have volume so we may want to calculate
    # a midpoint price based on data we can acquire (such as bid / ask)
    if calc_price:
        ticker.ticks.append(
            {'type': 'trade', 'price': ticker.marketPrice()}
        )

    # serialize for transport
    data = asdict(ticker)

    # generate fqsn with possible specialized suffix
    # for derivatives, note the lowercase.
    data['symbol'] = data['fqsn'] = '.'.join(
        (con.symbol, suffix)
    ).lower()

    # convert named tuples to dicts for transport
    tbts = data.get('tickByTicks')
    if tbts:
        data['tickByTicks'] = [tbt._asdict() for tbt in tbts]

    # add time stamps for downstream latency measurements
    data['brokerd_ts'] = time.time()

    # stupid stupid shit...don't even care any more..
    # leave it until we do a proper latency study
    # if ticker.rtTime is not None:
    #     data['broker_ts'] = data['rtTime_s'] = float(
    #         ticker.rtTime.timestamp) / 1000.
    data.pop('rtTime')

    return data

|
|
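The tick-conversion loop in `normalize()` turns each namedtuple tick into a dict via `._asdict()` and tags it with a readable `type`. A minimal version with a hand-rolled tick tuple:

from collections import namedtuple

TickData = namedtuple('TickData', ['tickType', 'price', 'size'])
tick_types = {4: 'last', 48: 'dark_trade'}

ticks = [TickData(4, 4100.25, 3), TickData(48, 4100.00, 1)]

new_ticks = []
for tick in ticks:
    td = tick._asdict()
    # translate IB's integer code into a human readable name
    td['type'] = tick_types.get(td['tickType'], 'n/a')
    new_ticks.append(td)

assert new_ticks[0]['type'] == 'last'
assert new_ticks[1]['type'] == 'dark_trade'
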
async def stream_quotes(

    send_chan: trio.abc.SendChannel,
    symbols: list[str],
    feed_is_live: trio.Event,
    loglevel: str = None,

    # startup sync
    task_status: TaskStatus[tuple[dict, dict]] = trio.TASK_STATUS_IGNORED,

) -> None:
    '''
    Stream symbol quotes.

    This is a ``trio`` callable routine meant to be invoked
    once the brokerd is up.

    '''
    # TODO: support multiple subscriptions
    sym = symbols[0]
    log.info(f'request for real-time quotes: {sym}')

    async with open_data_client() as proxy:

        con, first_ticker, details = await proxy.get_sym_details(symbol=sym)
        first_quote = normalize(first_ticker)
        # print(f'first quote: {first_quote}')

        def mk_init_msgs() -> dict[str, dict]:
            '''
            Collect a bunch of meta-data useful for feed startup and
            pack in a `dict`-msg.

            '''
            # pass back some symbol info like min_tick, trading_hours, etc.
            syminfo = asdict(details)
            syminfo.update(syminfo['contract'])

            # nested dataclass we probably don't need and that won't IPC
            # serialize
            syminfo.pop('secIdList')

            # TODO: more consistent field translation
            atype = syminfo['asset_type'] = asset_type_map[syminfo['secType']]

            # for stocks it seems TWS reports too small a tick size
            # such that you can't submit orders with that granularity?
            min_tick = 0.01 if atype == 'stock' else 0

            syminfo['price_tick_size'] = max(syminfo['minTick'], min_tick)

            # for "traditional" assets, volume is normally discreet, not
            # a float
            syminfo['lot_tick_size'] = 0.0

            ibclient = proxy._aio_ns.ib.client
            host, port = ibclient.host, ibclient.port

            # TODO: for loop through all symbols passed in
            init_msgs = {
                # pass back token, and bool, signalling if we're the writer
                # and that history has been written
                sym: {
                    'symbol_info': syminfo,
                    'fqsn': first_quote['fqsn'],
                },
                'status': {
                    'data_ep': f'{host}:{port}',
                },

            }
            return init_msgs

        init_msgs = mk_init_msgs()

        # TODO: we should instead spawn a task that waits on a feed to start
        # and let it wait indefinitely..instead of this hard coded stuff.
        with trio.move_on_after(1):
            contract, first_ticker, details = await proxy.get_quote(symbol=sym)

        # it might be outside regular trading hours so see if we can at
        # least grab history.
        if isnan(first_ticker.last):
            task_status.started((init_msgs, first_quote))

            # it's not really live but this will unblock
            # the brokerd feed task to tell the ui to update?
            feed_is_live.set()

            # block and let data history backfill code run.
            await trio.sleep_forever()
            return  # we never expect feed to come up?

        async with open_aio_quote_stream(
            symbol=sym,
            contract=con,
        ) as stream:

            # ugh, clear ticks since we've consumed them
            # (ahem, ib_insync is stateful trash)
            first_ticker.ticks = []

            task_status.started((init_msgs, first_quote))

            async with aclosing(stream):
                if type(first_ticker.contract) not in (
                    ibis.Commodity,
                    ibis.Forex
                ):
                    # wait for real volume on feed (trading might be closed)
                    while True:
                        ticker = await stream.receive()

                        # for a real volume contract we rait for the first
                        # "real" trade to take place
                        if (
                            # not calc_price
                            # and not ticker.rtTime
                            not ticker.rtTime
                        ):
                            # spin consuming tickers until we get a real
                            # market datum
                            log.debug(f"New unsent ticker: {ticker}")
                            continue
                        else:
                            log.debug("Received first real volume tick")
                            # ugh, clear ticks since we've consumed them
                            # (ahem, ib_insync is truly stateful trash)
                            ticker.ticks = []

                            # XXX: this works because we don't use
                            # ``aclosing()`` above?
                            break

                    quote = normalize(ticker)
                    log.debug(f"First ticker received {quote}")

                # tell caller quotes are now coming in live
                feed_is_live.set()

                # last = time.time()
                async for ticker in stream:
                    quote = normalize(ticker)
                    await send_chan.send({quote['fqsn']: quote})

                    # ugh, clear ticks since we've consumed them
                    ticker.ticks = []
                    # last = time.time()

async def data_reset_hack(
    reset_type: str = 'data',

) -> None:
    '''
    Run key combos for resetting data feeds and yield back to caller
    when complete.

    This is a linux-only hack around:

    https://interactivebrokers.github.io/tws-api/historical_limitations.html#pacing_violations

    TODOs:
        - a return type that hopefully determines if the hack was
          successful.
        - other OS support?
        - integration with ``ib-gw`` run in docker + Xorg?

    '''

    async def vnc_click_hack(
        reset_type: str = 'data'
    ) -> None:
        '''
        Reset the data or netowork connection for the VNC attached
        ib gateway using magic combos.

        '''
        key = {'data': 'f', 'connection': 'r'}[reset_type]

        import asyncvnc

        async with asyncvnc.connect(
            'localhost',
            port=3003,
            # password='ibcansmbz',
        ) as client:

            # move to middle of screen
            # 640x1800
            client.mouse.move(
                x=500,
                y=500,
            )
            client.mouse.click()
            client.keyboard.press('Ctrl', 'Alt', key)  # keys are stacked

    await tractor.to_asyncio.run_task(vnc_click_hack)

    # we don't really need the ``xdotool`` approach any more B)
    return True

@tractor.context
async def open_symbol_search(
    ctx: tractor.Context,

) -> None:

    # TODO: load user defined symbol set locally for fast search?
    await ctx.started({})

    async with open_data_client() as proxy:
        async with ctx.open_stream() as stream:

            last = time.time()

            async for pattern in stream:
                log.debug(f'received {pattern}')
                now = time.time()

                assert pattern, 'IB can not accept blank search pattern'

                # throttle search requests to no faster then 1Hz
                diff = now - last
                if diff < 1.0:
                    log.debug('throttle sleeping')
                    await trio.sleep(diff)
                    try:
                        pattern = stream.receive_nowait()
                    except trio.WouldBlock:
                        pass

                if not pattern or pattern.isspace():
                    log.warning('empty pattern received, skipping..')

                    # TODO: *BUG* if nothing is returned here the client
                    # side will cache a null set result and not showing
                    # anything to the use on re-searches when this query
                    # timed out. We probably need a special "timeout" msg
                    # or something...

                    # XXX: this unblocks the far end search task which may
                    # hold up a multi-search nursery block
                    await stream.send({})

                    continue

                log.debug(f'searching for {pattern}')

                last = time.time()

                # async batch search using api stocks endpoint and module
                # defined adhoc symbol set.
                stock_results = []

                async def stash_results(target: Awaitable[list]):
                    stock_results.extend(await target)

                async with trio.open_nursery() as sn:
                    sn.start_soon(
                        stash_results,
                        proxy.search_symbols(
                            pattern=pattern,
                            upto=5,
                        ),
                    )

                    # trigger async request
                    await trio.sleep(0)

                # match against our ad-hoc set immediately
                adhoc_matches = fuzzy.extractBests(
                    pattern,
                    list(_adhoc_futes_set),
                    score_cutoff=90,
                )
                log.info(f'fuzzy matched adhocs: {adhoc_matches}')
                adhoc_match_results = {}
                if adhoc_matches:
                    # TODO: do we need to pull contract details?
                    adhoc_match_results = {i[0]: {} for i in adhoc_matches}

                log.debug(f'fuzzy matching stocks {stock_results}')
                stock_matches = fuzzy.extractBests(
                    pattern,
                    stock_results,
                    score_cutoff=50,
                )

                matches = adhoc_match_results | {
                    item[0]: {} for item in stock_matches
                }
                # TODO: we used to deliver contract details
                # {item[2]: item[0] for item in stock_matches}

                log.debug(f"sending matches: {matches.keys()}")
                await stream.send(matches)

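The search loop in `open_symbol_search()` rate limits to roughly 1Hz by sleeping when requests arrive too quickly (note it sleeps `diff`, the elapsed interval; the more conventional form below computes the *remaining* time in the period). The timing core, shorn of the stream plumbing:

def remaining_wait(last: float, now: float, rate_hz: float = 1.0) -> float:
    # seconds still to wait before the next request may fire;
    # zero means go immediately
    period = 1.0 / rate_hz
    return max(0.0, period - (now - last))

last = 1000.0
assert remaining_wait(last, 1000.25) == 0.75
assert remaining_wait(last, 1002.0) == 0.0
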
@@ -21,7 +21,6 @@ Kraken backend.
 from contextlib import asynccontextmanager as acm
 from dataclasses import asdict, field
 from datetime import datetime
-from pprint import pformat
 from typing import Any, Optional, AsyncIterator, Callable, Union
 import time

@@ -570,10 +569,7 @@ async def handle_order_requests(
     order: BrokerdOrder

     async for request_msg in ems_order_stream:
-        log.info(
-            'Received order request:\n'
-            f'{pformat(request_msg)}'
-        )
+        log.info(f'Received order request {request_msg}')

         action = request_msg['action']

@@ -632,7 +628,6 @@ async def handle_order_requests(
             # update the internal pairing of oid to krakens
             # txid with the new txid that is returned on edit
             reqid = resp['result']['txid']
-
             # deliver ack that order has been submitted to broker routing
             await ems_order_stream.send(
                 BrokerdOrderAck(

@@ -793,10 +788,7 @@ async def trades_dialogue(
     # Get websocket token for authenticated data stream
     # Assert that a token was actually received.
     resp = await client.endpoint('GetWebSocketsToken', {})
-
-    # lol wtf is this..
     assert resp['error'] == []
-
     token = resp['result']['token']

     async with (

@@ -35,6 +35,7 @@ import pendulum
 import trio
 import tractor
 from async_generator import asynccontextmanager
+import pandas as pd
 import numpy as np
 import wrapt
 import asks

@ -668,7 +669,7 @@ def get_OHLCV(
|
||||||
"""
|
"""
|
||||||
del bar['end']
|
del bar['end']
|
||||||
del bar['VWAP']
|
del bar['VWAP']
|
||||||
bar['start'] = pendulum.from_timestamp(bar['start']) / 10**9
|
bar['start'] = pd.Timestamp(bar['start']).value/10**9
|
||||||
return tuple(bar.values())
|
return tuple(bar.values())
|
||||||
|
|
||||||
|
|
||||||
|
|
|
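
Both sides of the `bar['start']` line aim at epoch seconds; `pd.Timestamp(...).value` is integer nanoseconds since the epoch, so dividing by `10**9` yields seconds. A minimal sketch of the equivalence (the date literal is illustrative):

```python
import pandas as pd
import pendulum

iso = '2021-06-01T09:30:00-04:00'

# pandas: .value is integer nanoseconds since the unix epoch
secs_pd = pd.Timestamp(iso).value / 10**9

# pendulum: .timestamp() is float seconds directly
secs_pendulum = pendulum.parse(iso).timestamp()

assert int(secs_pd) == int(secs_pendulum)
```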
@@ -80,9 +80,7 @@ def mk_check(
         return check_lt

-    raise ValueError(
-        f'trigger: {trigger_price}, last: {known_last}'
-    )
+    raise ValueError('trigger: {trigger_price}, last: {known_last}')


 @dataclass

@@ -563,10 +561,7 @@ async def translate_and_relay_brokerd_events(

         name = brokerd_msg['name']

-        log.info(
-            f'Received broker trade event:\n'
-            f'{pformat(brokerd_msg)}'
-        )
+        log.info(f'Received broker trade event:\n{pformat(brokerd_msg)}')

         if name == 'position':

@@ -618,28 +613,19 @@ async def translate_and_relay_brokerd_events(
            # packed at submission since we already know it ahead of
            # time
            paper = brokerd_msg['broker_details'].get('paper_info')
-            ext = brokerd_msg['broker_details'].get('external')
            if paper:
                # paperboi keeps the ems id up front
                oid = paper['oid']

-            elif ext:
+            else:
                # may be an order msg specified as "external" to the
                # piker ems flow (i.e. generated by some other
                # external broker backend client (like tws for ib)
+                ext = brokerd_msg['broker_details'].get('external')
+                if ext:
                    log.error(f"External trade event {ext}")

                continue

-            else:
-                # something is out of order, we don't have an oid for
-                # this broker-side message.
-                log.error(
-                    'Unknown oid:{oid} for msg:\n'
-                    f'{pformat(brokerd_msg)}'
-                    'Unable to relay message to client side!?'
-                )
-
        else:
            # check for existing live flow entry
            entry = book._ems_entries.get(oid)

@@ -837,9 +823,7 @@ async def process_client_order_cmds(
            if reqid:

                # send cancel to brokerd immediately!
-                log.info(
-                    f'Submitting cancel for live order {reqid}'
-                )
+                log.info("Submitting cancel for live order {reqid}")

                await brokerd_order_stream.send(msg.dict())
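
`mk_check()` returns a comparison closure chosen from the trigger price's position relative to the last known price, and the `raise ValueError(...)` path covers the ambiguous case. A self-contained sketch of that closure-factory pattern (the body is illustrative, not the repo's exact logic):

```python
from typing import Callable

def mk_check(
    trigger_price: float,
    known_last: float,
) -> Callable[[float], bool]:
    '''
    Return a predicate that fires once price crosses the trigger
    from the side implied by the last known price.

    '''
    if trigger_price > known_last:
        def check_gt(price: float) -> bool:
            return price >= trigger_price
        return check_gt

    if trigger_price < known_last:
        def check_lt(price: float) -> bool:
            return price <= trigger_price
        return check_lt

    # trigger == last: no crossing direction can be inferred
    raise ValueError(
        f'trigger: {trigger_price}, last: {known_last}'
    )
```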
@@ -242,7 +242,7 @@ class BrokerdError(BaseModel):

     # if no brokerd order request was actually submitted (eg. we errored
     # at the ``pikerd`` layer) then there will be ``reqid`` allocated.
-    reqid: Optional[Union[int, str]] = None
+    reqid: Union[int, str] = ''

     symbol: str
     reason: str
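
The typing change matters for validation: with `Optional[Union[int, str]] = None` pydantic accepts a missing `reqid` as `None`, while `Union[int, str] = ''` coerces absence to an empty-string sentinel. A minimal sketch of the difference (pydantic v1-style API, model trimmed down):

```python
from typing import Optional, Union
from pydantic import BaseModel

class BrokerdErrorOld(BaseModel):
    reqid: Optional[Union[int, str]] = None
    reason: str

class BrokerdErrorNew(BaseModel):
    reqid: Union[int, str] = ''
    reason: str

# both accept an omitted reqid, but the "empty" value differs:
assert BrokerdErrorOld(reason='x').reqid is None
assert BrokerdErrorNew(reason='x').reqid == ''
```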
@@ -1,25 +1,7 @@
-# piker: trading gear for hackers
-# Copyright (C) 2018-present Tyler Goodlet (in stewardship of pikers)
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-'''
+"""
 CLI commons.
-
-'''
+"""
 import os
-from pprint import pformat

 import click
 import trio

@@ -72,22 +54,17 @@ def pikerd(loglevel, host, tl, pdb, tsdb):
         trio.open_nursery() as n,
     ):
         if tsdb:
-            from piker.data._ahab import start_ahab
-            from piker.data.marketstore import start_marketstore
+            # TODO:
+            # async with maybe_open_marketstored():

+            from piker.data._ahab import start_ahab
             log.info('Spawning `marketstore` supervisor')
-            ctn_ready, config, (cid, pid) = await n.start(
+            ctn_ready = await n.start(
                 start_ahab,
                 'marketstored',
-                start_marketstore,

-            )
-            log.info(
-                f'`marketstore` up!\n'
-                f'`marketstored` pid: {pid}\n'
-                f'docker container id: {cid}\n'
-                f'config: {pformat(config)}'
             )
+            await ctn_ready.wait()
+            log.info('`marketstore` container:{uid} up')

         await trio.sleep_forever()
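
The `pikerd` change leans on trio's startup handshake: `await nursery.start(task)` runs `task` until it calls `task_status.started(value)` and hands that value back to the caller, which is how the left-hand branch delivers its `(ready_event, config, (cid, pid))` tuple. A generic sketch of the protocol (the supervised "service" body is a stand-in):

```python
import trio

async def supervisor(
    task_status=trio.TASK_STATUS_IGNORED,
):
    ready = trio.Event()
    # ... spawn/prepare the supervised service here ...
    # hand startup values back to the ``nursery.start()`` caller
    task_status.started((ready, {'listen_port': 5993}))
    ready.set()
    await trio.sleep_forever()

async def main():
    async with trio.open_nursery() as n:
        ready, config = await n.start(supervisor)
        await ready.wait()
        print(f'service up with config: {config}')
        n.cancel_scope.cancel()

trio.run(main)
```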
@@ -1,5 +1,5 @@
 # piker: trading gear for hackers
-# Copyright (C) 2018-present Tyler Goodlet (in stewardship for pikers)
+# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)

 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by

@@ -16,7 +16,6 @@

 """
 Broker configuration mgmt.
-
 """
 import platform
 import sys

@@ -51,7 +50,7 @@ def get_app_dir(app_name, roaming=True, force_posix=False):
     Unix (POSIX):
       ``~/.foo-bar``
     Win XP (roaming):
-      ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo``
+      ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
     Win XP (not roaming):
       ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
     Win 7 (roaming):

@@ -82,8 +81,7 @@ def get_app_dir(app_name, roaming=True, force_posix=False):
         folder = os.path.expanduser("~")
         return os.path.join(folder, app_name)
     if force_posix:
-        return os.path.join(
-            os.path.expanduser("~/.{}".format(_posixify(app_name))))
+        return os.path.join(os.path.expanduser("~/.{}".format(_posixify(app_name))))
     if sys.platform == "darwin":
         return os.path.join(
             os.path.expanduser("~/Library/Application Support"), app_name

@@ -109,12 +107,7 @@ if _parent_user:
     ]
 )

-_conf_names: set[str] = {
-    'brokers',
-    'trades',
-    'watchlists',
-}
+_file_name = 'brokers.toml'

 _watchlists_data_path = os.path.join(_config_dir, 'watchlists.json')
 _context_defaults = dict(
     default_map={

@@ -136,43 +129,23 @@ def _override_config_dir(
     _config_dir = path


-def _conf_fn_w_ext(
-    name: str,
-) -> str:
-    # change this if we ever change the config file format.
-    return f'{name}.toml'
-
-
-def get_conf_path(
-    conf_name: str = 'brokers',
-
-) -> str:
+def get_broker_conf_path():
     """Return the default config path normally under
     ``~/.config/piker`` on linux.

     Contains files such as:
     - brokers.toml
     - watchlists.toml
-    - trades.toml
-
-    # maybe coming soon ;)
     - signals.toml
     - strats.toml

     """
-    assert conf_name in _conf_names
-    fn = _conf_fn_w_ext(conf_name)
-    return os.path.join(
-        _config_dir,
-        fn,
-    )
+    return os.path.join(_config_dir, _file_name)


 def repodir():
-    '''
-    Return the abspath to the repo directory.
-
-    '''
+    """Return the abspath to the repo directory.
+    """
     dirpath = os.path.abspath(
         # we're 3 levels down in **this** module file
         dirname(dirname(os.path.realpath(__file__)))

@@ -181,27 +154,16 @@ def repodir():


 def load(
-    conf_name: str = 'brokers',
     path: str = None

 ) -> (dict, str):
-    '''
-    Load config file by name.
-
-    '''
-    path = path or get_conf_path(conf_name)
+    """Load broker config.
+    """
+    path = path or get_broker_conf_path()
     if not os.path.isfile(path):
-        fn = _conf_fn_w_ext(conf_name)
-
-        template = os.path.join(
-            repodir(),
-            'config',
-            fn
+        shutil.copyfile(
+            os.path.join(repodir(), 'config', 'brokers.toml'),
+            path,
         )
-        # try to copy in a template config to the user's directory
-        # if one exists.
-        if os.path.isfile(template):
-            shutil.copyfile(template, path)

     config = toml.load(path)
     log.debug(f"Read config file {path}")

@@ -210,17 +172,13 @@ def load(

 def write(
     config: dict,  # toml config as dict
-    name: str = 'brokers',
     path: str = None,

 ) -> None:
-    ''''
-    Write broker config to disk.
+    """Write broker config to disk.

     Create a ``brokers.ini`` file if one does not exist.
-
-    '''
-    path = path or get_conf_path(name)
+    """
+    path = path or get_broker_conf_path()
     dirname = os.path.dirname(path)
     if not os.path.isdir(dirname):
         log.debug(f"Creating config dir {_config_dir}")

@@ -230,10 +188,7 @@ def write(
         raise ValueError(
             "Watch out you're trying to write a blank config!")

-    log.debug(
-        f"Writing config `{name}` file to:\n"
-        f"{path}"
-    )
+    log.debug(f"Writing config file {path}")
     with open(path, 'w') as cf:
         return toml.dump(config, cf)

@@ -263,5 +218,4 @@ def load_accounts(

     # our default paper engine entry
     accounts['paper'] = None
-
     return accounts
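
Taken together these hunks move between a single hard-coded `brokers.toml` (right side) and named config files resolved through `get_conf_path()` (left side). A hedged sketch condensing the left-side API (the paths and the written content are illustrative):

```python
import os
import toml

_config_dir = os.path.expanduser('~/.config/piker')
_conf_names = {'brokers', 'trades', 'watchlists'}

def _conf_fn_w_ext(name: str) -> str:
    # change this if we ever change the config file format.
    return f'{name}.toml'

def get_conf_path(conf_name: str = 'brokers') -> str:
    assert conf_name in _conf_names
    return os.path.join(_config_dir, _conf_fn_w_ext(conf_name))

# eg. read-modify-write a named config file
path = get_conf_path('trades')
config = toml.load(path) if os.path.isfile(path) else {}
config.setdefault('meta', {})['version'] = 1  # illustrative content
with open(path, 'w') as cf:
    toml.dump(config, cf)
```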
@@ -1,5 +1,5 @@
 # piker: trading gear for hackers
-# Copyright (C) 2018-present Tyler Goodlet (in stewardship of pikers)
+# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)

 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as published by

@@ -19,25 +19,19 @@ Supervisor for docker with included specific-image service helpers.

 '''
 import os
-import time
 from typing import (
     Optional,
-    Callable,
-    Any,
+    # Any,
 )
 from contextlib import asynccontextmanager as acm

 import trio
 from trio_typing import TaskStatus
 import tractor
-from tractor.msg import NamespacePath
 import docker
 import json
 from docker.models.containers import Container as DockerContainer
-from docker.errors import (
-    DockerException,
-    APIError,
-)
+from docker.errors import DockerException, APIError
 from requests.exceptions import ConnectionError, ReadTimeout

 from ..log import get_logger, get_console_log

@@ -46,14 +40,49 @@ from .. import config
 log = get_logger(__name__)


+_config = '''
+# piker's ``marketstore`` config.
+
+# mount this config using:
+# sudo docker run --mount \
+# type=bind,source="$HOME/.config/piker/",target="/etc" -i -p \
+# 5993:5993 alpacamarkets/marketstore:latest
+
+root_directory: data
+listen_port: 5993
+grpc_listen_port: 5995
+log_level: debug
+queryable: true
+stop_grace_period: 0
+wal_rotate_interval: 5
+stale_threshold: 5
+enable_add: true
+enable_remove: false
+
+triggers:
+  - module: ondiskagg.so
+    on: "*/1Sec/OHLCV"
+    config:
+        # filter: "nasdaq"
+        destinations:
+            - 1Min
+            - 5Min
+            - 15Min
+            - 1H
+            - 1D
+
+  - module: stream.so
+    on: '*/*/*'
+    # config:
+    #     filter: "nasdaq"
+
+'''
+
+
 class DockerNotStarted(Exception):
     'Prolly you dint start da daemon bruh'


-class ContainerError(RuntimeError):
-    'Error reported via app-container logging level'
-
-
 @acm
 async def open_docker(
     url: Optional[str] = None,

@@ -99,6 +128,9 @@ async def open_docker(
     finally:
         if client:
             client.close()
+            # client.api._custom_adapter.close()
+            for c in client.containers.list():
+                c.kill()


 class Container:

@@ -154,10 +186,6 @@ class Container:

         getattr(log, level, log.error)(f'{msg}')

-        # print(f'level: {level}')
-        if level in ('error', 'fatal'):
-            raise ContainerError(msg)
-
         if patt in msg:
             return True

@@ -182,111 +210,132 @@ class Container:
            return True

        except docker.errors.APIError as err:
+            # _err = err
            if 'is not running' in err.explanation:
                return False

    async def cancel(
        self,
-        stop_msg: str,
    ) -> None:

        cid = self.cntr.id
-        # first try a graceful cancel
-        log.cancel(
-            f'SIGINT cancelling container: {cid}\n'
-            f'waiting on stop msg: "{stop_msg}"'
-        )
        self.try_signal('SIGINT')

-        start = time.time()
-        for _ in range(30):
-
-            with trio.move_on_after(0.5) as cs:
-                cs.shield = True
-                await self.process_logs_until(stop_msg)
-
-                # if we aren't cancelled on above checkpoint then we
-                # assume we read the expected stop msg and terminated.
-                break
-
-            try:
-                log.info(f'Polling for container shutdown:\n{cid}')
-
-                if self.cntr.status not in {'exited', 'not-running'}:
-                    self.cntr.wait(
-                        timeout=0.1,
-                        condition='not-running',
-                    )
-
-                break
-
-            except (
-                ReadTimeout,
-            ):
-                log.info(f'Still waiting on container:\n{cid}')
-                continue
-
-            except (
-                docker.errors.APIError,
-                ConnectionError,
-            ):
-                log.exception('Docker connection failure')
-                break
-        else:
-            delay = time.time() - start
-            log.error(
-                f'Failed to kill container {cid} after {delay}s\n'
-                'sending SIGKILL..'
-            )
+        with trio.move_on_after(0.5) as cs:
+            cs.shield = True
+            # print('PROCESSINGN LOGS')
+            await self.process_logs_until('initiating graceful shutdown')
+            # print('SHUTDOWN REPORTED BY CONTAINER')
+            await self.process_logs_until('exiting...',)
+
+        for _ in range(10):
+            with trio.move_on_after(0.5) as cs:
+                cs.shield = True
+                # print('waiting on EXITING')
+                await self.process_logs_until('exiting...',)
+                # print('got EXITING')
+                break
+
+            if cs.cancelled_caught:
                # get out the big guns, bc apparently marketstore
                # doesn't actually know how to terminate gracefully
                # :eyeroll:...
                self.try_signal('SIGKILL')

+                try:
+                    log.info('Waiting on container shutdown: {cid}')
                    self.cntr.wait(
-                        timeout=3,
+                        timeout=0.1,
                        condition='not-running',
                    )
+                    break
+
+                except (
+                    ReadTimeout,
+                    ConnectionError,
+                ):
+                    log.error(f'failed to wait on container {cid}')
+                    raise
+
+        else:
+            raise RuntimeError('Failed to cancel container {cid}')

        log.cancel(f'Container stopped: {cid}')


 @tractor.context
-async def open_ahabd(
+async def open_marketstored(
     ctx: tractor.Context,
-    endpoint: str,  # ns-pointer str-msg-type

     **kwargs,

 ) -> None:
-    get_console_log('info', name=__name__)
+    '''
+    Start and supervise a marketstore instance with its config bind-mounted
+    in from the piker config directory on the system.
+
+    The equivalent cli cmd to this code is:
+
+        sudo docker run --mount \
+        type=bind,source="$HOME/.config/piker/",target="/etc" -i -p \
+        5993:5993 alpacamarkets/marketstore:latest
+
+    '''
+    log = get_console_log('info', name=__name__)

     async with open_docker() as client:

-        # TODO: eventually offer a config-oriented API to do the mounts,
-        # params, etc. passing to ``Containter.run()``?
-        # call into endpoint for container config/init
-        ep_func = NamespacePath(endpoint).load_ref()
-        (
-            dcntr,
-            cntr_config,
-            start_msg,
-            stop_msg,
-        ) = ep_func(client)
+        # create a mount from user's local piker config dir into container
+        config_dir_mnt = docker.types.Mount(
+            target='/etc',
+            source=config._config_dir,
+            type='bind',
+        )
+
+        # create a user config subdir where the marketstore
+        # backing filesystem database can be persisted.
+        persistent_data_dir = os.path.join(
+            config._config_dir, 'data',
+        )
+        if not os.path.isdir(persistent_data_dir):
+            os.mkdir(persistent_data_dir)
+
+        data_dir_mnt = docker.types.Mount(
+            target='/data',
+            source=persistent_data_dir,
+            type='bind',
+        )
+
+        dcntr: DockerContainer = client.containers.run(
+            'alpacamarkets/marketstore:latest',
+            # do we need this for cmds?
+            # '-i',
+
+            # '-p 5993:5993',
+            ports={
+                '5993/tcp': 5993,  # jsonrpc
+                '5995/tcp': 5995,  # grpc
+            },
+            mounts=[config_dir_mnt, data_dir_mnt],
+            detach=True,
+            # stop_signal='SIGINT',
+            init=True,
+            # remove=True,
+        )
        cntr = Container(dcntr)

        with trio.move_on_after(1):
-            found = await cntr.process_logs_until(start_msg)
+            found = await cntr.process_logs_until(
+                "launching tcp listener for all services...",
+            )

            if not found and cntr not in client.containers.list():
                raise RuntimeError(
                    'Failed to start `marketstore` check logs deats'
                )

-        await ctx.started((
-            cntr.cntr.id,
-            os.getpid(),
-            cntr_config,
-        ))
+        await ctx.started((cntr.cntr.id, os.getpid()))
+
+        # async with ctx.open_stream() as stream:

        try:

@@ -295,20 +344,41 @@ async def open_marketstored(
            # callers to have root perms?
            await trio.sleep_forever()

-        finally:
+            # await cntr.cancel()
+            # with trio.CancelScope(shield=True):
+            #     # block for the expected "teardown log msg"..
+            #     # await cntr.process_logs_until('exiting...',)
+
+            #     # only msg should be to signal killing the
+            #     # container and this super daemon.
+            #     msg = await stream.receive()
+            #     # print("GOT CANCEL MSG")
+
+            #     cid = msg['cancel']
+            #     log.cancel(f'Cancelling container {cid}')
+
+            #     # print("CANCELLING CONTAINER")
+            #     await cntr.cancel()
+
+            #     # print("SENDING ACK")
+            #     await stream.send('ack')
+
+        except (
+            BaseException,
+            # trio.Cancelled,
+            # KeyboardInterrupt,
+        ):
            with trio.CancelScope(shield=True):
-                await cntr.cancel(stop_msg)
+                await cntr.cancel()
+                # await stream.send('ack')
+
+            raise


 async def start_ahab(
     service_name: str,
-    endpoint: Callable[docker.DockerClient, DockerContainer],
-    task_status: TaskStatus[
-        tuple[
-            trio.Event,
-            dict[str, Any],
-        ],
-    ] = trio.TASK_STATUS_IGNORED,
+    task_status: TaskStatus[trio.Event] = trio.TASK_STATUS_IGNORED,

 ) -> None:
     '''

@@ -347,20 +417,24 @@ async def start_ahab(
            )[2]  # named user's uid
        )

+        task_status.started(cn_ready)
+
        async with portal.open_context(
-            open_ahabd,
-            endpoint=str(NamespacePath.from_ref(endpoint)),
+            open_marketstored,
        ) as (ctx, first):

-            cid, pid, cntr_config = first
+            cid, pid = first

-            task_status.started((
-                cn_ready,
-                cntr_config,
-                (cid, pid),
-            ))

            await trio.sleep_forever()
+            # async with ctx.open_stream() as stream:
+            #     try:
+            #         # run till cancelled
+            #         await trio.sleep_forever()
+            #     finally:
+            #         with trio.CancelScope(shield=True):
+            #             # print('SENDING CANCEL TO MARKETSTORED')
+            #             await stream.send({'cancel': (cid, pid)})
+            #             assert await stream.receive() == 'ack'

            # since we demoted root perms in this parent
            # we'll get a perms error on proc cleanup in

@@ -371,6 +445,7 @@ async def start_ahab(
            # TODO: we could also consider adding
            # a ``tractor.ZombieDetected`` or something that we could raise
            # if we find the child didn't terminate.
+            # await tractor.breakpoint()
        except PermissionError:
            log.warning('Failed to cancel root permsed container')

@@ -383,3 +458,12 @@ async def start_ahab(
            return
        else:
            raise
+
+
+async def main():
+    await start_ahab()
+    await trio.sleep_forever()
+
+
+if __name__ == '__main__':
+    trio.run(main)
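
Both versions of `Container.cancel()` ride the same docker-py primitives: signal the container, watch its logs for a shutdown message, then `wait()` for the `not-running` state, escalating to SIGKILL on timeout. A condensed sketch of that escalation loop (a minimal version under stated assumptions, not this repo's exact supervisor):

```python
import docker
from requests.exceptions import ReadTimeout

def stop_container(cntr) -> None:
    # ask nicely first..
    cntr.kill(signal='SIGINT')
    try:
        # block until the daemon reports the container stopped
        cntr.wait(timeout=3, condition='not-running')
    except ReadTimeout:
        # ..then get out the big guns
        cntr.kill(signal='SIGKILL')
        cntr.wait(timeout=3, condition='not-running')

client = docker.from_env()
cntr = client.containers.run(
    'alpacamarkets/marketstore:latest',
    detach=True,
    init=True,
)
stop_container(cntr)
```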
@@ -22,7 +22,7 @@ financial data flows.
 from __future__ import annotations
 from collections import Counter
 import time
-from typing import TYPE_CHECKING, Optional, Union
+from typing import TYPE_CHECKING, Optional

 import tractor
 import trio

@@ -32,7 +32,6 @@ from ..log import get_logger

 if TYPE_CHECKING:
     from ._sharedmem import ShmArray
-    from .feed import _FeedsBus

 log = get_logger(__name__)

@@ -143,17 +142,11 @@ async def broadcast(
     shm: Optional[ShmArray] = None,

 ) -> None:
-    '''
-    Broadcast the given ``shm: ShmArray``'s buffer index step to any
-    subscribers for a given sample period.
-
-    The sent msg will include the first and last index which slice into
-    the buffer's non-empty data.
-
-    '''
+    # broadcast the buffer index step to any subscribers for
+    # a given sample period.
     subs = sampler.subscribers.get(delay_s, ())

-    first = last = -1
+    last = -1

     if shm is None:
         periods = sampler.ohlcv_shms.keys()

@@ -163,16 +156,11 @@ async def broadcast(
         if periods:
             lowest = min(periods)
             shm = sampler.ohlcv_shms[lowest][0]
-            first = shm._first.value
             last = shm._last.value

     for stream in subs:
         try:
-            await stream.send({
-                'first': first,
-                'last': last,
-                'index': last,
-            })
+            await stream.send({'index': last})
         except (
             trio.BrokenResourceError,
             trio.ClosedResourceError

@@ -220,7 +208,7 @@ async def iter_ohlc_periods(

 async def sample_and_broadcast(

-    bus: _FeedsBus,  # noqa
+    bus: '_FeedsBus',  # noqa
     shm: ShmArray,
     quote_stream: trio.abc.ReceiveChannel,
     brokername: str,

@@ -299,13 +287,7 @@ async def sample_and_broadcast(
                # end up triggering backpressure which which will
                # eventually block this producer end of the feed and
                # thus other consumers still attached.
-                subs: list[
-                    tuple[
-                        Union[tractor.MsgStream, trio.MemorySendChannel],
-                        tractor.Context,
-                        Optional[float],  # tick throttle in Hz
-                    ]
-                ] = bus._subscribers[broker_symbol.lower()]
+                subs = bus._subscribers[broker_symbol.lower()]

                # NOTE: by default the broker backend doesn't append
                # it's own "name" into the fqsn schema (but maybe it

@@ -314,7 +296,7 @@ async def sample_and_broadcast(
                bsym = f'{broker_symbol}.{brokername}'
                lags: int = 0

-                for (stream, ctx, tick_throttle) in subs:
+                for (stream, tick_throttle) in subs:

                    try:
                        with trio.move_on_after(0.2) as cs:

@@ -326,41 +308,25 @@ async def sample_and_broadcast(
                                (bsym, quote)
                            )
                        except trio.WouldBlock:
-                            chan = ctx.chan
+                            ctx = getattr(stream, '_ctx', None)
                            if ctx:
                                log.warning(
                                    f'Feed overrun {bus.brokername} ->'
-                                    f'{chan.uid} !!!'
+                                    f'{ctx.channel.uid} !!!'
                                )
                            else:
                                key = id(stream)
                                overruns[key] += 1
                                log.warning(
-                                    f'Feed overrun {broker_symbol}'
-                                    '@{bus.brokername} -> '
+                                    f'Feed overrun {bus.brokername} -> '
                                    f'feed @ {tick_throttle} Hz'
                                )
                                if overruns[key] > 6:
-                                    # TODO: should we check for the
-                                    # context being cancelled? this
-                                    # could happen but the
-                                    # channel-ipc-pipe is still up.
-                                    if not chan.connected():
                                    log.warning(
-                                        'Dropping broken consumer:\n'
-                                        f'{broker_symbol}:'
-                                        f'{ctx.cid}@{chan.uid}'
+                                        f'Dropping consumer {stream}'
                                    )
                                    await stream.aclose()
                                    raise trio.BrokenResourceError
-                                    else:
-                                        log.warning(
-                                            'Feed getting overrun bro!\n'
-                                            f'{broker_symbol}:'
-                                            f'{ctx.cid}@{chan.uid}'
-                                        )
-                                        continue

                        else:
                            await stream.send(
                                {bsym: quote}

@@ -376,12 +342,11 @@ async def sample_and_broadcast(
                        trio.ClosedResourceError,
                        trio.EndOfChannel,
                    ):
-                        chan = ctx.chan
+                        ctx = getattr(stream, '_ctx', None)
                        if ctx:
                            log.warning(
-                                'Dropped `brokerd`-quotes-feed connection:\n'
-                                f'{broker_symbol}:'
-                                f'{ctx.cid}@{chan.uid}'
+                                f'{ctx.chan.uid} dropped '
+                                '`brokerd`-quotes-feed connection'
                            )
                        if tick_throttle:
                            assert stream._closed

@@ -394,11 +359,7 @@ async def sample_and_broadcast(
                    try:
                        subs.remove((stream, tick_throttle))
                    except ValueError:
-                        log.error(
-                            f'Stream was already removed from subs!?\n'
-                            f'{broker_symbol}:'
-                            f'{ctx.cid}@{chan.uid}'
-                        )
+                        log.error(f'{stream} was already removed from subs!?')


 # TODO: a less naive throttler, here's some snippets:

@@ -510,7 +471,6 @@ async def uniform_rate_send(
            # if the feed consumer goes down then drop
            # out of this rate limiter
            log.warning(f'{stream} closed')
-            await stream.aclose()
            return

        # reset send cycle state
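
On the receiving side each subscriber stream gets the index msg pushed by `broadcast()`; the left-hand version's payload carries `first`/`last` slice bounds while the right-hand one sends only `index`. A toy consumer sketch using a trio memory channel in place of a real `tractor` stream:

```python
import trio

async def consume(receiver: trio.MemoryReceiveChannel) -> None:
    async for msg in receiver:
        # 'first'/'last' slice the shm buffer's non-empty region
        first, last = msg['first'], msg['last']
        print(f'new sample step: buffer[{first}:{last}]')

async def main():
    send, recv = trio.open_memory_channel(8)
    async with trio.open_nursery() as n:
        n.start_soon(consume, recv)
        # emulate two broadcast ticks then close the channel
        await send.send({'first': 0, 'last': 100, 'index': 100})
        await send.send({'first': 0, 'last': 101, 'index': 101})
        await send.aclose()

trio.run(main)
```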
@@ -20,7 +20,6 @@ NumPy compatible shared memory buffers for real-time IPC streaming.
 """
 from __future__ import annotations
 from sys import byteorder
-import time
 from typing import Optional
 from multiprocessing.shared_memory import SharedMemory, _USE_POSIX

@@ -99,12 +98,7 @@ class SharedInt:
         if _USE_POSIX:
             # We manually unlink to bypass all the "resource tracker"
             # nonsense meant for non-SC systems.
-            name = self._shm.name
-            try:
-                shm_unlink(name)
-            except FileNotFoundError:
-                # might be a teardown race here?
-                log.warning(f'Shm for {name} already unlinked?')
+            shm_unlink(self._shm.name)


 class _Token(BaseModel):

@@ -542,26 +536,8 @@ def attach_shm_array(
     if key in _known_tokens:
         assert _Token.from_msg(_known_tokens[key]) == token, "WTF"

-    # XXX: ugh, looks like due to the ``shm_open()`` C api we can't
-    # actually place files in a subdir, see discussion here:
-    # https://stackoverflow.com/a/11103289
-
     # attach to array buffer and view as per dtype
-    _err: Optional[Exception] = None
-    for _ in range(3):
-        try:
-            shm = SharedMemory(
-                name=key,
-                create=False,
-            )
-            break
-        except OSError as oserr:
-            _err = oserr
-            time.sleep(0.1)
-    else:
-        if _err:
-            raise _err
+    shm = SharedMemory(name=key)

     shmarr = np.ndarray(
         (size,),
         dtype=token.dtype,
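
The attach-side retry in the left-hand version guards against a race where the producer hasn't finished creating the segment yet; the stdlib call itself is just `SharedMemory(name=..., create=False)`. A standalone sketch of that retry wrapper (segment name and counts are illustrative):

```python
import time
from typing import Optional
from multiprocessing.shared_memory import SharedMemory

def attach_with_retry(
    key: str,
    tries: int = 3,
    delay: float = 0.1,
) -> SharedMemory:
    err: Optional[OSError] = None
    for _ in range(tries):
        try:
            # attach to an existing segment by name
            return SharedMemory(name=key, create=False)
        except OSError as oserr:
            # producer may not have created it yet
            err = oserr
            time.sleep(delay)
    raise err
```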
@@ -33,7 +33,7 @@ ohlc_fields = [
     ('high', float),
     ('low', float),
     ('close', float),
-    ('volume', float),
+    ('volume', int),
     ('bar_wap', float),
 ]
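
`ohlc_fields` feeds a numpy structured dtype, so the `('volume', float)` vs `('volume', int)` swap changes the on-buffer storage type for that column. A quick sketch of how the field list becomes an array dtype:

```python
import numpy as np

ohlc_fields = [
    ('time', float),
    ('open', float),
    ('high', float),
    ('low', float),
    ('close', float),
    ('volume', float),  # int here would truncate fractional volumes
    ('bar_wap', float),
]

ohlc_dtype = np.dtype(ohlc_fields)
bars = np.zeros(2, dtype=ohlc_dtype)
bars[0]['volume'] = 1234.5
print(bars['volume'])  # float64 column preserves the fraction
```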
@@ -25,7 +25,6 @@ from dataclasses import dataclass, field
 from datetime import datetime
 from contextlib import asynccontextmanager
 from functools import partial
-from pprint import pformat
 from types import ModuleType
 from typing import (
     Any,

@@ -33,7 +32,6 @@ from typing import (
     Generator,
     Awaitable,
     TYPE_CHECKING,
-    Union,
 )

 import trio

@@ -41,13 +39,12 @@ from trio.abc import ReceiveChannel
 from trio_typing import TaskStatus
 import trimeter
 import tractor
-from tractor.trionics import maybe_open_context
 from pydantic import BaseModel
 import pendulum
 import numpy as np

 from ..brokers import get_brokermod
-from ..calc import humanize
+from .._cacheables import maybe_open_context
 from ..log import get_logger, get_console_log
 from .._daemon import (
     maybe_spawn_brokerd,

@@ -117,13 +114,7 @@ class _FeedsBus(BaseModel):
     # https://github.com/samuelcolvin/pydantic/issues/2816
     _subscribers: dict[
         str,
-        list[
-            tuple[
-                Union[tractor.MsgStream, trio.MemorySendChannel],
-                tractor.Context,
-                Optional[float],  # tick throttle in Hz
-            ]
-        ]
+        list[tuple[tractor.MsgStream, Optional[float]]]
     ] = {}

     async def start_task(

@@ -230,19 +221,12 @@ def diff_history(
    # write to shm.
    if (
        s_diff < 0
+        and abs(s_diff) < len(array)
    ):
-        if abs(s_diff) < len(array):
            # the + 1 is because ``last_tsdb_dt`` is pulled from
            # the last row entry for the ``'time'`` field retreived
            # from the tsdb.
-            to_push = array[abs(s_diff) + 1:]
+        to_push = array[abs(s_diff)+1:]

-        else:
-            # pass back only the portion of the array that is
-            # greater then the last time stamp in the tsdb.
-            time = array['time']
-            to_push = array[time >= last_tsdb_dt.timestamp()]
-
    log.info(
        f'Pushing partial frame {to_push.size} to shm'
    )

@@ -257,8 +241,7 @@ async def start_backfill(

     last_tsdb_dt: Optional[datetime] = None,
     storage: Optional[Storage] = None,
-    write_tsdb: bool = True,
-    tsdb_is_up: bool = False,
+    write_tsdb: bool = False,

     task_status: TaskStatus[trio.CancelScope] = trio.TASK_STATUS_IGNORED,

@@ -274,8 +257,8 @@ async def start_backfill(

     # sample period step size in seconds
     step_size_s = (
-        pendulum.from_timestamp(times[-1])
-        - pendulum.from_timestamp(times[-2])
+        pendulum.from_timestamp(times[-1]) -
+        pendulum.from_timestamp(times[-2])
     ).seconds

     # "frame"'s worth of sample period steps in seconds

@@ -300,56 +283,48 @@ async def start_backfill(
    # let caller unblock and deliver latest history frame
    task_status.started((shm, start_dt, end_dt, bf_done))

-    # based on the sample step size, maybe load a certain amount history
    if last_tsdb_dt is None:
-        if step_size_s not in (1, 60):
+        # maybe a better default (they don't seem to define epoch?!)
+
+        # based on the sample step size load a certain amount
+        # history
+        if step_size_s == 1:
+            last_tsdb_dt = pendulum.now().subtract(days=2)
+
+        elif step_size_s == 60:
+            last_tsdb_dt = pendulum.now().subtract(years=2)
+
+        else:
            raise ValueError(
                '`piker` only needs to support 1m and 1s sampling '
                'but ur api is trying to deliver a longer '
                f'timeframe of {step_size_s} ' 'seconds.. so ye, dun '
-                'do dat brudder.'
+                'do dat bruh.'
            )

-        # when no tsdb "last datum" is provided, we just load
-        # some near-term history.
-        periods = {
-            1: {'days': 1},
-            60: {'days': 14},
-        }
-
-        if tsdb_is_up:
-            # do a decently sized backfill and load it into storage.
-            periods = {
-                1: {'days': 6},
-                60: {'years': 2},
-            }
-
-        kwargs = periods[step_size_s]
-        last_tsdb_dt = start_dt.subtract(**kwargs)
-
    # configure async query throttling
    erlangs = config.get('erlangs', 1)
    rate = config.get('rate', 1)
    frames = {}

    def iter_dts(start: datetime):

        while True:

            hist_period = pendulum.period(
-                start,
+                start.subtract(seconds=step_size_s),
                last_tsdb_dt,
            )
-            dtrange = list(hist_period.range('seconds', frame_size_s))
-            log.debug(f'New datetime index:\n{pformat(dtrange)}')
+            dtrange = hist_period.range('seconds', frame_size_s)

            for end_dt in dtrange:
-                log.info(f'Yielding next frame start {end_dt}')
+                log.warning(f'Yielding next frame start {end_dt}')
                start = yield end_dt

                # if caller sends a new start date, reset to that
                if start is not None:
                    log.warning(f'Resetting date range: {start}')
+                    # import pdbpp
+                    # pdbpp.set_trace()
                    break
            else:
                # from while

@@ -534,74 +509,27 @@ async def start_backfill(

        epochs = list(reversed(sorted(frames)))
        for epoch in epochs:

            start = shm.array['time'][0]
-            last_shm_prepend_dt = pendulum.from_timestamp(start)
-            earliest_frame_queue_dt = pendulum.from_timestamp(epoch)
-
-            diff = start - epoch
+            diff = epoch - start
+            if abs(diff) > step_size_s:

-            if diff < 0:
-                log.warning(
-                    'Discarding out of order frame:\n'
-                    f'{earliest_frame_queue_dt}'
-                )
-                frames.pop(epoch)
-                continue
-
-            if diff > step_size_s:
-
-                if earliest_end_dt < earliest_frame_queue_dt:
+                if earliest_end_dt < end_dt:
+                    # await tractor.breakpoint()
                    # XXX: an expected gap was encountered (see
                    # logic in ``get_ohlc_frame()``, so allow
                    # this frame through to the storage layer.
                    log.warning(
-                        f'Expected history gap of {diff}s:\n'
-                        f'{earliest_frame_queue_dt} <- '
-                        f'{earliest_end_dt}'
+                        f'there is an expected history gap of {diff}s:'
                    )

                elif (
                    erlangs > 1
+                    and len(epochs) < erlangs
                ):
                    # we don't yet have the next frame to push
                    # so break back to the async request loop
                    # while we wait for more async frame-results
                    # to arrive.
-                    if len(frames) >= erlangs:
-                        log.warning(
-                            'Frame count in async-queue is greater '
-                            'then erlangs?\n'
-                            'There seems to be a gap between:\n'
-                            f'{earliest_frame_queue_dt} <- '
-                            f'{last_shm_prepend_dt}\n'
-                            'Conducting manual call for frame ending: '
-                            f'{last_shm_prepend_dt}'
-                        )
-                        (
-                            to_push,
-                            start_dt,
-                            end_dt,
-                        ) = await get_ohlc_frame(
-                            input_end_dt=last_shm_prepend_dt,
-                            iter_dts_gen=idts,
-                        )
-                        last_epoch = to_push['time'][-1]
-                        diff = start - last_epoch
-
-                        if diff > step_size_s:
-                            await tractor.breakpoint()
-                            raise DataUnavailable(
-                                'An awkward frame was found:\n'
-                                f'{start_dt} -> {end_dt}:\n{to_push}'
-                            )
-
-                        else:
-                            frames[last_epoch] = (
-                                to_push, start_dt, end_dt)
-                    break

            expect_end = pendulum.from_timestamp(start)
            expect_start = expect_end.subtract(
                seconds=frame_size_s)

@@ -700,7 +628,6 @@ async def manage_history(

     bfqsn = fqsn.replace('.' + mod.name, '')
     open_history_client = getattr(mod, 'open_history_client', None)
-    assert open_history_client

     if is_up and opened and open_history_client:

@@ -729,7 +656,6 @@ async def manage_history(
                     bfqsn,
                     shm,
                     last_tsdb_dt=last_tsdb_dt,
-                    tsdb_is_up=True,
                     storage=storage,
                 )
             )

@@ -813,15 +739,6 @@ async def manage_history(

         # manually trigger step update to update charts/fsps
         # which need an incremental update.
-        # NOTE: the way this works is super duper
-        # un-intuitive right now:
-        # - the broadcaster fires a msg to the fsp subsystem.
-        # - fsp subsys then checks for a sample step diff and
-        #   possibly recomputes prepended history.
-        # - the fsp then sends back to the parent actor
-        #   (usually a chart showing graphics for said fsp)
-        #   which tells the chart to conduct a manual full
-        #   graphics loop cycle.
         for delay_s in sampler.subscribers:
             await broadcast(delay_s)

@@ -1125,10 +1042,10 @@ async def open_feed_bus(
             recv,
             stream,
         )
-        sub = (send, ctx, tick_throttle)
+        sub = (send, tick_throttle)

     else:
-        sub = (stream, ctx, tick_throttle)
+        sub = (stream, tick_throttle)

     subs = bus._subscribers[bfqsn]
     subs.append(sub)

@@ -1211,10 +1128,10 @@ class Feed:
     shm: ShmArray
     mod: ModuleType
     first_quotes: dict  # symbol names to first quote dicts
-    _portal: tractor.Portal
-    stream: trio.abc.ReceiveChannel[dict[str, Any]]
-    status: dict[str, Any]

+    _portal: tractor.Portal
+
+    stream: trio.abc.ReceiveChannel[dict[str, Any]]
     throttle_rate: Optional[int] = None

     _trade_stream: Optional[AsyncIterator[dict[str, Any]]] = None

@@ -1355,24 +1272,9 @@ async def open_feed(
            first_quotes=first_quotes,
            stream=stream,
            _portal=portal,
-            status={},
            throttle_rate=tick_throttle,
        )

-        # fill out "status info" that the UI can show
-        host, port = feed.portal.channel.raddr
-        if host == '127.0.0.1':
-            host = 'localhost'
-
-        feed.status.update({
-            'actor_name': feed.portal.channel.uid[0],
-            'host': host,
-            'port': port,
-            'shm': f'{humanize(feed.shm._shm.size)}',
-            'throttle_rate': feed.throttle_rate,
-        })
-        feed.status.update(init_msg.pop('status', {}))

        for sym, data in init_msg.items():
            si = data['symbol_info']
            fqsn = data['fqsn'] + f'.{brokername}'
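
The left-hand `diff_history()` branch filters by timestamp rather than row offset: boolean masking a structured array on its `'time'` field keeps only bars at or after the last tsdb datum. A minimal sketch of that slice:

```python
import numpy as np

array = np.array(
    [(1.0,), (2.0,), (3.0,), (4.0,)],
    dtype=np.dtype([('time', float)]),
)
last_tsdb_ts = 2.0  # epoch seconds of last stored bar

# pass back only the portion of the array that is
# at or past the last time stamp in the tsdb
time = array['time']
to_push = array[time >= last_tsdb_ts]
print(to_push)  # rows with time 2.0, 3.0, 4.0
```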
@ -23,7 +23,6 @@
|
||||||
- todo: tick sequence stream-cloning for testing
|
- todo: tick sequence stream-cloning for testing
|
||||||
|
|
||||||
'''
|
'''
|
||||||
from __future__ import annotations
|
|
||||||
from contextlib import asynccontextmanager as acm
|
from contextlib import asynccontextmanager as acm
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from pprint import pformat
|
from pprint import pformat
|
||||||
|
@ -31,7 +30,6 @@ from typing import (
|
||||||
Any,
|
Any,
|
||||||
Optional,
|
Optional,
|
||||||
Union,
|
Union,
|
||||||
TYPE_CHECKING,
|
|
||||||
)
|
)
|
||||||
import time
|
import time
|
||||||
from math import isnan
|
from math import isnan
|
||||||
|
@ -40,6 +38,7 @@ from bidict import bidict
|
||||||
import msgpack
|
import msgpack
|
||||||
import pyqtgraph as pg
|
import pyqtgraph as pg
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
import tractor
|
import tractor
|
||||||
from trio_websocket import open_websocket_url
|
from trio_websocket import open_websocket_url
|
||||||
from anyio_marketstore import (
|
from anyio_marketstore import (
|
||||||
|
@@ -50,151 +49,12 @@ from anyio_marketstore import (
 import pendulum
 import purerpc
 
-if TYPE_CHECKING:
-    import docker
-    from ._ahab import DockerContainer
-
 from .feed import maybe_open_feed
 from ..log import get_logger, get_console_log
 
 
 log = get_logger(__name__)
 
 
-# container level config
-_config = {
-    'grpc_listen_port': 5995,
-    'ws_listen_port': 5993,
-    'log_level': 'debug',
-}
-
-_yaml_config = '''
-# piker's ``marketstore`` config.
-
-# mount this config using:
-# sudo docker run --mount \
-# type=bind,source="$HOME/.config/piker/",target="/etc" -i -p \
-# 5993:5993 alpacamarkets/marketstore:latest
-
-root_directory: data
-listen_port: {ws_listen_port}
-grpc_listen_port: {grpc_listen_port}
-log_level: {log_level}
-queryable: true
-stop_grace_period: 0
-wal_rotate_interval: 5
-stale_threshold: 5
-enable_add: true
-enable_remove: false
-
-triggers:
-  - module: ondiskagg.so
-    on: "*/1Sec/OHLCV"
-    config:
-      # filter: "nasdaq"
-      destinations:
-        - 1Min
-        - 5Min
-        - 15Min
-        - 1H
-        - 1D
-
-  - module: stream.so
-    on: '*/*/*'
-    # config:
-    #     filter: "nasdaq"
-
-'''.format(**_config)
-
-
-def start_marketstore(
-    client: docker.DockerClient,
-
-    **kwargs,
-
-) -> tuple[DockerContainer, dict[str, Any]]:
-    '''
-    Start and supervise a marketstore instance with its config bind-mounted
-    in from the piker config directory on the system.
-
-    The equivalent cli cmd to this code is:
-
-        sudo docker run --mount \
-        type=bind,source="$HOME/.config/piker/",target="/etc" -i -p \
-        5993:5993 alpacamarkets/marketstore:latest
-
-    '''
-    import os
-    import docker
-    from .. import config
-    get_console_log('info', name=__name__)
-
-    mktsdir = os.path.join(config._config_dir, 'marketstore')
-
-    # create when dne
-    if not os.path.isdir(mktsdir):
-        os.mkdir(mktsdir)
-
-    yml_file = os.path.join(mktsdir, 'mkts.yml')
-    if not os.path.isfile(yml_file):
-        log.warning(
-            f'No `marketstore` config exists?: {yml_file}\n'
-            'Generating new file from template:\n'
-            f'{_yaml_config}\n'
-        )
-        with open(yml_file, 'w') as yf:
-            yf.write(_yaml_config)
-
-    # create a mount from user's local piker config dir into container
-    config_dir_mnt = docker.types.Mount(
-        target='/etc',
-        source=mktsdir,
-        type='bind',
-    )
-
-    # create a user config subdir where the marketstore
-    # backing filesystem database can be persisted.
-    persistent_data_dir = os.path.join(
-        mktsdir, 'data',
-    )
-    if not os.path.isdir(persistent_data_dir):
-        os.mkdir(persistent_data_dir)
-
-    data_dir_mnt = docker.types.Mount(
-        target='/data',
-        source=persistent_data_dir,
-        type='bind',
-    )
-
-    dcntr: DockerContainer = client.containers.run(
-        'alpacamarkets/marketstore:latest',
-        # do we need this for cmds?
-        # '-i',
-
-        # '-p 5993:5993',
-        ports={
-            '5993/tcp': 5993,  # jsonrpc / ws?
-            '5995/tcp': 5995,  # grpc
-        },
-        mounts=[
-            config_dir_mnt,
-            data_dir_mnt,
-        ],
-        detach=True,
-        # stop_signal='SIGINT',
-        init=True,
-        # remove=True,
-    )
-    return (
-        dcntr,
-        _config,
-
-        # expected startup and stop msgs
-        "launching tcp listener for all services...",
-        "exiting...",
-    )
-
-
 _tick_tbk_ids: tuple[str, str] = ('1Sec', 'TICK')
 _tick_tbk: str = '{}/' + '/'.join(_tick_tbk_ids)
 
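Note: the removed `_yaml_config` template is rendered with plain `str.format`
over the `_config` dict before being written to `mkts.yml`. A minimal
standalone sketch of that mechanism (trimmed template, same keys):

    # sketch only: a cut-down template using the same format keys
    config = {
        'grpc_listen_port': 5995,
        'ws_listen_port': 5993,
        'log_level': 'debug',
    }
    template = (
        'listen_port: {ws_listen_port}\n'
        'grpc_listen_port: {grpc_listen_port}\n'
        'log_level: {log_level}\n'
    )
    print(template.format(**config))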
@@ -242,8 +102,8 @@ _ohlcv_dt
     # ohlcv sampling
     ('Open', 'f4'),
     ('High', 'f4'),
-    ('Low', 'f4'),
-    ('Close', 'f4'),
+    ('Low', 'i8'),
+    ('Close', 'i8'),
     ('Volume', 'f4'),
 ]
 
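Note: the field-width change above ('f4' on one side, 'i8' on the other)
doubles those fields' storage per row. A quick structured-dtype check with a
hypothetical two-field layout:

    import numpy as np

    # 'f4' = 32-bit float, 'i8' = 64-bit int
    old = np.dtype([('Low', 'f4'), ('Close', 'f4')])
    new = np.dtype([('Low', 'i8'), ('Close', 'i8')])
    assert old.itemsize == 8 and new.itemsize == 16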
@@ -279,7 +139,7 @@ def quote_to_marketstore_structarray(
     '''
     if last_fill:
         # new fill bby
-        now = int(pendulum.parse(last_fill).timestamp)
+        now = timestamp(last_fill)
     else:
         # this should get inserted upstream by the broker-client to
         # subtract from IPC latency
@@ -309,6 +169,15 @@ def quote_to_marketstore_structarray(
     return np.array([tuple(array_input)], dtype=_quote_dt)
 
 
+def timestamp(date, **kwargs) -> int:
+    '''
+    Return marketstore compatible 'Epoch' integer in nanoseconds
+    from a date formatted str.
+
+    '''
+    return int(pd.Timestamp(date, **kwargs).value)
+
+
 @acm
 async def get_client(
     host: str = 'localhost',
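Note: the new `timestamp()` helper leans on pandas (imported as `pd` in this
module): `Timestamp.value` is already integer nanoseconds since the epoch. A
runnable sketch of the same conversion:

    import pandas as pd

    def timestamp(date: str, **kwargs) -> int:
        # ns since epoch; ``.value`` on a Timestamp is already ns
        return int(pd.Timestamp(date, **kwargs).value)

    assert timestamp('1970-01-01 00:00:01') == 1_000_000_000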
@@ -395,12 +264,7 @@ class Storage:
     ]:
 
         first_tsdb_dt, last_tsdb_dt = None, None
-        tsdb_arrays = await self.read_ohlcv(
-            fqsn,
-            # on first load we don't need to pull the max
-            # history per request size worth.
-            limit=3000,
-        )
+        tsdb_arrays = await self.read_ohlcv(fqsn)
         log.info(f'Loaded tsdb history {tsdb_arrays}')
 
         if tsdb_arrays:
@@ -418,7 +282,6 @@ class Storage:
         fqsn: str,
         timeframe: Optional[Union[int, str]] = None,
         end: Optional[int] = None,
-        limit: int = int(800e3),
 
     ) -> tuple[
         MarketstoreClient,
@@ -441,7 +304,7 @@ class Storage:
 
             # TODO: figure the max limit here given the
             # ``purepc`` msg size limit of purerpc: 33554432
-            limit=limit,
+            limit=int(800e3),
         )
 
         if timeframe is None:
@@ -565,17 +428,6 @@ class Storage:
         if err:
             raise MarketStoreError(err)
 
-    # XXX: currently the only way to do this is through the CLI:
-
-    # sudo ./marketstore connect --dir ~/.config/piker/data
-    # >> \show mnq.globex.20220617.ib/1Sec/OHLCV 2022-05-15
-    # and this seems to block and use up mem..
-    # >> \trim mnq.globex.20220617.ib/1Sec/OHLCV 2022-05-15
-
-    # relevant source code for this is here:
-    # https://github.com/alpacahq/marketstore/blob/master/cmd/connect/session/trim.go#L14
-    # def delete_range(self, start_dt, end_dt) -> None:
-    # ...
 
 @acm
 async def open_storage_client(
@@ -642,6 +494,7 @@ async def tsdb_history_update(
     ):
         profiler(f'opened feed for {fqsn}')
 
+
         to_append = feed.shm.array
         to_prepend = None
 
@@ -657,13 +510,9 @@ async def tsdb_history_update(
         tsdb_arrays = await storage.read_ohlcv(fqsn)
         # hist diffing
         if tsdb_arrays:
-            for secs in (1, 60):
-                ts = tsdb_arrays.get(secs)
-                if ts is not None and len(ts):
-                    # these aren't currently used but can be referenced from
-                    # within the embedded ipython shell below.
-                    to_append = ohlcv[ohlcv['time'] > ts['Epoch'][-1]]
-                    to_prepend = ohlcv[ohlcv['time'] < ts['Epoch'][0]]
+            onesec = tsdb_arrays[1]
+            to_append = ohlcv[ohlcv['time'] > onesec['Epoch'][-1]]
+            to_prepend = ohlcv[ohlcv['time'] < onesec['Epoch'][0]]
 
         profiler('Finished db arrays diffs')
 
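Note: the history diff above selects shm rows strictly newer/older than the
tsdb's 'Epoch' bounds via boolean masks. A self-contained sketch with
synthetic arrays (field names taken from the hunk, values hypothetical):

    import numpy as np

    # stand-ins for the shm ``ohlcv`` and tsdb arrays
    ohlcv = np.array(
        [(t, 1.0) for t in range(90, 110)],
        dtype=[('time', 'i8'), ('close', 'f4')],
    )
    tsdb = np.array(
        [(t, 1.0) for t in range(95, 105)],
        dtype=[('Epoch', 'i8'), ('Close', 'f4')],
    )
    to_append = ohlcv[ohlcv['time'] > tsdb['Epoch'][-1]]   # newer than db
    to_prepend = ohlcv[ohlcv['time'] < tsdb['Epoch'][0]]   # older than db
    assert to_append['time'].min() == 105
    assert to_prepend['time'].max() == 94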
@@ -361,7 +361,7 @@ async def cascade(
     ) -> tuple[TaskTracker, int]:
         # TODO: adopt an incremental update engine/approach
         # where possible here eventually!
-        log.debug(f're-syncing fsp {func_name} to source')
+        log.warning(f're-syncing fsp {func_name} to source')
         tracker.cs.cancel()
         await tracker.complete.wait()
         tracker, index = await n.start(fsp_target)
@@ -369,12 +369,7 @@ async def cascade(
         # always trigger UI refresh after history update,
         # see ``piker.ui._fsp.FspAdmin.open_chain()`` and
         # ``piker.ui._display.trigger_update()``.
-        await client_stream.send({
-            'fsp_update': {
-                'key': dst_shm_token,
-                'first': dst._first.value,
-                'last': dst._last.value,
-            }})
+        await client_stream.send('update')
         return tracker, index
 
     def is_synced(
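Note: this hunk swaps a structured fsp-update message for a bare 'update'
sentinel string. A minimal runnable sketch contrasting the two wire formats
(`send` and the token/index values are hypothetical stand-ins for the IPC
stream and shm state):

    def send(msg) -> None:
        print('sending:', msg)

    dst_shm_token = {'shm_name': 'fsp.dolla_vlm'}  # hypothetical token
    first, last = 0, 1024

    # structured form from one side of the diff
    send({'fsp_update': {'key': dst_shm_token, 'first': first, 'last': last}})
    # slimmed-down sentinel from the other side
    send('update')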
@@ -25,13 +25,10 @@ from pygments import highlight, lexers, formatters
 
 # Makes it so we only see the full module name when using ``__name__``
 # without the extra "piker." prefix.
-_proj_name: str = 'piker'
+_proj_name = 'piker'
 
 
-def get_logger(
-    name: str = None,
-
-) -> logging.Logger:
+def get_logger(name: str = None) -> logging.Logger:
     '''Return the package log or a sub-log for `name` if provided.
     '''
     return tractor.log.get_logger(name=name, _root_name=_proj_name)
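Note: `get_logger()` just delegates to `tractor`'s logger with a fixed root
name, so sub-logs hang under 'piker' and render as e.g. 'piker.ui._curve'. A
usage sketch (assumes `tractor` is installed; the call shape is taken from
the hunk itself):

    import tractor

    log = tractor.log.get_logger(name='ui._curve', _root_name='piker')
    log.info('hello from a sub-log')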
@@ -19,10 +19,10 @@ Chart axes graphics and behavior.
 
 """
 from functools import lru_cache
-from typing import Optional, Callable
+from typing import List, Tuple, Optional, Callable
 from math import floor
 
-import numpy as np
+import pandas as pd
 import pyqtgraph as pg
 from PyQt5 import QtCore, QtGui, QtWidgets
 from PyQt5.QtCore import QPointF
@@ -103,7 +103,7 @@ class Axis(pg.AxisItem):
     def size_to_values(self) -> None:
         pass
 
-    def txt_offsets(self) -> tuple[int, int]:
+    def txt_offsets(self) -> Tuple[int, int]:
         return tuple(self.style['tickTextOffset'])
 
 
@@ -218,9 +218,9 @@ class DynamicDateAxis(Axis):
 
     def _indexes_to_timestrs(
         self,
-        indexes: list[int],
+        indexes: List[int],
 
-    ) -> list[str]:
+    ) -> List[str]:
 
         chart = self.linkedsplits.chart
         flow = chart._flows[chart.name]
@@ -242,17 +242,10 @@ class DynamicDateAxis(Axis):
         )]
 
         # TODO: **don't** have this hard coded shift to EST
-        # delay = times[-1] - times[-2]
-        dts = np.array(epochs, dtype='datetime64[s]')
-
-        # see units listing:
-        # https://numpy.org/devdocs/reference/arrays.datetime.html#datetime-units
-        return list(np.datetime_as_string(dts))
-
-        # TODO: per timeframe formatting?
-        # - we probably need this based on zoom now right?
-        # prec = self.np_dt_precision[delay]
-        # return dts.strftime(self.tick_tpl[delay])
+        dts = pd.to_datetime(epochs, unit='s')  # - 4*pd.offsets.Hour()
+        delay = times[-1] - times[-2]
+        return dts.strftime(self.tick_tpl[delay])
 
     def tickStrings(
         self,
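Note: the two sides format epoch seconds differently: numpy ISO strings on
one side, pandas `strftime` keyed by the inter-sample gap on the other. A
sketch of both paths with hypothetical inputs and a hypothetical template
table:

    import numpy as np
    import pandas as pd

    epochs = [1652572800, 1652572860]  # second-resolution stamps

    # numpy path: ISO strings, no tz shift
    dts = np.array(epochs, dtype='datetime64[s]')
    iso = list(np.datetime_as_string(dts))

    # pandas path: template chosen by the sampling delay
    delay = epochs[-1] - epochs[-2]
    tick_tpl = {60: '%H:%M'}  # hypothetical template table
    fmt = pd.to_datetime(epochs, unit='s').strftime(tick_tpl[delay])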
@@ -438,7 +431,7 @@ class XAxisLabel(AxisLabel):
             | QtCore.Qt.AlignCenter
         )
 
-    def size_hint(self) -> tuple[float, float]:
+    def size_hint(self) -> Tuple[float, float]:
         # size to parent axis height
         return self._parent.height(), None
 
@@ -452,11 +445,11 @@ class XAxisLabel(AxisLabel):
 
         timestrs = self._parent._indexes_to_timestrs([int(value)])
 
-        if not len(timestrs):
+        if not timestrs.any():
             return
 
         pad = 1*' '
-        self.label_str = pad + str(timestrs[0]) + pad
+        self.label_str = pad + timestrs[0] + pad
 
         _, y_offset = self._parent.txt_offsets()
 
@@ -517,7 +510,7 @@ class YAxisLabel(AxisLabel):
         if getattr(self._parent, 'txt_offsets', False):
             self.x_offset, y_offset = self._parent.txt_offsets()
 
-    def size_hint(self) -> tuple[float, float]:
+    def size_hint(self) -> Tuple[float, float]:
         # size to parent axis width(-ish)
         wsh = self._dpifont.boundingRect(' ').height() / 2
         return (
@@ -50,10 +50,7 @@ from ._cursor import (
 from ..data._sharedmem import ShmArray
 from ._l1 import L1Labels
 from ._ohlc import BarItems
-from ._curve import (
-    Curve,
-    StepCurve,
-)
+from ._curve import FastAppendCurve
 from ._style import (
     hcolor,
     CHART_MARGINS,
@@ -982,6 +979,11 @@ class ChartPlotWidget(pg.PlotWidget):
             graphics=graphics,
         )
 
+        # TODO: i think we can eventually remove this if
+        # we write the ``Flow.update_graphics()`` method right?
+        # draw after to allow self.scene() to work...
+        graphics.draw_from_data(shm.array)
+
         self._add_sticky(name, bg_color='davies')
 
         return graphics, data_key
@@ -1054,7 +1056,6 @@ class ChartPlotWidget(pg.PlotWidget):
         color: Optional[str] = None,
         add_label: bool = True,
         pi: Optional[pg.PlotItem] = None,
-        step_mode: bool = False,
 
         **pdi_kwargs,
 
@@ -1071,18 +1072,31 @@ class ChartPlotWidget(pg.PlotWidget):
 
         data_key = array_key or name
 
-        curve_type = {
-            None: Curve,
-            'step': StepCurve,
-            # TODO:
-            # 'bars': BarsItems
-        }['step' if step_mode else None]
-
-        curve = curve_type(
+        # yah, we wrote our own B)
+        data = shm.array
+        curve = FastAppendCurve(
+            y=data[data_key],
+            x=data['index'],
+            # antialias=True,
             name=name,
+
+            # XXX: pretty sure this is just more overhead
+            # on data reads and makes graphics rendering no faster
+            # clipToView=True,
+
             **pdi_kwargs,
         )
 
+        # XXX: see explanation for different caching modes:
+        # https://stackoverflow.com/a/39410081
+        # seems to only be useful if we don't re-generate the entire
+        # QPainterPath every time
+        # curve.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
+
+        # don't ever use this - it's a colossal nightmare of artefacts
+        # and is disastrous for performance.
+        # curve.setCacheMode(QtWidgets.QGraphicsItem.ItemCoordinateCache)
+
         pi = pi or self.plotItem
 
         self._flows[data_key] = Flow(
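Note: the removed block picks the curve class from a dict keyed on
`step_mode`. The same dispatch pattern, standalone with stub classes:

    class Curve: ...
    class StepCurve(Curve): ...

    def pick(step_mode: bool) -> type:
        # key is 'step' when step_mode is set, else the None default
        return {None: Curve, 'step': StepCurve}['step' if step_mode else None]

    assert pick(True) is StepCurve and pick(False) is Curve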
@@ -1244,9 +1258,7 @@ class ChartPlotWidget(pg.PlotWidget):
     def maxmin(
         self,
         name: Optional[str] = None,
-        bars_range: Optional[tuple[
-            int, int, int, int, int, int
-        ]] = None,
+        bars_range: Optional[tuple[int, int, int, int]] = None,
 
     ) -> tuple[float, float]:
         '''
@@ -1255,11 +1267,10 @@ class ChartPlotWidget(pg.PlotWidget):
         If ``bars_range`` is provided use that range.
 
         '''
-        # print(f'Chart[{self.name}].maxmin()')
         profiler = pg.debug.Profiler(
-            msg=f'`{str(self)}.maxmin(name={name})`: `{self.name}`',
+            msg=f'`{str(self)}.maxmin()` loop cycle for: `{self.name}`',
             disabled=not pg_profile_enabled(),
-            ms_threshold=ms_slower_then,
+            gt=ms_slower_then,
             delayed=True,
         )
 
@@ -1275,14 +1286,7 @@ class ChartPlotWidget(pg.PlotWidget):
             key = res = 0, 0
 
         else:
-            (
-                first,
-                l,
-                lbar,
-                rbar,
-                r,
-                last,
-            ) = bars_range or flow.datums_range()
+            first, l, lbar, rbar, r, last = bars_range or flow.datums_range()
             profiler(f'{self.name} got bars range')
 
             key = round(lbar), round(rbar)
@@ -138,20 +138,51 @@ def ohlc_flatten(
     return x, flat
 
 
+def ohlc_to_m4_line(
+    ohlc: np.ndarray,
+    px_width: int,
+
+    downsample: bool = False,
+    uppx: Optional[float] = None,
+    pretrace: bool = False,
+
+) -> tuple[np.ndarray, np.ndarray]:
+    '''
+    Convert an OHLC struct-array to a m4 downsampled 1-d array.
+
+    '''
+    xpts, flat = ohlc_flatten(
+        ohlc,
+        use_mxmn=pretrace,
+    )
+
+    if downsample:
+        bins, x, y = ds_m4(
+            xpts,
+            flat,
+            px_width=px_width,
+            uppx=uppx,
+            # log_scale=bool(uppx)
+        )
+        x = np.broadcast_to(x[:, None], y.shape)
+        x = (x + np.array([-0.43, 0, 0, 0.43])).flatten()
+        y = y.flatten()
+
+        return x, y
+    else:
+        return xpts, flat
+
+
 def ds_m4(
     x: np.ndarray,
     y: np.ndarray,
-    # units-per-pixel-x(dimension)
-    uppx: float,
-
-    # XXX: troll zone / easter egg..
-    # want to mess with ur pal, pass in the actual
-    # pixel width here instead of uppx-proper (i.e. pass
-    # in our ``pg.GraphicsObject`` derivative's ``.px_width()``
-    # gto mega-trip-out ur bud). Hint, it used to be implemented
-    # (wrongly) using "pixel width", so check the git history ;)
+
+    # this is the width of the data in view
+    # in display-device-local pixel units.
+    px_width: int,
+    uppx: Optional[float] = None,
     xrange: Optional[float] = None,
+    # log_scale: bool = True,
 
 ) -> tuple[int, np.ndarray, np.ndarray]:
     '''
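Note: `ds_m4()` implements M4 downsampling: per horizontal pixel window keep
the first, min, max and last y-values so the rendered line is visually
lossless. A toy, non-vectorized version of the idea (this is a sketch of the
technique, not the project's implementation):

    import numpy as np

    def m4_bin(x: np.ndarray, y: np.ndarray, uppx: float) -> np.ndarray:
        # bucket ``x`` into windows ``uppx`` units wide and keep
        # (first, min, max, last) of ``y`` per window
        bins = np.floor((x - x[0]) / uppx).astype(int)
        out = []
        for b in np.unique(bins):
            ys = y[bins == b]
            out.append((ys[0], ys.min(), ys.max(), ys[-1]))
        return np.array(out)

    y = np.array([1., 5., 2., 9., 0., 3.])
    x = np.arange(6, dtype=float)
    assert m4_bin(x, y, uppx=3).shape == (2, 4)  # 2 windows, 4 vals each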
@@ -178,8 +209,29 @@ def ds_m4(
     # "i didn't show it in the sample code, but it's accounted for
     # in the start and end indices and number of bins"
 
+    # optionally log-scale down the "supposed pxs on screen"
+    # as the units-per-px (uppx) get's large.
+    # if log_scale:
+    #     assert uppx, 'You must provide a `uppx` value to use log scaling!'
+    #     # uppx = uppx * math.log(uppx, 2)
+
+    #     # scaler = 2**7 / (1 + math.log(uppx, 2))
+    #     scaler = round(
+    #         max(
+    #             # NOTE: found that a 16x px width brought greater
+    #             # detail, likely due to dpi scaling?
+    #             # px_width=px_width * 16,
+    #             2**7 / (1 + math.log(uppx, 2)),
+    #             1
+    #         )
+    #     )
+    #     px_width *= scaler
+
+    # else:
+    #     px_width *= 16
+
     # should never get called unless actually needed
-    assert uppx > 1
+    assert px_width > 1 and uppx > 0
 
     # NOTE: if we didn't pre-slice the data to downsample
     # you could in theory pass these as the slicing params,
@@ -196,9 +248,16 @@ def ds_m4(
     # uppx *= max(4 / (1 + math.log(uppx, 2)), 1)
 
     pxw = math.ceil(xrange / uppx)
+    # px_width = math.ceil(px_width)
 
-    # scale up the frame "width" directly with uppx
-    w = uppx
+    # ratio of indexed x-value to width of raster in pixels.
+    # this is more or less, uppx: units-per-pixel.
+    # w = xrange / float(px_width)
+    # uppx = uppx * math.log(uppx, 2)
+    # w2 = px_width / uppx
+
+    # scale up the width as the uppx get's large
+    w = uppx  # * math.log(uppx, 666)
 
     # ensure we make more then enough
     # frames (windows) for the output pixel
@@ -217,7 +276,9 @@ def ds_m4(
     # print(
     #     f'uppx: {uppx}\n'
     #     f'xrange: {xrange}\n'
+    #     f'px_width: {px_width}\n'
     #     f'pxw: {pxw}\n'
+    #     f'WTF w:{w}, w2:{w2}\n'
     #     f'frames: {frames}\n'
     # )
     assert frames >= (xrange / uppx)
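Note: the assert above is the invariant the window math must satisfy: enough
frames to cover the whole x-range at the current units-per-pixel. A small
arithmetic check (the exact `frames` formula here is illustrative, not lifted
from the source):

    uppx = 4.0        # units-per-pixel
    xrange = 1000.0   # x-units currently in view
    w = uppx          # window width tied directly to uppx
    frames = int(xrange) // int(w) + 1
    assert frames >= (xrange / uppx)  # 251 >= 250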
@@ -98,30 +98,25 @@ class LineDot(pg.CurvePoint):
         ev: QtCore.QEvent,
 
     ) -> bool:
-        if (
-            not isinstance(ev, QtCore.QDynamicPropertyChangeEvent)
-            or self.curve() is None
-        ):
+        if not isinstance(
+            ev, QtCore.QDynamicPropertyChangeEvent
+        ) or self.curve() is None:
             return False
 
         # TODO: get rid of this ``.getData()`` and
         # make a more pythonic api to retreive backing
         # numpy arrays...
-        # (x, y) = self.curve().getData()
-        # index = self.property('index')
-        # # first = self._plot._arrays['ohlc'][0]['index']
-        # # first = x[0]
-        # # i = index - first
-        # if index:
-        #     i = round(index - x[0])
-        #     if i > 0 and i < len(y):
-        #         newPos = (index, y[i])
-        #         QtWidgets.QGraphicsItem.setPos(
-        #             self,
-        #             *newPos,
-        #         )
-        #         return True
+        (x, y) = self.curve().getData()
+        index = self.property('index')
+        # first = self._plot._arrays['ohlc'][0]['index']
+        # first = x[0]
+        # i = index - first
+        if index:
+            i = round(index - x[0])
+            if i > 0 and i < len(y):
+                newPos = (index, y[i])
+                QtWidgets.QGraphicsItem.setPos(self, *newPos)
+                return True
 
         return False
 
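Note: the re-enabled hover logic maps a chart `index` into the curve's
backing arrays by offsetting with the first x value. A standalone sketch of
just that lookup (values hypothetical):

    import numpy as np

    x = np.arange(100, 110)            # chart index space
    y = np.linspace(1.0, 2.0, 10)      # curve values
    index = 104                        # hovered chart index
    i = round(index - x[0])            # array-relative offset
    if 0 < i < len(y):
        new_pos = (index, y[i])        # where the dot should land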
@@ -18,37 +18,106 @@
 Fast, smooth, sexy curves.
 
 """
-from contextlib import contextmanager as cm
-from typing import Optional, Callable
+from typing import Optional
 
 import numpy as np
 import pyqtgraph as pg
-from PyQt5 import QtWidgets
+from PyQt5 import QtGui, QtWidgets
 from PyQt5.QtWidgets import QGraphicsItem
 from PyQt5.QtCore import (
     Qt,
     QLineF,
     QSizeF,
     QRectF,
-    # QRect,
     QPointF,
 )
-from PyQt5.QtGui import (
-    QPainter,
-    QPainterPath,
-)
 from .._profile import pg_profile_enabled, ms_slower_then
 from ._style import hcolor
-# from ._compression import (
-#     # ohlc_to_m4_line,
-#     ds_m4,
-# )
+from ._compression import (
+    # ohlc_to_m4_line,
+    ds_m4,
+)
 from ..log import get_logger
 
 
 log = get_logger(__name__)
 
 
+# TODO: numba this instead..
+# def step_path_arrays_from_1d(
+#     x: np.ndarray,
+#     y: np.ndarray,
+#     include_endpoints: bool = True,
+
+# ) -> (np.ndarray, np.ndarray):
+#     '''
+#     Generate a "step mode" curve aligned with OHLC style bars
+#     such that each segment spans each bar (aka "centered" style).
+
+#     '''
+#     # y_out = y.copy()
+#     # x_out = x.copy()
+
+#     # x2 = np.empty(
+#     #     # the data + 2 endpoints on either end for
+#     #     # "termination of the path".
+#     #     (len(x) + 1, 2),
+#     #     # we want to align with OHLC or other sampling style
+#     #     # bars likely so we need fractinal values
+#     #     dtype=float,
+#     # )
+
+#     x2 = np.broadcast_to(
+#         x[:, None],
+#         (
+#             x.size + 1,
+#             # 4,  # only ohlc
+#             2,
+#         ),
+#     ) + np.array([-0.5, 0.5])
+
+#     # x2[0] = x[0] - 0.5
+#     # x2[1] = x[0] + 0.5
+#     # x2[0, 0] = x[0] - 0.5
+#     # x2[0, 1] = x[0] + 0.5
+#     # x2[1:] = x[:, np.newaxis] + 0.5
+#     # import pdbpp
+#     # pdbpp.set_trace()
+
+#     # flatten to 1-d
+#     # x_out = x2.reshape(x2.size)
+#     # x_out = x2
+
+#     # we create a 1d with 2 extra indexes to
+#     # hold the start and (current) end value for the steps
+#     # on either end
+#     y2 = np.empty(
+#         (len(y) + 1, 2),
+#         dtype=y.dtype,
+#     )
+#     y2[:] = y[:, np.newaxis]
+#     # y2[-1] = 0
+
+#     # y_out = y2
+
+#     # y_out = np.empty(
+#     #     2*len(y) + 2,
+#     #     dtype=y.dtype
+#     # )
+
+#     # flatten and set 0 endpoints
+#     # y_out[1:-1] = y2.reshape(y2.size)
+#     # y_out[0] = 0
+#     # y_out[-1] = 0
+
+#     if not include_endpoints:
+#         return x2[:-1], y2[:-1]
+
+#     else:
+#         return x2, y2
+
+
 _line_styles: dict[str, int] = {
     'solid': Qt.PenStyle.SolidLine,
     'dash': Qt.PenStyle.DashLine,
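Note: the core trick in the commented-out step-path helper is broadcasting
each x to a (left, right) pair spanning half an index either side of the bar.
A minimal check of just that operation:

    import numpy as np

    x = np.arange(3)
    x2 = np.broadcast_to(x[:, None], (x.size, 2)) + np.array([-0.5, 0.5])
    # x2 == [[-0.5, 0.5], [0.5, 1.5], [1.5, 2.5]]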
@@ -57,43 +126,24 @@ _line_styles: dict[str, int] = {
 }
 
 
-class Curve(pg.GraphicsObject):
+class FastAppendCurve(pg.GraphicsObject):
     '''
-    A faster, simpler, append friendly version of
-    ``pyqtgraph.PlotCurveItem`` built for highly customizable real-time
-    updates.
+    A faster, append friendly version of ``pyqtgraph.PlotCurveItem``
+    built for real-time data updates.
 
-    This type is a much stripped down version of a ``pyqtgraph`` style
-    "graphics object" in the sense that the internal lower level
-    graphics which are drawn in the ``.paint()`` method are actually
-    rendered outside of this class entirely and instead are assigned as
-    state (instance vars) here and then drawn during a Qt graphics
-    cycle.
-
-    The main motivation for this more modular, composed design is that
-    lower level graphics data can be rendered in different threads and
-    then read and drawn in this main thread without having to worry
-    about dealing with Qt's concurrency primitives. See
-    ``piker.ui._flows.Renderer`` for details and logic related to lower
-    level path generation and incremental update. The main differences in
-    the path generation code include:
-
-    - avoiding regeneration of the entire historical path where possible
-      and instead only updating the "new" segment(s) via a ``numpy``
-      array diff calc.
-    - here, the "last" graphics datum-segment is drawn independently
-      such that near-term (high frequency) discrete-time-sampled style
-      updates don't trigger a full path redraw.
+    The main difference is avoiding regeneration of the entire
+    historical path where possible and instead only updating the "new"
+    segment(s) via a ``numpy`` array diff calc. Further the "last"
+    graphic segment is drawn independently such that near-term (high
+    frequency) discrete-time-sampled style updates don't trigger a full
+    path redraw.
 
     '''
 
-    # sub-type customization methods
-    sub_br: Optional[Callable] = None
-    sub_paint: Optional[Callable] = None
-    declare_paintables: Optional[Callable] = None
-
     def __init__(
         self,
 
+        x: np.ndarray = None,
+        y: np.ndarray = None,
         *args,
 
         step_mode: bool = False,
@@ -107,25 +157,31 @@ class Curve(pg.GraphicsObject):
 
     ) -> None:
 
-        self._name = name
-
         # brutaaalll, see comments within..
-        self.yData = None
-        self.xData = None
+        self._y = self.yData = y
+        self._x = self.xData = x
+        self._vr: Optional[tuple] = None
+        self._avr: Optional[tuple] = None
+        self._br = None
 
-        # self._last_cap: int = 0
-        self.path: Optional[QPainterPath] = None
+        self._name = name
+        self.path: Optional[QtGui.QPainterPath] = None
 
-        # additional path used for appends which tries to avoid
-        # triggering an update/redraw of the presumably larger
-        # historical ``.path`` above.
         self.use_fpath = use_fpath
-        self.fast_path: Optional[QPainterPath] = None
+        self.fast_path: Optional[QtGui.QPainterPath] = None
 
         # TODO: we can probably just dispense with the parent since
         # we're basically only using the pen setting now...
         super().__init__(*args, **kwargs)
 
+        # self._xrange: tuple[int, int] = self.dataBounds(ax=0)
+        self._xrange: Optional[tuple[int, int]] = None
+        # self._x_iv_range = None
+
+        # self._last_draw = time.time()
+        self._in_ds: bool = False
+        self._last_uppx: float = 0
+
         # all history of curve is drawn in single px thickness
         pen = pg.mkPen(hcolor(color))
         pen.setStyle(_line_styles[style])
@@ -139,44 +195,26 @@ class Curve(pg.GraphicsObject):
         # self.last_step_pen = pg.mkPen(hcolor(color), width=2)
         self.last_step_pen = pg.mkPen(pen, width=2)
 
-        # self._last_line: Optional[QLineF] = None
-        self._last_line = QLineF()
-        self._last_w: float = 1
+        self._last_line: Optional[QLineF] = None
+        self._last_step_rect: Optional[QRectF] = None
 
         # flat-top style histogram-like discrete curve
-        # self._step_mode: bool = step_mode
+        self._step_mode: bool = step_mode
 
         # self._fill = True
         self._brush = pg.functions.mkBrush(hcolor(fill_color or color))
 
-        # NOTE: this setting seems to mostly prevent redraws on mouse
-        # interaction which is a huge boon for avg interaction latency.
-
         # TODO: one question still remaining is if this makes trasform
         # interactions slower (such as zooming) and if so maybe if/when
         # we implement a "history" mode for the view we disable this in
         # that mode?
+        # if step_mode:
         # don't enable caching by default for the case where the
         # only thing drawn is the "last" line segment which can
         # have a weird artifact where it won't be fully drawn to its
         # endpoint (something we saw on trade rate curves)
         self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
 
-        # XXX: see explanation for different caching modes:
-        # https://stackoverflow.com/a/39410081
-        # seems to only be useful if we don't re-generate the entire
-        # QPainterPath every time
-        # curve.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
-
-        # don't ever use this - it's a colossal nightmare of artefacts
-        # and is disastrous for performance.
-        # curve.setCacheMode(QtWidgets.QGraphicsItem.ItemCoordinateCache)
-
-        # allow sub-type customization
-        declare = self.declare_paintables
-        if declare:
-            declare()
-
         # TODO: probably stick this in a new parent
         # type which will contain our own version of
         # what ``PlotCurveItem`` had in terms of base
@@ -199,6 +237,9 @@ class Curve(pg.GraphicsObject):
         vr = self.viewRect()
         l, r = int(vr.left()), int(vr.right())
 
+        if not self._xrange:
+            return 0
+
         start, stop = self._xrange
         lbar = max(l, start)
         rbar = min(r, stop)
@@ -207,10 +248,449 @@ class Curve(pg.GraphicsObject):
             QLineF(lbar, 0, rbar, 0)
         ).length()
 
+    def downsample(
+        self,
+        x,
+        y,
+        px_width,
+        uppx,
+
+    ) -> tuple[np.ndarray, np.ndarray]:
+
+        # downsample whenever more then 1 pixels per datum can be shown.
+        # always refresh data bounds until we get diffing
+        # working properly, see above..
+        bins, x, y = ds_m4(
+            x,
+            y,
+            px_width=px_width,
+            uppx=uppx,
+            # log_scale=bool(uppx)
+        )
+        x = np.broadcast_to(x[:, None], y.shape)
+        # x = (x + np.array([-0.43, 0, 0, 0.43])).flatten()
+        x = (x + np.array([-0.5, 0, 0, 0.5])).flatten()
+        y = y.flatten()
+
+        # presumably?
+        self._in_ds = True
+        return x, y
+
+    def update_from_array(
+        self,
+
+        # full array input history
+        x: np.ndarray,
+        y: np.ndarray,
+
+        # pre-sliced array data that's "in view"
+        x_iv: np.ndarray,
+        y_iv: np.ndarray,
+
+        view_range: Optional[tuple[int, int]] = None,
+        profiler: Optional[pg.debug.Profiler] = None,
+        draw_last: bool = True,
+        slice_to_head: int = -1,
+        do_append: bool = True,
+        should_redraw: bool = False,
+
+    ) -> QtGui.QPainterPath:
+        '''
+        Update curve from input 2-d data.
+
+        Compare with a cached "x-range" state and (pre/a)ppend based on
+        a length diff.
+
+        '''
+        profiler = profiler or pg.debug.Profiler(
+            msg=f'FastAppendCurve.update_from_array(): `{self._name}`',
+            disabled=not pg_profile_enabled(),
+            gt=ms_slower_then,
+        )
+        flip_cache = False
+
+        if self._xrange:
+            istart, istop = self._xrange
+        else:
+            self._xrange = istart, istop = x[0], x[-1]
+
+        # compute the length diffs between the first/last index entry in
+        # the input data and the last indexes we have on record from the
+        # last time we updated the curve index.
+        prepend_length = int(istart - x[0])
+        append_length = int(x[-1] - istop)
+
+        # this is the diff-mode, "data"-rendered index
+        # tracking var..
+        self._xrange = x[0], x[-1]
+
+        # print(f"xrange: {self._xrange}")
+
+        # XXX: lol brutal, the internals of `CurvePoint` (inherited by
+        # our `LineDot`) required ``.getData()`` to work..
+        # self.xData = x
+        # self.yData = y
+        # self._x, self._y = x, y
+
+        # downsampling incremental state checking
+        uppx = self.x_uppx()
+        px_width = self.px_width()
+        uppx_diff = (uppx - self._last_uppx)
+
+        new_sample_rate = False
+        should_ds = self._in_ds
+        showing_src_data = self._in_ds
+        # should_redraw = False
+
+        # if a view range is passed, plan to draw the
+        # source ouput that's "in view" of the chart.
+        if (
+            view_range
+            # and not self._in_ds
+            # and not prepend_length > 0
+        ):
+            # print(f'{self._name} vr: {view_range}')
+
+            # by default we only pull data up to the last (current) index
+            x_out, y_out = x_iv[:slice_to_head], y_iv[:slice_to_head]
+            profiler(f'view range slice {view_range}')
+
+            vl, vr = view_range
+
+            # last_ivr = self._x_iv_range
+            # ix_iv, iy_iv = self._x_iv_range = (x_iv[0], x_iv[-1])
+
+            zoom_or_append = False
+            last_vr = self._vr
+            last_ivr = self._avr
+
+            if last_vr:
+                # relative slice indices
+                lvl, lvr = last_vr
+                # abs slice indices
+                al, ar = last_ivr
+
+                # append_length = int(x[-1] - istop)
+                # append_length = int(x_iv[-1] - ar)
+
+                # left_change = abs(x_iv[0] - al) >= 1
+                # right_change = abs(x_iv[-1] - ar) >= 1
+
+                if (
+                    # likely a zoom view change
+                    (vr - lvr) > 2 or vl < lvl
+                    # append / prepend update
+                    # we had an append update where the view range
+                    # didn't change but the data-viewed (shifted)
+                    # underneath, so we need to redraw.
+                    # or left_change and right_change and last_vr == view_range
+
+                    # not (left_change and right_change) and ivr
+                    # (
+                    # or abs(x_iv[ivr] - livr) > 1
+                ):
+                    zoom_or_append = True
+
+            # if last_ivr:
+            #     liivl, liivr = last_ivr
+
+            if (
+                view_range != last_vr
+                and (
+                    append_length > 1
+                    or zoom_or_append
+                )
+            ):
+                should_redraw = True
+                # print("REDRAWING BRUH")
+
+            self._vr = view_range
+            self._avr = x_iv[0], x_iv[slice_to_head]
+
+            # x_last = x_iv[-1]
+            # y_last = y_iv[-1]
+            # self._last_vr = view_range
+
+            # self.disable_cache()
+            # flip_cache = True
+
+        else:
+            # if (
+            #     not view_range
+            #     or self._in_ds
+            # ):
+            # by default we only pull data up to the last (current) index
+            x_out, y_out = x[:slice_to_head], y[:slice_to_head]
+
+        if prepend_length > 0:
+            should_redraw = True
+
+        # check for downsampling conditions
+        if (
+            # std m4 downsample conditions
+            px_width
+            and abs(uppx_diff) >= 1
+        ):
+            log.info(
+                f'{self._name} sampler change: {self._last_uppx} -> {uppx}'
+            )
+            self._last_uppx = uppx
+            new_sample_rate = True
+            showing_src_data = False
+            should_redraw = True
+            should_ds = True
+
+        elif (
+            uppx <= 2
+            and self._in_ds
+        ):
+            # we should de-downsample back to our original
+            # source data so we clear our path data in prep
+            # to generate a new one from original source data.
+            should_redraw = True
+            new_sample_rate = True
+            should_ds = False
+            showing_src_data = True
+
+        # no_path_yet = self.path is None
+        if (
+            self.path is None
+            or should_redraw
+            or new_sample_rate
+            or prepend_length > 0
+        ):
+            # if (
+            #     not view_range
+            #     or self._in_ds
+            # ):
+            #     # by default we only pull data up to the last (current) index
+            #     x_out, y_out = x[:-1], y[:-1]
+
+            # step mode: draw flat top discrete "step"
+            # over the index space for each datum.
+            # if self._step_mode:
+            #     self.disable_cache()
+            #     flip_cache = True
+            #     x_out, y_out = step_path_arrays_from_1d(
+            #         x_out,
+            #         y_out,
+            #     )
+
+            #     # TODO: numba this bish
+            #     profiler('generated step arrays')
+
+            if should_redraw:
+                if self.path:
+                    # print(f'CLEARING PATH {self._name}')
+                    self.path.clear()
+
+                if self.fast_path:
+                    self.fast_path.clear()
+
+                profiler('cleared paths due to `should_redraw` set')
+
+            if new_sample_rate and showing_src_data:
+                # if self._in_ds:
+                log.info(f'DEDOWN -> {self._name}')
+
+                self._in_ds = False
+
+            elif should_ds and uppx and px_width > 1:
+                x_out, y_out = self.downsample(
+                    x_out,
+                    y_out,
+                    px_width,
+                    uppx,
+                )
+                profiler(f'FULL PATH downsample redraw={should_ds}')
+                self._in_ds = True
+
+            self.path = pg.functions.arrayToQPath(
+                x_out,
+                y_out,
+                connect='all',
+                finiteCheck=False,
+                path=self.path,
+            )
+            self.prepareGeometryChange()
+            profiler(
+                f'generated fresh path. (should_redraw: {should_redraw} should_ds: {should_ds} new_sample_rate: {new_sample_rate})'
+            )
+            # profiler(f'DRAW PATH IN VIEW -> {self._name}')
+
+            # reserve mem allocs see:
+            # - https://doc.qt.io/qt-5/qpainterpath.html#reserve
+            # - https://doc.qt.io/qt-5/qpainterpath.html#capacity
+            # - https://doc.qt.io/qt-5/qpainterpath.html#clear
+            # XXX: right now this is based on had hoc checks on a
+            # hidpi 3840x2160 4k monitor but we should optimize for
+            # the target display(s) on the sys.
+            # if no_path_yet:
+            #     self.path.reserve(int(500e3))
+
+        # TODO: get this piecewise prepend working - right now it's
+        # giving heck on vwap...
+        # elif prepend_length:
+        #     breakpoint()
+
+        #     prepend_path = pg.functions.arrayToQPath(
+        #         x[0:prepend_length],
+        #         y[0:prepend_length],
+        #         connect='all'
+        #     )
+
+        #     # swap prepend path in "front"
+        #     old_path = self.path
+        #     self.path = prepend_path
+        #     # self.path.moveTo(new_x[0], new_y[0])
+        #     self.path.connectPath(old_path)
+
+        elif (
+            append_length > 0
+            and do_append
+            and not should_redraw
+            # and not view_range
+        ):
+            print(f'{self._name} append len: {append_length}')
+            new_x = x[-append_length - 2:slice_to_head]
+            new_y = y[-append_length - 2:slice_to_head]
+            profiler('sliced append path')
+
+            # if self._step_mode:
+            #     # new_x, new_y = step_path_arrays_from_1d(
+            #     #     new_x,
+            #     #     new_y,
+            #     # )
+            #     # # [1:] since we don't need the vertical line normally at
+            #     # # the beginning of the step curve taking the first (x,
+            #     # # y) poing down to the x-axis **because** this is an
+            #     # # appended path graphic.
+            #     # new_x = new_x[1:]
+            #     # new_y = new_y[1:]
+
+            #     self.disable_cache()
+            #     flip_cache = True
+
+            #     profiler('generated step data')
+
+            profiler(
+                f'diffed array input, append_length={append_length}'
+            )
+
+            # if should_ds:
+            #     new_x, new_y = self.downsample(
+            #         new_x,
+            #         new_y,
+            #         px_width,
+            #         uppx,
+            #     )
+            #     profiler(f'fast path downsample redraw={should_ds}')
+
+            append_path = pg.functions.arrayToQPath(
+                new_x,
+                new_y,
+                connect='all',
+                finiteCheck=False,
+                path=self.fast_path,
+            )
+            profiler('generated append qpath')
+
+            if self.use_fpath:
+                # an attempt at trying to make append-updates faster..
+                if self.fast_path is None:
+                    self.fast_path = append_path
+                    # self.fast_path.reserve(int(6e3))
+                else:
+                    self.fast_path.connectPath(append_path)
+                    size = self.fast_path.capacity()
+                    profiler(f'connected fast path w size: {size}')
+
+                # print(f"append_path br: {append_path.boundingRect()}")
+                # self.path.moveTo(new_x[0], new_y[0])
+                # path.connectPath(append_path)
+
+                # XXX: lol this causes a hang..
+                # self.path = self.path.simplified()
+            else:
+                size = self.path.capacity()
+                profiler(f'connected history path w size: {size}')
+                self.path.connectPath(append_path)
+
+            # other merging ideas:
+            # https://stackoverflow.com/questions/8936225/how-to-merge-qpainterpaths
+            # path.addPath(append_path)
+            # path.closeSubpath()
+
+            # TODO: try out new work from `pyqtgraph` main which
+            # should repair horrid perf:
+            # https://github.com/pyqtgraph/pyqtgraph/pull/2032
+            # ok, nope still horrible XD
+            # if self._fill:
+            #     # XXX: super slow set "union" op
+            #     self.path = self.path.united(append_path).simplified()
+
+            # self.disable_cache()
+            # flip_cache = True
+
+        if draw_last:
+            self.draw_last(x, y)
+            profiler('draw last segment')
+
+        # if flip_cache:
+        #     # # XXX: seems to be needed to avoid artifacts (see above).
+        #     self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
+
+        # trigger redraw of path
+        # do update before reverting to cache mode
+        self.update()
+        profiler('.update()')
+
+    def draw_last(
+        self,
+        x: np.ndarray,
+        y: np.ndarray,
+
+    ) -> None:
+        x_last = x[-1]
+        y_last = y[-1]
+
+        # draw the "current" step graphic segment so it lines up with
+        # the "middle" of the current (OHLC) sample.
+        if self._step_mode:
+            self._last_line = QLineF(
+                x_last - 0.5, 0,
+                x_last + 0.5, 0,
+                # x_last, 0,
+                # x_last, 0,
+            )
+            self._last_step_rect = QRectF(
+                x_last - 0.5, 0,
+                x_last + 0.5, y_last
+                # x_last, 0,
+                # x_last, y_last
+            )
+            # print(
+            #     f"path br: {self.path.boundingRect()}",
+            #     f"fast path br: {self.fast_path.boundingRect()}",
+            #     f"last rect br: {self._last_step_rect}",
+            # )
+        else:
+            self._last_line = QLineF(
+                x[-2], y[-2],
+                x_last, y_last
+            )
+
+        self.update()
+
     # XXX: lol brutal, the internals of `CurvePoint` (inherited by
     # our `LineDot`) required ``.getData()`` to work..
     def getData(self):
-        return self.xData, self.yData
+        return self._x, self._y
+
+    # TODO: drop the above after ``Cursor`` re-work
+    def get_arrays(self) -> tuple[np.ndarray, np.ndarray]:
+        return self._x, self._y
 
     def clear(self):
         '''
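Note: `update_from_array()`'s append branch avoids rebuilding the whole
historical `QPainterPath` by generating a small path from only the new points
and connecting it on. A minimal sketch using the same `pyqtgraph` helpers (a
running Qt app is assumed in a real session; data here is synthetic):

    import numpy as np
    import pyqtgraph as pg

    # big historical path built once
    x, y = np.arange(1000.), np.random.rand(1000)
    path = pg.functions.arrayToQPath(x, y, connect='all')

    # per-update: path for just the appended tail, then connect
    new_x, new_y = np.arange(1000., 1010.), np.random.rand(10)
    append = pg.functions.arrayToQPath(new_x, new_y, connect='all')
    path.connectPath(append)  # O(new points), not O(history)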
@@ -233,18 +713,29 @@ class Curve(pg.GraphicsObject):
             # self.fast_path.clear()
             self.fast_path = None
 
-    @cm
+        # self.disable_cache()
+        # self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
+
     def reset_cache(self) -> None:
-        self.setCacheMode(QtWidgets.QGraphicsItem.NoCache)
-        yield
+        self.disable_cache()
         self.setCacheMode(QGraphicsItem.DeviceCoordinateCache)
 
+    def disable_cache(self) -> None:
+        '''
+        Disable the use of the pixel coordinate cache and trigger a geo event.
+
+        '''
+        # XXX: pretty annoying but, without this there's little
+        # artefacts on the append updates to the curve...
+        self.setCacheMode(QtWidgets.QGraphicsItem.NoCache)
+        # self.prepareGeometryChange()
+
     def boundingRect(self):
         '''
         Compute and then cache our rect.
         '''
         if self.path is None:
-            return QPainterPath().boundingRect()
+            return QtGui.QPainterPath().boundingRect()
         else:
             # dynamically override this method after initial
            # path is created to avoid requiring the above None check
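Note: the '-' side turns `reset_cache()` into a context manager that drops
the device-coordinate cache around a path mutation and restores it after. The
same shape, standalone with a stand-in item class:

    from contextlib import contextmanager

    class Item:
        def setCacheMode(self, mode: str) -> None:
            print('cache mode ->', mode)

    @contextmanager
    def reset_cache(item: Item):
        item.setCacheMode('NoCache')                 # drop cached raster
        yield                                        # mutate paths here
        item.setCacheMode('DeviceCoordinateCache')   # restore caching

    with reset_cache(Item()):
        pass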
@ -256,15 +747,14 @@ class Curve(pg.GraphicsObject):
|
||||||
Post init ``.boundingRect()```.
|
Post init ``.boundingRect()```.
|
||||||
|
|
||||||
'''
|
'''
|
||||||
# hb = self.path.boundingRect()
|
|
||||||
hb = self.path.controlPointRect()
|
hb = self.path.controlPointRect()
|
||||||
|
# hb = self.path.boundingRect()
|
||||||
hb_size = hb.size()
|
hb_size = hb.size()
|
||||||
|
|
||||||
fp = self.fast_path
|
fp = self.fast_path
|
||||||
if fp:
|
if fp:
|
||||||
fhb = fp.controlPointRect()
|
fhb = fp.controlPointRect()
|
||||||
hb_size = fhb.size() + hb_size
|
hb_size = fhb.size() + hb_size
|
||||||
|
|
||||||
# print(f'hb_size: {hb_size}')
|
# print(f'hb_size: {hb_size}')
|
||||||
|
|
||||||
# if self._last_step_rect:
|
# if self._last_step_rect:
|
||||||
|
@ -280,26 +770,13 @@ class Curve(pg.GraphicsObject):
|
||||||
# # hb_size.height() + 1
|
# # hb_size.height() + 1
|
||||||
# )
|
# )
|
||||||
|
|
||||||
|
# if self._last_step_rect:
|
||||||
# br = self._last_step_rect.bottomRight()
|
# br = self._last_step_rect.bottomRight()
|
||||||
|
|
||||||
w = hb_size.width()
|
# else:
|
||||||
h = hb_size.height()
|
# hb_size += QSizeF(1, 1)
|
||||||
|
w = hb_size.width() + 1
|
||||||
sbr = self.sub_br
|
h = hb_size.height() + 1
|
||||||
if sbr:
|
|
||||||
w, h = self.sub_br(w, h)
|
|
||||||
else:
|
|
||||||
# assume plain line graphic and use
|
|
||||||
# default unit step in each direction.
|
|
||||||
|
|
||||||
# only on a plane line do we include
|
|
||||||
# and extra index step's worth of width
|
|
||||||
# since in the step case the end of the curve
|
|
||||||
# actually terminates earlier so we don't need
|
|
||||||
# this for the last step.
|
|
||||||
w += self._last_w
|
|
||||||
# ll = self._last_line
|
|
||||||
h += 1 # ll.y2() - ll.y1()
|
|
||||||
|
|
||||||
        # br = QPointF(
        #     self._vr[-1],
@@ -320,37 +797,43 @@ class Curve(pg.GraphicsObject):
        #     hb_size,
            QSizeF(w, h)
        )
+       self._br = br
        # print(f'bounding rect: {br}')
        return br

    def paint(
        self,
-       p: QPainter,
+       p: QtGui.QPainter,
        opt: QtWidgets.QStyleOptionGraphicsItem,
        w: QtWidgets.QWidget

    ) -> None:

        profiler = pg.debug.Profiler(
-           msg=f'Curve.paint(): `{self._name}`',
+           msg=f'FastAppendCurve.paint(): `{self._name}`',
            disabled=not pg_profile_enabled(),
-           ms_threshold=ms_slower_then,
+           gt=ms_slower_then,
        )
+       self.prepareGeometryChange()

-       sub_paint = self.sub_paint
-       if sub_paint:
-           sub_paint(p, profiler)
+       if (
+           self._step_mode
+           and self._last_step_rect
+       ):
+           brush = self._brush
+           # p.drawLines(*tuple(filter(bool, self._last_step_lines)))
+           # p.drawRect(self._last_step_rect)
+           p.fillRect(self._last_step_rect, brush)
+           profiler('.fillRect()')

+       if self._last_line:
        p.setPen(self.last_step_pen)
        p.drawLine(self._last_line)
        profiler('.drawLine()')
        p.setPen(self._pen)

        path = self.path
-       # cap = path.capacity()
-       # if cap != self._last_cap:
-       #     print(f'NEW CAPACITY: {self._last_cap} -> {cap}')
-       #     self._last_cap = cap

        if path:
            p.drawPath(path)
@@ -368,117 +851,3 @@ class Curve(pg.GraphicsObject):
        # if self._fill:
        #     brush = self.opts['brush']
        #     p.fillPath(self.path, brush)

-   def draw_last_datum(
-       self,
-       path: QPainterPath,
-       src_data: np.ndarray,
-       render_data: np.ndarray,
-       reset: bool,
-       array_key: str,
-
-   ) -> None:
-       # default line draw last call
-       # with self.reset_cache():
-       x = render_data['index']
-       y = render_data[array_key]
-
-       # draw the "current" step graphic segment so it
-       # lines up with the "middle" of the current
-       # (OHLC) sample.
-       self._last_line = QLineF(
-           x[-2], y[-2],
-           x[-1], y[-1],
-       )
-
-       return x, y
-
-
-# TODO: this should probably be a "downsampled" curve type
-# that draws a bar-style (but for the px column) last graphics
-# element such that the current datum in view can be shown
-# (via it's max / min) even when highly zoomed out.
-class FlattenedOHLC(Curve):
-
-   def draw_last_datum(
-       self,
-       path: QPainterPath,
-       src_data: np.ndarray,
-       render_data: np.ndarray,
-       reset: bool,
-       array_key: str,
-
-   ) -> None:
-       lasts = src_data[-2:]
-       x = lasts['index']
-       y = lasts['close']
-
-       # draw the "current" step graphic segment so it
-       # lines up with the "middle" of the current
-       # (OHLC) sample.
-       self._last_line = QLineF(
-           x[-2], y[-2],
-           x[-1], y[-1]
-       )
-       return x, y
-
-
-class StepCurve(Curve):
-
-   def declare_paintables(
-       self,
-   ) -> None:
-       self._last_step_rect = QRectF()
-
-   def draw_last_datum(
-       self,
-       path: QPainterPath,
-       src_data: np.ndarray,
-       render_data: np.ndarray,
-       reset: bool,
-       array_key: str,
-
-       w: float = 0.5,
-
-   ) -> None:
-
-       # TODO: remove this and instead place all step curve
-       # updating into pre-path data render callbacks.
-       # full input data
-       x = src_data['index']
-       y = src_data[array_key]
-
-       x_last = x[-1]
-       y_last = y[-1]
-
-       # lol, commenting this makes step curves
-       # all "black" for me :eyeroll:..
-       self._last_line = QLineF(
-           x_last - w, 0,
-           x_last + w, 0,
-       )
-       self._last_step_rect = QRectF(
-           x_last - w, 0,
-           x_last + w, y_last,
-       )
-       return x, y
-
-   def sub_paint(
-       self,
-       p: QPainter,
-       profiler: pg.debug.Profiler,
-
-   ) -> None:
-       # p.drawLines(*tuple(filter(bool, self._last_step_lines)))
-       # p.drawRect(self._last_step_rect)
-       p.fillRect(self._last_step_rect, self._brush)
-       profiler('.fillRect()')
-
-   def sub_br(
-       self,
-       path_w: float,
-       path_h: float,
-
-   ) -> (float, float):
-       # passthrough
-       return path_w, path_h
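The removed `StepCurve` above draws its most recent sample as a horizontal `QLineF` plus a filled `QRectF` spanning one bar-width around the last x index. A minimal, self-contained sketch of that geometry (sample values are hypothetical):

```python
# Sketch of the step-curve "last datum" geometry from the removed
# StepCurve.draw_last_datum() above; the values are hypothetical.
from PyQt5.QtCore import QLineF, QPointF, QRectF

x_last, y_last = 100.0, 4200.5   # last sample's (index, value)
w = 0.5                          # half bar-width in x-units

# horizontal segment marking the step's x-span at the zero line
last_line = QLineF(x_last - w, 0.0, x_last + w, 0.0)

# rect from the x-axis up to the last y value, built from two corner
# points; paint() fills this via p.fillRect(self._last_step_rect, brush)
last_step_rect = QRectF(
    QPointF(x_last - w, 0.0),
    QPointF(x_last + w, y_last),
)
```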
@@ -32,7 +32,7 @@ import trio
 import pendulum
 import pyqtgraph as pg

-# from .. import brokers
+from .. import brokers
 from ..data.feed import open_feed
 from ._axes import YAxisLabel
 from ._chart import (
@@ -54,10 +54,10 @@ from ._forms import (
     mk_order_pane_layout,
 )
 from .order_mode import open_order_mode
-from .._profile import (
-    pg_profile_enabled,
-    ms_slower_then,
-)
+# from .._profile import (
+#     pg_profile_enabled,
+#     ms_slower_then,
+# )
 from ..log import get_logger

 log = get_logger(__name__)
@@ -263,7 +263,6 @@ async def graphics_update_loop(
        'vars': {
            'tick_margin': tick_margin,
            'i_last': i_last,
-           'i_last_append': i_last,
            'last_mx_vlm': last_mx_vlm,
            'last_mx': last_mx,
            'last_mn': last_mn,
@@ -310,7 +309,6 @@ def graphics_update_cycle(
    ds: DisplayState,
    wap_in_history: bool = False,
    trigger_all: bool = False,  # flag used by prepend history updates
-   prepend_update_index: Optional[int] = None,

 ) -> None:
    # TODO: eventually optimize this whole graphics stack with ``numba``
@@ -320,12 +318,9 @@ def graphics_update_cycle(

    profiler = pg.debug.Profiler(
        msg=f'Graphics loop cycle for: `{chart.name}`',
-       delayed=True,
-       disabled=not pg_profile_enabled(),
-       # disabled=True,
-       ms_threshold=ms_slower_then,
-       # ms_threshold=1/12 * 1e3,
+       disabled=True,  # not pg_profile_enabled(),
+       gt=1/12 * 1e3,
+       # gt=ms_slower_then,
    )

    # unpack multi-referenced components
@@ -341,7 +336,7 @@ def graphics_update_cycle(
    for sym, quote in ds.quotes.items():

        # compute the first available graphic's x-units-per-pixel
-       uppx = vlm_chart.view.x_uppx()
+       xpx = vlm_chart.view.x_uppx()

        # NOTE: vlm may be written by the ``brokerd`` backend
        # event though a tick sample is not emitted.
@@ -360,58 +355,25 @@ def graphics_update_cycle(
        i_diff = i_step - vars['i_last']
        vars['i_last'] = i_step

-       append_diff = i_step - vars['i_last_append']
-
-       # update the "last datum" (aka extending the flow graphic with
-       # new data) only if the number of unit steps is >= the number of
-       # such unit steps per pixel (aka uppx). Iow, if the zoom level
-       # is such that a datum(s) update to graphics wouldn't span
-       # to a new pixel, we don't update yet.
-       do_append = (append_diff >= uppx)
-       if do_append:
-           vars['i_last_append'] = i_step
-
-       do_rt_update = uppx < update_uppx
-       # print(
-       #     f'append_diff:{append_diff}\n'
-       #     f'uppx:{uppx}\n'
-       #     f'do_append: {do_append}'
-       # )
-
-       # TODO: we should only run mxmn when we know
-       # an update is due via ``do_append`` above.
        (
            brange,
            mx_in_view,
            mn_in_view,
            mx_vlm_in_view,
        ) = ds.maxmin()

        l, lbar, rbar, r = brange
        mx = mx_in_view + tick_margin
        mn = mn_in_view - tick_margin
-       profiler('`ds.maxmin()` call')
+       profiler('maxmin call')

        liv = r >= i_step  # the last datum is in view

-       if (
-           prepend_update_index is not None
-           and lbar > prepend_update_index
-       ):
-           # on a history update (usually from the FSP subsys)
-           # if the segment of history that is being prepended
-           # isn't in view there is no reason to do a graphics
-           # update.
-           log.debug('Skipping prepend graphics cycle: frame not in view')
-           return
-
        # don't real-time "shift" the curve to the
        # left unless we get one of the following:
        if (
            (
-               # i_diff > 0  # no new sample step
-               do_append
-               # and uppx < 4  # chart is zoomed out very far
+               i_diff > 0  # no new sample step
+               and xpx < 4  # chart is zoomed out very far
                and liv
            )
            or trigger_all
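The deleted block above implemented a per-pixel append throttle: only extend a flow's graphics once enough new time steps have accumulated to span at least one screen pixel (the `uppx`, x-units-per-pixel, value). A sketch of that gating logic, with state names mirroring the diff:

```python
# Sketch of the pixel-column append throttle deleted above; state
# names mirror the diff, the surrounding objects are hypothetical.
def should_append(
    i_step: int,         # index of the newest time step
    i_last_append: int,  # step index at the last appended draw
    uppx: float,         # x-units (time steps) per screen pixel
) -> bool:
    # fewer new steps than fit in one pixel means an appended
    # draw could not change any rendered pixel yet
    append_diff = i_step - i_last_append
    return append_diff >= uppx

# zoomed out to 8 steps/pixel: only every 8th step triggers an append
assert should_append(108, 100, 8.0)
assert not should_append(105, 100, 8.0)
```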
@@ -422,9 +384,66 @@ def graphics_update_cycle(
            chart.increment_view(steps=i_diff)

            if vlm_chart:
-               vlm_chart.increment_view(steps=i_diff)
+               # always update y-label
+               ds.vlm_sticky.update_from_data(
+                   *array[-1][['index', 'volume']]
+               )

-           profiler('view incremented')
+               if (
+                   (
+                       xpx < update_uppx
+                       or i_diff > 0
+                       and liv
+                   )
+                   or trigger_all
+               ):
+                   # TODO: make it so this doesn't have to be called
+                   # once the $vlm is up?
+                   vlm_chart.update_graphics_from_flow(
+                       'volume',
+                       # UGGGh, see ``maxmin()`` impl in `._fsp` for
+                       # the overlayed plotitems... we need a better
+                       # bay to invoke a maxmin per overlay..
+                       render=False,
+                       # XXX: ^^^^ THIS IS SUPER IMPORTANT! ^^^^
+                       # without this, since we disable the
+                       # 'volume' (units) chart after the $vlm starts
+                       # up we need to be sure to enable this
+                       # auto-ranging otherwise there will be no handler
+                       # connected to update accompanying overlay
+                       # graphics..
+                   )
+
+                   if (
+                       mx_vlm_in_view != vars['last_mx_vlm']
+                   ):
+                       yrange = (0, mx_vlm_in_view * 1.375)
+                       vlm_chart.view._set_yrange(
+                           yrange=yrange,
+                       )
+                       # print(f'mx vlm: {last_mx_vlm} -> {mx_vlm_in_view}')
+                       vars['last_mx_vlm'] = mx_vlm_in_view
+
+                   for curve_name, flow in vlm_chart._flows.items():
+
+                       if not flow.render:
+                           continue
+
+                       update_fsp_chart(
+                           vlm_chart,
+                           flow,
+                           curve_name,
+                           array_key=curve_name,
+                           do_append=xpx < update_uppx,
+                       )
+                       # is this even doing anything?
+                       # (pretty sure it's the real-time
+                       # resizing from last quote?)
+                       fvb = flow.plot.vb
+                       fvb._set_yrange(
+                           autoscale_linked_plots=False,
+                           name=curve_name,
+                       )

        ticks_frame = quote.get('ticks', ())

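The new volume-chart branch resets the y-range whenever the max in-view volume changes, padding the top by a fixed 1.375x headroom factor so the tallest bar never touches the view edge. As a small sketch (the chart objects are elided):

```python
# Sketch of the volume y-range headroom logic added above; the
# factor comes straight from the diff, everything else is a stand-in.
VLM_HEADROOM = 1.375

def vlm_yrange(mx_vlm_in_view: float) -> tuple[float, float]:
    # volume is always >= 0 so the bottom stays pinned at zero and
    # only the top bound is scaled out for headroom
    return (0, mx_vlm_in_view * VLM_HEADROOM)

print(vlm_yrange(10_000))  # -> (0, 13750.0)
```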
@@ -471,20 +490,15 @@ def graphics_update_cycle(

        # update ohlc sampled price bars
        if (
-           do_rt_update
-           or do_append
+           xpx < update_uppx
+           or i_diff > 0
            or trigger_all
        ):
            chart.update_graphics_from_flow(
                chart.name,
-               # do_append=uppx < update_uppx,
-               do_append=do_append,
+               do_append=xpx < update_uppx,
            )

-           # NOTE: we always update the "last" datum
-           # since the current range should at least be updated
-           # to it's max/min on the last pixel.
-
        # iterate in FIFO order per tick-frame
        for typ, tick in lasts.items():

@@ -589,10 +603,11 @@ def graphics_update_cycle(
        vars['last_mx'], vars['last_mn'] = mx, mn

        # run synchronous update on all linked flows
-       # TODO: should the "main" (aka source) flow be special?
        for curve_name, flow in chart._flows.items():
-           # update any overlayed fsp flows
-           if curve_name != chart.data_key:
+           # TODO: should the "main" (aka source) flow be special?
+           if curve_name == chart.data_key:
+               continue

            update_fsp_chart(
                chart,
                flow,
@@ -600,105 +615,6 @@ def graphics_update_cycle(
                array_key=curve_name,
            )

-           # even if we're downsampled bigly
-           # draw the last datum in the final
-           # px column to give the user the mx/mn
-           # range of that set.
-           if (
-               not do_append
-               # and not do_rt_update
-               and liv
-           ):
-               flow.draw_last(
-                   array_key=curve_name,
-                   only_last_uppx=True,
-               )
-
-       # volume chart logic..
-       # TODO: can we unify this with the above loop?
-       if vlm_chart:
-           # always update y-label
-           ds.vlm_sticky.update_from_data(
-               *array[-1][['index', 'volume']]
-           )
-
-           if (
-               (
-                   do_rt_update
-                   or do_append
-                   and liv
-               )
-               or trigger_all
-           ):
-               # TODO: make it so this doesn't have to be called
-               # once the $vlm is up?
-               vlm_chart.update_graphics_from_flow(
-                   'volume',
-                   # UGGGh, see ``maxmin()`` impl in `._fsp` for
-                   # the overlayed plotitems... we need a better
-                   # bay to invoke a maxmin per overlay..
-                   render=False,
-                   # XXX: ^^^^ THIS IS SUPER IMPORTANT! ^^^^
-                   # without this, since we disable the
-                   # 'volume' (units) chart after the $vlm starts
-                   # up we need to be sure to enable this
-                   # auto-ranging otherwise there will be no handler
-                   # connected to update accompanying overlay
-                   # graphics..
-               )
-               profiler('`vlm_chart.update_graphics_from_flow()`')
-
-               if (
-                   mx_vlm_in_view != vars['last_mx_vlm']
-               ):
-                   yrange = (0, mx_vlm_in_view * 1.375)
-                   vlm_chart.view._set_yrange(
-                       yrange=yrange,
-                   )
-                   profiler('`vlm_chart.view._set_yrange()`')
-                   # print(f'mx vlm: {last_mx_vlm} -> {mx_vlm_in_view}')
-                   vars['last_mx_vlm'] = mx_vlm_in_view
-
-           for curve_name, flow in vlm_chart._flows.items():
-
-               if (
-                   curve_name != 'volume' and
-                   flow.render and (
-                       liv and
-                       do_rt_update or do_append
-                   )
-               ):
-                   update_fsp_chart(
-                       vlm_chart,
-                       flow,
-                       curve_name,
-                       array_key=curve_name,
-                       # do_append=uppx < update_uppx,
-                       do_append=do_append,
-                   )
-                   # is this even doing anything?
-                   # (pretty sure it's the real-time
-                   # resizing from last quote?)
-                   fvb = flow.plot.vb
-                   fvb._set_yrange(
-                       name=curve_name,
-                   )
-
-               elif (
-                   curve_name != 'volume'
-                   and not do_append
-                   and liv
-                   and uppx >= 1
-                   # even if we're downsampled bigly
-                   # draw the last datum in the final
-                   # px column to give the user the mx/mn
-                   # range of that set.
-               ):
-                   # always update the last datum-element
-                   # graphic for all flows
-                   # print(f'drawing last {flow.name}')
-                   flow.draw_last(array_key=curve_name)


 async def display_symbol_data(
     godwidget: GodWidget,
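The removed `elif` branch drew only the final datum when the chart is downsampled (`uppx >= 1`) and no append update is due, keeping the last pixel column's max/min current. A sketch of that decision, with `draw_last()` and the mode strings as hypothetical stand-ins:

```python
# Sketch of the removed "draw only the last datum" fallback; the
# return values and function are hypothetical illustrations.
def update_mode(
    do_append: bool,
    liv: bool,    # last datum is in view
    uppx: float,  # x-units (time steps) per screen pixel
) -> str:
    if do_append:
        return 'full-append-render'
    elif liv and uppx >= 1:
        # downsampled bigly: a single new datum can't span a pixel,
        # so only refresh the final px column's graphic
        return 'draw-last-datum-only'
    return 'skip'
```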
@@ -723,7 +639,7 @@ async def display_symbol_data(
    )

    # historical data fetch
-   # brokermod = brokers.get_brokermod(provider)
+   brokermod = brokers.get_brokermod(provider)

    # ohlc_status_done = sbar.open_status(
    #     'retreiving OHLC history.. ',
@@ -49,6 +49,10 @@ from . import _style
 log = get_logger(__name__)

 # pyqtgraph global config
+# might as well enable this for now?
+pg.useOpenGL = True
+pg.enableExperimental = True
+
 # engage core tweaks that give us better response
 # latency then the average pg user
 _do_overrides()
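The added lines flip on pyqtgraph's OpenGL and experimental rendering paths by assigning module attributes at import time; the equivalent, documented route is the config-option API:

```python
# Equivalent pyqtgraph config via the documented API (the diff above
# sets the module attributes directly instead).
import pyqtgraph as pg

pg.setConfigOptions(
    useOpenGL=True,           # raster through an OpenGL widget where supported
    enableExperimental=True,  # opt in to experimental speedups
)
```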
@@ -1,83 +0,0 @@
-# piker: trading gear for hackers
-# Copyright (C) Tyler Goodlet (in stewardship for piker0)
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program.  If not, see <https://www.gnu.org/licenses/>.
-
-"""
-Feed status and controls widget(s) for embedding in a UI-pane.
-
-"""
-from __future__ import annotations
-from textwrap import dedent
-from typing import TYPE_CHECKING
-
-# from PyQt5.QtCore import Qt
-
-from ._style import _font, _font_small
-# from ..calc import humanize
-from ._label import FormatLabel
-
-if TYPE_CHECKING:
-    from ._chart import ChartPlotWidget
-    from ..data.feed import Feed
-    from ._forms import FieldsForm
-
-
-def mk_feed_label(
-    form: FieldsForm,
-    feed: Feed,
-    chart: ChartPlotWidget,
-
-) -> FormatLabel:
-    '''
-    Generate a label from feed meta-data to be displayed
-    in a UI sidepane.
-
-    TODO: eventually buttons for changing settings over
-    a feed control protocol.
-
-    '''
-    status = feed.status
-    assert status
-
-    msg = dedent("""
-        actor: **{actor_name}**\n
-        |_ @**{host}:{port}**\n
-    """)
-
-    for key, val in status.items():
-        if key in ('host', 'port', 'actor_name'):
-            continue
-        msg += f'\n|_ {key}: **{{{key}}}**\n'
-
-    feed_label = FormatLabel(
-        fmt_str=msg,
-        # |_ streams: **{symbols}**\n
-        font=_font.font,
-        font_size=_font_small.px_size,
-        font_color='default_lightest',
-    )
-
-    # form.vbox.setAlignment(feed_label, Qt.AlignBottom)
-    # form.vbox.setAlignment(Qt.AlignBottom)
-    _ = chart.height() - (
-        form.height() +
-        form.fill_bar.height()
-        # feed_label.height()
-    )
-
-    feed_label.format(**feed.status)
-
-    return feed_label
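The removed `mk_feed_label()` assembled a format string with `{key}` placeholders from a feed's status dict and rendered it through `FormatLabel.format()`. The templating itself is plain `str.format`; a self-contained sketch with a hypothetical status dict:

```python
# Sketch of the fmt-string assembly from the removed mk_feed_label();
# the status dict contents here are hypothetical.
from textwrap import dedent

status = {
    'actor_name': 'brokerd.ib',
    'host': '127.0.0.1',
    'port': 61000,
    'shm': 'piker.brokerd.shm0',  # extra keys get appended below
}

msg = dedent("""
    actor: **{actor_name}**\n
    |_ @**{host}:{port}**\n
""")

# append one templated line per remaining status field; the triple
# braces emit a literal `{key}` placeholder for the later .format()
for key in status:
    if key in ('host', 'port', 'actor_name'):
        continue
    msg += f'\n|_ {key}: **{{{key}}}**\n'

print(msg.format(**status))
```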
piker/ui/_flows.py (1473 changed lines): file diff suppressed because it is too large.
@@ -750,12 +750,12 @@ def mk_order_pane_layout(
        parent=parent,
        fields_schema={
            'account': {
-               'label': '**accnt**:',
+               'label': '**account**:',
                'type': 'select',
                'default_value': ['paper'],
            },
            'size_unit': {
-               'label': '**alloc**:',
+               'label': '**allocate**:',
                'type': 'select',
                'default_value': [
                    '$ size',
@@ -435,16 +435,12 @@ class FspAdmin:
        # wait for graceful shutdown signal
        async with stream.subscribe() as stream:
            async for msg in stream:
-               info = msg.get('fsp_update')
-               if info:
+               if msg == 'update':
                    # if the chart isn't hidden try to update
                    # the data on screen.
                    if not self.linked.isHidden():
-                       log.debug(f'Re-syncing graphics for fsp: {ns_path}')
-                       self.linked.graphics_cycle(
-                           trigger_all=True,
-                           prepend_update_index=info['first'],
-                       )
+                       log.info(f'Re-syncing graphics for fsp: {ns_path}')
+                       self.linked.graphics_cycle(trigger_all=True)
                else:
                    log.info(f'recved unexpected fsp engine msg: {msg}')

@@ -635,7 +631,7 @@ async def open_vlm_displays(
    )

    # force 0 to always be in view
-   def multi_maxmin(
+   def maxmin(
        names: list[str],

    ) -> tuple[float, float]:
@@ -651,7 +647,7 @@ async def open_vlm_displays(

        return 0, mx

-   chart.view.maxmin = partial(multi_maxmin, names=['volume'])
+   chart.view.maxmin = partial(maxmin, names=['volume'])

    # TODO: fix the x-axis label issue where if you put
    # the axis on the left it's totally not lined up...
@@ -741,20 +737,19 @@ async def open_vlm_displays(
        'dolla_vlm',
        'dark_vlm',
    ]
-   # dvlm_rate_fields = [
-   #     'dvlm_rate',
-   #     'dark_dvlm_rate',
-   # ]
+   dvlm_rate_fields = [
+       'dvlm_rate',
+       'dark_dvlm_rate',
+   ]
    trade_rate_fields = [
        'trade_rate',
        'dark_trade_rate',
    ]

    group_mxmn = partial(
-       multi_maxmin,
+       maxmin,
        # keep both regular and dark vlm in view
-       names=fields,
-       # names=fields + dvlm_rate_fields,
+       names=fields + dvlm_rate_fields,
    )

    # add custom auto range handler
@@ -821,11 +816,11 @@ async def open_vlm_displays(
    )
    await started.wait()

-   # chart_curves(
-   #     dvlm_rate_fields,
-   #     dvlm_pi,
-   #     fr_shm,
-   # )
+   chart_curves(
+       dvlm_rate_fields,
+       dvlm_pi,
+       fr_shm,
+   )

    # TODO: is there a way to "sync" the dual axes such that only
    # one curve is needed?
@@ -863,7 +858,7 @@ async def open_vlm_displays(
    )
    # add custom auto range handler
    tr_pi.vb.maxmin = partial(
-       multi_maxmin,
+       maxmin,
        # keep both regular and dark vlm in view
        names=trade_rate_fields,
    )
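The renamed `maxmin()` helper computes one grouped y-range across several named flow arrays and is bound per-view with `functools.partial`, as in `chart.view.maxmin = partial(maxmin, names=['volume'])`. A sketch of the pattern with plain numpy arrays standing in for piker's shm-backed flows:

```python
# Sketch of the partial-bound grouped maxmin pattern used above; the
# dict-of-arrays data layout is a hypothetical stand-in.
from functools import partial
import numpy as np

flows = {
    'volume': np.array([10., 50., 30.]),
    'dark_vlm': np.array([5., 80., 2.]),
}

def maxmin(names: list[str]) -> tuple[float, float]:
    # force 0 to always be in view; match the upper bound to the
    # largest value across all grouped arrays
    mx = max(flows[name].max() for name in names)
    return 0, mx

# bind one group per view, mirroring `chart.view.maxmin = partial(...)`
view_maxmin = partial(maxmin, names=['volume', 'dark_vlm'])
print(view_maxmin())  # -> (0, 80.0)
```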
@@ -570,13 +570,6 @@ class ChartView(ViewBox):
            self._resetTarget()
            self.scaleBy(s, focal)

-           # XXX: the order of the next 2 lines i'm pretty sure
-           # matters, we want the resize to trigger before the graphics
-           # update, but i gotta feelin that because this one is signal
-           # based (and thus not necessarily sync invoked right away)
-           # that calling the resize method manually might work better.
-           self.sigRangeChangedManually.emit(mask)
-
            # XXX: without this is seems as though sometimes
            # when zooming in from far out (and maybe vice versa?)
            # the signal isn't being fired enough since if you pan
@@ -587,6 +580,12 @@ class ChartView(ViewBox):
            # fires don't happen?
            self.maybe_downsample_graphics()

+           self.sigRangeChangedManually.emit(mask)
+
+           # self._ic.set()
+           # self._ic = None
+           # self.chart.resume_all_feeds()
+
            ev.accept()

    def mouseDragEvent(
@@ -749,6 +748,7 @@ class ChartView(ViewBox):
        # set from recursion errors.
        autoscale_linked_plots: bool = False,
        name: Optional[str] = None,
+       # autoscale_overlays: bool = False,

    ) -> None:
        '''
@@ -759,12 +759,10 @@ class ChartView(ViewBox):
        data set.

        '''
-       name = self.name
-       # print(f'YRANGE ON {name}')
        profiler = pg.debug.Profiler(
-           msg=f'`ChartView._set_yrange()`: `{name}`',
+           msg=f'`ChartView._set_yrange()`: `{self.name}`',
            disabled=not pg_profile_enabled(),
-           ms_threshold=ms_slower_then,
+           gt=ms_slower_then,
            delayed=True,
        )
        set_range = True
@@ -790,22 +788,52 @@ class ChartView(ViewBox):
        elif yrange is not None:
            ylow, yhigh = yrange

+       # calculate max, min y values in viewable x-range from data.
+       # Make sure min bars/datums on screen is adhered.
+       # else:
+       # TODO: eventually we should point to the
+       # ``FlowsTable`` (or wtv) which should perform
+       # the group operations?
+
+       # flow = chart._flows[name or chart.name]
+       # br = bars_range or chart.bars_range()
+       # br = bars_range or chart.bars_range()
+       # profiler(f'got bars range: {br}')
+
+       # TODO: maybe should be a method on the
+       # chart widget/item?
+       # if False:
+       # if autoscale_linked_plots:
+       #     # avoid recursion by sibling plots
+       #     linked = self.linkedsplits
+       #     plots = list(linked.subplots.copy().values())
+       #     main = linked.chart
+       #     if main:
+       #         plots.append(main)
+
+       #     for chart in plots:
+       #         if chart and not chart._static_yrange:
+       #             chart.cv._set_yrange(
+       #                 # bars_range=br,
+       #                 autoscale_linked_plots=False,
+       #             )
+       #     profiler('autoscaled linked plots')
+
        if set_range:

+           if not yrange:
                # XXX: only compute the mxmn range
                # if none is provided as input!
-           if not yrange:
-               # flow = chart._flows[name]
                yrange = self._maxmin()

            if yrange is None:
-               log.warning(f'No yrange provided for {name}!?')
-               print(f"WTF NO YRANGE {name}")
+               log.warning(f'No yrange provided for {self.name}!?')
+               print(f"WTF NO YRANGE {self.name}")
                return

            ylow, yhigh = yrange

-           profiler(f'callback ._maxmin(): {yrange}')
+           profiler(f'maxmin(): {yrange}')

            # view margins: stay within a % of the "true range"
            diff = yhigh - ylow
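After `diff = yhigh - ylow`, `_set_yrange()` pads both bounds by a percentage of the true range so data never sits flush against the view edges. A sketch of that margin step; the 6% fraction is an assumption for illustration:

```python
# Sketch of percent-of-range view margins following the
# `diff = yhigh - ylow` line above; the margin value is assumed.
def pad_yrange(
    ylow: float,
    yhigh: float,
    range_margin: float = 0.06,  # hypothetical margin fraction
) -> tuple[float, float]:
    diff = yhigh - ylow
    # expand both bounds by a fixed fraction of the span
    return (
        ylow - diff * range_margin,
        yhigh + diff * range_margin,
    )

print(pad_yrange(100.0, 200.0))  # -> (94.0, 206.0)
```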
@@ -893,10 +921,7 @@ class ChartView(ViewBox):
        else:
            return 0

-   def maybe_downsample_graphics(
-       self,
-       autoscale_overlays: bool = True,
-   ):
+   def maybe_downsample_graphics(self):

        profiler = pg.debug.Profiler(
            msg=f'ChartView.maybe_downsample_graphics() for {self.name}',
@@ -906,8 +931,9 @@ class ChartView(ViewBox):
            # ``.update_graphics_from_flow()`` nested profiling likely
            # due to the way delaying works and garbage collection of
            # the profiler in the delegated method calls.
-           ms_threshold=6,
-           # ms_threshold=ms_slower_then,
+           delayed=False,
+           # gt=3,
+           gt=ms_slower_then,
        )

        # TODO: a faster single-loop-iterator way of doing this XD
@@ -930,18 +956,11 @@ class ChartView(ViewBox):
                chart.update_graphics_from_flow(
                    name,
                    use_vr=True,

+                   # gets passed down into graphics obj
+                   profiler=profiler,
                )

-               # for each overlay on this chart auto-scale the
-               # y-range to max-min values.
-               if autoscale_overlays:
-                   overlay = chart.pi_overlay
-                   if overlay:
-                       for pi in overlay.overlays:
-                           pi.vb._set_yrange(
-                               # TODO: get the range once up front...
-                               # bars_range=br,
-                           )
-                       profiler('autoscaled linked plots')
-
-               profiler(f'<{chart_name}>.update_graphics_from_flow({name})')
+               profiler(f'range change updated {chart_name}:{name}')
+
+       profiler.finish()
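Both branches lean on `pg.debug.Profiler` checkpoints throughout: calling the instance records a timed step and `.finish()` flushes a delayed report. The threshold kwarg spelling differs across the branches (`ms_threshold=` vs the older `gt=`) because it comes from a piker-side patch of the class; a minimal sketch against the stock upstream kwargs only:

```python
# Minimal pg.debug.Profiler usage sketch; only stock upstream kwargs
# are used since `gt=`/`ms_threshold=` come from a piker-side patch.
import pyqtgraph as pg

def render_cycle():
    profiler = pg.debug.Profiler(
        msg='render_cycle()',
        disabled=False,  # force-enable regardless of env flags
        delayed=True,    # buffer output until .finish()
    )
    data = [x * x for x in range(10_000)]
    profiler('built data')           # checkpoint: time since last mark
    total = sum(data)
    profiler(f'summed: {total}')
    profiler.finish()                # flush the buffered report

render_cycle()
```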
@@ -25,13 +25,17 @@ from typing import (

 import numpy as np
 import pyqtgraph as pg
+from numba import njit, float64, int64  # , optional
 from PyQt5 import QtCore, QtGui, QtWidgets
 from PyQt5.QtCore import QLineF, QPointF
-from PyQt5.QtGui import QPainterPath
+# from numba import types as ntypes
+# from ..data._source import numba_ohlc_dtype

 from .._profile import pg_profile_enabled, ms_slower_then
 from ._style import hcolor
 from ..log import get_logger
+from ._curve import FastAppendCurve
+from ._compression import ohlc_flatten

 if TYPE_CHECKING:
     from ._chart import LinkedSplits
@@ -42,7 +46,6 @@ log = get_logger(__name__)

 def bar_from_ohlc_row(
     row: np.ndarray,
-    # 0.5 is no overlap between arms, 1.0 is full overlap
     w: float = 0.43

 ) -> tuple[QLineF]:
@@ -81,11 +84,129 @@ def bar_from_ohlc_row(
    return [hl, o, c]


+@njit(
+    # TODO: for now need to construct this manually for readonly arrays, see
+    # https://github.com/numba/numba/issues/4511
+    # ntypes.tuple((float64[:], float64[:], float64[:]))(
+    #     numba_ohlc_dtype[::1],  # contiguous
+    #     int64,
+    #     optional(float64),
+    # ),
+    nogil=True
+)
+def path_arrays_from_ohlc(
+    data: np.ndarray,
+    start: int64,
+    bar_gap: float64 = 0.43,
+
+) -> np.ndarray:
+    '''
+    Generate an array of lines objects from input ohlc data.
+
+    '''
+    size = int(data.shape[0] * 6)
+
+    x = np.zeros(
+        # data,
+        shape=size,
+        dtype=float64,
+    )
+    y, c = x.copy(), x.copy()
+
+    # TODO: report bug for assert @
+    # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991
+    for i, q in enumerate(data[start:], start):
+
+        # TODO: ask numba why this doesn't work..
+        # open, high, low, close, index = q[
+        #     ['open', 'high', 'low', 'close', 'index']]
+
+        open = q['open']
+        high = q['high']
+        low = q['low']
+        close = q['close']
+        index = float64(q['index'])
+
+        istart = i * 6
+        istop = istart + 6
+
+        # x,y detail the 6 points which connect all vertexes of a ohlc bar
+        x[istart:istop] = (
+            index - bar_gap,
+            index,
+            index,
+            index,
+            index,
+            index + bar_gap,
+        )
+        y[istart:istop] = (
+            open,
+            open,
+            low,
+            high,
+            close,
+            close,
+        )
+
+        # specifies that the first edge is never connected to the
+        # prior bars last edge thus providing a small "gap"/"space"
+        # between bars determined by ``bar_gap``.
+        c[istart:istop] = (1, 1, 1, 1, 1, 0)
+
+    return x, y, c
+
+
+def gen_qpath(
+    data: np.ndarray,
+    start: int = 0,  # XXX: do we need this?
+    # 0.5 is no overlap between arms, 1.0 is full overlap
+    w: float = 0.43,
+    path: Optional[QtGui.QPainterPath] = None,
+
+) -> QtGui.QPainterPath:
+
+    path_was_none = path is None
+
+    profiler = pg.debug.Profiler(
+        msg='gen_qpath ohlc',
+        disabled=not pg_profile_enabled(),
+        gt=ms_slower_then,
+    )
+
+    x, y, c = path_arrays_from_ohlc(
+        data,
+        start,
+        bar_gap=w,
+    )
+    profiler("generate stream with numba")
+
+    # TODO: numba the internals of this!
+    path = pg.functions.arrayToQPath(
+        x,
+        y,
+        connect=c,
+        path=path,
+    )
+
+    # avoid mem allocs if possible
+    if path_was_none:
+        path.reserve(path.capacity())
+
+    profiler("generate path with arrayToQPath")
+
+    return path
+
+
 class BarItems(pg.GraphicsObject):
    '''
    "Price range" bars graphics rendered from a OHLC sampled sequence.

    '''
+   sigPlotChanged = QtCore.pyqtSignal(object)
+
+   # 0.5 is no overlap between arms, 1.0 is full overlap
+   w: float = 0.43
+
    def __init__(
        self,
        linked: LinkedSplits,
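The `path_arrays_from_ohlc()` routine added above encodes each OHLC sample as six path vertices plus a connect mask so a single `arrayToQPath()` call can render every bar. A numba-free sketch of the same encoding for one hypothetical bar:

```python
# Pure-numpy sketch of the 6-vertex-per-bar encoding used by the
# njit routine above, for one hypothetical bar at index 10.
import numpy as np

index, open_, high, low, close = 10.0, 101.0, 105.0, 99.0, 103.0
bar_gap = 0.43

# x walks: left-arm end, then the vertical body traversed via the
# same index four times, then the right-arm end
x = np.array([index - bar_gap, index, index, index, index, index + bar_gap])
# each x pairs with: open (arm), open, low, high, close, close (arm)
y = np.array([open_, open_, low, high, close, close])
# connect=1 joins a vertex to the next; the trailing 0 breaks the
# path so this bar never joins the next bar's first vertex
c = np.array([1, 1, 1, 1, 1, 0])
```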
@@ -105,14 +226,426 @@ class BarItems(pg.GraphicsObject):
        self.last_bar_pen = pg.mkPen(hcolor(last_bar_color), width=2)
        self._name = name

+       self._ds_line_xy: Optional[
+           tuple[np.ndarray, np.ndarray]
+       ] = None
+
+       # NOTE: this prevents redraws on mouse interaction which is
+       # a huge boon for avg interaction latency.
+
+       # TODO: one question still remaining is if this makes trasform
+       # interactions slower (such as zooming) and if so maybe if/when
+       # we implement a "history" mode for the view we disable this in
+       # that mode?
        self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
-       self.path = QPainterPath()
+       self._pi = plotitem
+       self.path = QtGui.QPainterPath()
+       self.fast_path = QtGui.QPainterPath()
+
+       self._xrange: tuple[int, int]
+       # self._yrange: tuple[float, float]
+       self._vrange = None
+
+       # TODO: don't render the full backing array each time
+       # self._path_data = None
        self._last_bar_lines: Optional[tuple[QLineF, ...]] = None
+
+       # track the current length of drawable lines within the larger array
+       self.start_index: int = 0
+       self.stop_index: int = 0
+
+       # downsampler-line state
+       self._in_ds: bool = False
+       self._ds_line: Optional[FastAppendCurve] = None
+       self._dsi: tuple[int, int] = 0, 0
+       self._xs_in_px: float = 0
+
+   def draw_from_data(
+       self,
+       ohlc: np.ndarray,
+       start: int = 0,
+
+   ) -> QtGui.QPainterPath:
+       '''
+       Draw OHLC datum graphics from a ``np.ndarray``.
+
+       This routine is usually only called to draw the initial history.
+
+       '''
+       hist, last = ohlc[:-1], ohlc[-1]
+       self.path = gen_qpath(hist, start, self.w)
+
+       # save graphics for later reference and keep track
+       # of current internal "last index"
+       # self.start_index = len(ohlc)
+       index = ohlc['index']
+       self._xrange = (index[0], index[-1])
+       # self._yrange = (
+       #     np.nanmax(ohlc['high']),
+       #     np.nanmin(ohlc['low']),
+       # )
+
+       # up to last to avoid double draw of last bar
+       self._last_bar_lines = bar_from_ohlc_row(last, self.w)
+
+       x, y = self._ds_line_xy = ohlc_flatten(ohlc)
+
+       # TODO: figuring out the most optimial size for the ideal
+       # curve-path by,
+       # - calcing the display's max px width `.screen()`
+       # - drawing a curve and figuring out it's capacity:
+       #   https://doc.qt.io/qt-5/qpainterpath.html#capacity
+       # - reserving that cap for each curve-mapped-to-shm with
+
+       # - leveraging clearing when needed to redraw the entire
+       #   curve that does not release mem allocs:
+       #   https://doc.qt.io/qt-5/qpainterpath.html#clear
+       curve = FastAppendCurve(
+           y=y,
+           x=x,
+           name='OHLC',
+           color=self._color,
+       )
+       curve.hide()
+       self._pi.addItem(curve)
+       self._ds_line = curve
+
+       # self._ds_xrange = (index[0], index[-1])
+
+       # trigger render
+       # https://doc.qt.io/qt-5/qgraphicsitem.html#update
+       self.update()
+
+       return self.path

    def x_uppx(self) -> int:
-       # we expect the downsample curve report this.
+       if self._ds_line:
+           return self._ds_line.x_uppx()
+       else:
            return 0

+   # def update_from_array(
+   #     self,
+
+   #     # full array input history
+   #     ohlc: np.ndarray,
+
+   #     # pre-sliced array data that's "in view"
+   #     ohlc_iv: np.ndarray,
+
+   #     view_range: Optional[tuple[int, int]] = None,
+   #     profiler: Optional[pg.debug.Profiler] = None,
+
+   # ) -> None:
+   #     '''
+   #     Update the last datum's bar graphic from input data array.
+
+   #     This routine should be interface compatible with
+   #     ``pg.PlotCurveItem.setData()``. Normally this method in
+   #     ``pyqtgraph`` seems to update all the data passed to the
+   #     graphics object, and then update/rerender, but here we're
+   #     assuming the prior graphics havent changed (OHLC history rarely
+   #     does) so this "should" be simpler and faster.
+
+   #     This routine should be made (transitively) as fast as possible.
+
+   #     '''
+   #     profiler = profiler or pg.debug.Profiler(
+   #         disabled=not pg_profile_enabled(),
+   #         gt=ms_slower_then,
+   #         delayed=True,
+   #     )
+
+   #     # index = self.start_index
+   #     istart, istop = self._xrange
+   #     # ds_istart, ds_istop = self._ds_xrange
+
+   #     index = ohlc['index']
+   #     first_index, last_index = index[0], index[-1]
+
+   #     # length = len(ohlc)
+   #     # prepend_length = istart - first_index
+   #     # append_length = last_index - istop
+
+   #     # ds_prepend_length = ds_istart - first_index
+   #     # ds_append_length = last_index - ds_istop
+
+   #     flip_cache = False
+
+   #     x_gt = 16
+   #     if self._ds_line:
+   #         uppx = self._ds_line.x_uppx()
+   #     else:
+   #         uppx = 0
+
+   #     should_line = self._in_ds
+   #     if (
+   #         self._in_ds
+   #         and uppx < x_gt
+   #     ):
+   #         should_line = False
+
+   #     elif (
+   #         not self._in_ds
+   #         and uppx >= x_gt
+   #     ):
+   #         should_line = True
+
+   #     profiler('ds logic complete')
+
+   #     if should_line:
+   #         # update the line graphic
+   #         # x, y = self._ds_line_xy = ohlc_flatten(ohlc_iv)
+   #         x, y = self._ds_line_xy = ohlc_flatten(ohlc)
+   #         x_iv, y_iv = self._ds_line_xy = ohlc_flatten(ohlc_iv)
+   #         profiler('flattening bars to line')
+
+   #         # TODO: we should be diffing the amount of new data which
+   #         # needs to be downsampled. Ideally we actually are just
+   #         # doing all the ds-ing in sibling actors so that the data
+   #         # can just be read and rendered to graphics on events of our
+   #         # choice.
+   #         # diff = do_diff(ohlc, new_bit)
+   #         curve = self._ds_line
+   #         curve.update_from_array(
+   #             x=x,
+   #             y=y,
+   #             x_iv=x_iv,
+   #             y_iv=y_iv,
+   #             view_range=None,  # hack
+   #             profiler=profiler,
+   #         )
+   #         profiler('updated ds line')
+
+   #         if not self._in_ds:
+   #             # hide bars and show line
+   #             self.hide()
+   #             # XXX: is this actually any faster?
+   #             # self._pi.removeItem(self)
+
+   #             # TODO: a `.ui()` log level?
+   #             log.info(
+   #                 f'downsampling to line graphic {self._name}'
+   #             )
+
+   #             # self._pi.addItem(curve)
+   #             curve.show()
+   #             curve.update()
+   #             self._in_ds = True
+
+   #         # stop here since we don't need to update bars path any more
+   #         # as we delegate to the downsample line with updates.
+
+   #     else:
+   #         # we should be in bars mode
+
+   #         if self._in_ds:
+   #             # flip back to bars graphics and hide the downsample line.
+   #             log.info(f'showing bars graphic {self._name}')
+
+   #             curve = self._ds_line
+   #             curve.hide()
+   #             # self._pi.removeItem(curve)
+
+   #             # XXX: is this actually any faster?
+   #             # self._pi.addItem(self)
+   #             self.show()
+   #             self._in_ds = False
+
+   #         # generate in_view path
+   #         self.path = gen_qpath(
+   #             ohlc_iv,
+   #             0,
+   #             self.w,
+   #             # path=self.path,
+   #         )
+
+   #         # TODO: to make the downsampling faster
+   #         # - allow mapping only a range of lines thus only drawing as
+   #         #   many bars as exactly specified.
+   #         # - move ohlc "flattening" to a shmarr
+   #         # - maybe move all this embedded logic to a higher
+   #         #   level type?
+
+   #         # if prepend_length:
+   #         #     # new history was added and we need to render a new path
+   #         #     prepend_bars = ohlc[:prepend_length]
+
+   #         #     if ds_prepend_length:
+   #         #         ds_prepend_bars = ohlc[:ds_prepend_length]
+   #         #         pre_x, pre_y = ohlc_flatten(ds_prepend_bars)
+   #         #         fx = np.concatenate((pre_x, fx))
+   #         #         fy = np.concatenate((pre_y, fy))
+   #         #         profiler('ds line prepend diff complete')
+
+   #         # if append_length:
+   #         #     # generate new graphics to match provided array
+   #         #     # path appending logic:
+   #         #     # we need to get the previous "current bar(s)" for the time step
+   #         #     # and convert it to a sub-path to append to the historical set
+   #         #     # new_bars = ohlc[istop - 1:istop + append_length - 1]
+   #         #     append_bars = ohlc[-append_length - 1:-1]
+   #         #     # print(f'ohlc bars to append size: {append_bars.size}\n')
+
+   #         #     if ds_append_length:
+   #         #         ds_append_bars = ohlc[-ds_append_length - 1:-1]
+   #         #         post_x, post_y = ohlc_flatten(ds_append_bars)
+   #         #         print(
+   #         #             f'ds curve to append sizes: {(post_x.size, post_y.size)}'
+   #         #         )
+   #         #         fx = np.concatenate((fx, post_x))
+   #         #         fy = np.concatenate((fy, post_y))
+
+   #         #         profiler('ds line append diff complete')
+
+   #     profiler('array diffs complete')
+
+   #     # does this work?
+   #     last = ohlc[-1]
+   #     # fy[-1] = last['close']
+
+   #     # # incremental update and cache line datums
+   #     # self._ds_line_xy = fx, fy
+
+   #     # maybe downsample to line
+   #     # ds = self.maybe_downsample()
+   #     # if ds:
+   #     #     # if we downsample to a line don't bother with
+   #     #     # any more path generation / updates
+   #     #     self._ds_xrange = first_index, last_index
+   #     #     profiler('downsampled to line')
+   #     #     return
+
+   #     # print(in_view.size)
+
+   #     # if self.path:
+   #     #     self.path = path
+   #     #     self.path.reserve(path.capacity())
+   #     #     self.path.swap(path)
+
+   #     # path updates
+   #     # if prepend_length:
+   #     #     # XXX: SOMETHING IS MAYBE FISHY HERE what with the old_path
+   #     #     # y value not matching the first value from
+   #     #     # ohlc[prepend_length + 1] ???
+   #     #     prepend_path = gen_qpath(prepend_bars, 0, self.w)
+   #     #     old_path = self.path
+   #     #     self.path = prepend_path
+   #     #     self.path.addPath(old_path)
+   #     #     profiler('path PREPEND')
+
+   #     # if append_length:
+   #     #     append_path = gen_qpath(append_bars, 0, self.w)
+
+   #     #     self.path.moveTo(
+   #     #         float(istop - self.w),
+   #     #         float(append_bars[0]['open'])
+   #     #     )
+   #     #     self.path.addPath(append_path)
+
+   #     #     profiler('path APPEND')
+   #     #     fp = self.fast_path
+   #     #     if fp is None:
+   #     #         self.fast_path = append_path
+
+   #     #     else:
+   #     #         fp.moveTo(
+   #     #             float(istop - self.w), float(new_bars[0]['open'])
+   #     #         )
+   #     #         fp.addPath(append_path)
+
+   #     #         self.setCacheMode(QtWidgets.QGraphicsItem.NoCache)
+   #     #         flip_cache = True
+
+   #     self._xrange = first_index, last_index
+
+   #     # trigger redraw despite caching
+   #     self.prepareGeometryChange()
+
+   #     self.draw_last(last)
+
+   #     # # generate new lines objects for updatable "current bar"
+   #     # self._last_bar_lines = bar_from_ohlc_row(last, self.w)
+
+   #     # # last bar update
+   #     # i, o, h, l, last, v = last[
+   #     #     ['index', 'open', 'high', 'low', 'close', 'volume']
+   #     # ]
+   #     # # assert i == self.start_index - 1
+   #     # # assert i == last_index
+   #     # body, larm, rarm = self._last_bar_lines
+
+   #     # # XXX: is there a faster way to modify this?
+   #     # rarm.setLine(rarm.x1(), last, rarm.x2(), last)
+
+   #     # # writer is responsible for changing open on "first" volume of bar
+   #     # larm.setLine(larm.x1(), o, larm.x2(), o)
+
+   #     # if l != h:  # noqa
+
+   #     # if body is None:
+   #     #     body = self._last_bar_lines[0] = QLineF(i, l, i, h)
+   #     # else:
+   #     #     # update body
+   #     #     body.setLine(i, l, i, h)
+
+   #     # # XXX: pretty sure this is causing an issue where the bar has
+   #     # # a large upward move right before the next sample and the body
+   #     # # is getting set to None since the next bar is flat but the shm
+   #     # # array index update wasn't read by the time this code runs. Iow
+   #     # # we're doing this removal of the body for a bar index that is
+   #     # # now out of date / from some previous sample. It's weird
+   #     # # though because i've seen it do this to bars i - 3 back?
+
+   #     profiler('last bar set')
+
+   #     self.update()
+   #     profiler('.update()')
+
+   #     if flip_cache:
+   #         self.setCacheMode(QtWidgets.QGraphicsItem.DeviceCoordinateCache)
+
+   #     # profiler.finish()
+
+   def draw_last(
+       self,
+       last: np.ndarray,
+
+   ) -> None:
+       # generate new lines objects for updatable "current bar"
+       self._last_bar_lines = bar_from_ohlc_row(last, self.w)
+
+       # last bar update
+       i, o, h, l, last, v = last[
+           ['index', 'open', 'high', 'low', 'close', 'volume']
+       ]
+       # assert i == self.start_index - 1
+       # assert i == last_index
+       body, larm, rarm = self._last_bar_lines
+
+       # XXX: is there a faster way to modify this?
+       rarm.setLine(rarm.x1(), last, rarm.x2(), last)
+
+       # writer is responsible for changing open on "first" volume of bar
+       larm.setLine(larm.x1(), o, larm.x2(), o)
+
+       if l != h:  # noqa
+
+           if body is None:
+               body = self._last_bar_lines[0] = QLineF(i, l, i, h)
+           else:
+               # update body
+               body.setLine(i, l, i, h)
+
+           # XXX: pretty sure this is causing an issue where the bar has
+           # a large upward move right before the next sample and the body
+           # is getting set to None since the next bar is flat but the shm
+           # array index update wasn't read by the time this code runs. Iow
+           # we're doing this removal of the body for a bar index that is
+           # now out of date / from some previous sample. It's weird
+           # though because i've seen it do this to bars i - 3 back?

    def boundingRect(self):
        # Qt docs: https://doc.qt.io/qt-5/qgraphicsitem.html#boundingRect

@@ -135,6 +668,16 @@ class BarItems(pg.GraphicsObject):
            hb.bottomRight(),
        )

+       # fp = self.fast_path
+       # if fp:
+       #     fhb = fp.controlPointRect()
+       #     print((hb_tl, hb_br))
+       #     print(fhb)
+       #     hb_tl, hb_br = (
+       #         fhb.topLeft() + hb.topLeft(),
+       #         fhb.bottomRight() + hb.bottomRight(),
+       #     )
+
        # need to include last bar height or BR will be off
        mx_y = hb_br.y()
        mn_y = hb_tl.y()
@@ -170,9 +713,12 @@ class BarItems(pg.GraphicsObject):

    ) -> None:

+       if self._in_ds:
+           return
+
        profiler = pg.debug.Profiler(
            disabled=not pg_profile_enabled(),
-           ms_threshold=ms_slower_then,
+           gt=ms_slower_then,
        )

        # p.setCompositionMode(0)
@@ -184,7 +730,6 @@ class BarItems(pg.GraphicsObject):
        # lead to any perf gains other then when zoomed in to less bars
        # in view.
        p.setPen(self.last_bar_pen)
-       if self._last_bar_lines:
        p.drawLines(*tuple(filter(bool, self._last_bar_lines)))
        profiler('draw last bar')

@@ -192,59 +737,6 @@ class BarItems(pg.GraphicsObject):
        p.drawPath(self.path)
        profiler(f'draw history path: {self.path.capacity()}')

-   def draw_last_datum(
-       self,
-       path: QPainterPath,
-       src_data: np.ndarray,
-       render_data: np.ndarray,
-       reset: bool,
-       array_key: str,
-
-       fields: list[str] = [
-           'index',
-           'open',
-           'high',
-           'low',
-           'close',
-       ],
-
-   ) -> None:
-
-       # relevant fields
-       ohlc = src_data[fields]
-       last_row = ohlc[-1:]
-
-       # individual values
-       last_row = i, o, h, l, last = ohlc[-1]
-
-       # generate new lines objects for updatable "current bar"
-       self._last_bar_lines = bar_from_ohlc_row(last_row)
-
-       # assert i == graphics.start_index - 1
-       # assert i == last_index
-       body, larm, rarm = self._last_bar_lines
-
-       # XXX: is there a faster way to modify this?
-       rarm.setLine(rarm.x1(), last, rarm.x2(), last)
-
-       # writer is responsible for changing open on "first" volume of bar
-       larm.setLine(larm.x1(), o, larm.x2(), o)
-
-       if l != h:  # noqa
-
-           if body is None:
-               body = self._last_bar_lines[0] = QLineF(i, l, i, h)
-           else:
-               # update body
-               body.setLine(i, l, i, h)
-
-           # XXX: pretty sure this is causing an issue where the
-           # bar has a large upward move right before the next
-           # sample and the body is getting set to None since the
-           # next bar is flat but the shm array index update wasn't
-           # read by the time this code runs. Iow we're doing this
-           # removal of the body for a bar index that is now out of
-           # date / from some previous sample. It's weird though
-           # because i've seen it do this to bars i - 3 back?
-
-       return ohlc['index'], ohlc['close']
+       # if self.fast_path:
+       #     p.drawPath(self.fast_path)
+       #     profiler('draw fast path')
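`BarItems.draw_last()`/`draw_last_datum()` above avoid re-allocating graphics by mutating the three `QLineF` components of the "current bar" in place via `setLine()`. A self-contained sketch of that update with hypothetical tick values:

```python
# Sketch of the in-place "current bar" line mutation used above;
# all the bar values here are hypothetical.
from PyQt5.QtCore import QLineF

i, o, h, l, c = 42.0, 101.0, 105.0, 99.0, 103.0  # index + OHLC
w = 0.43                                         # arm half-width

# the three component lines of a bar: vertical body + open/close arms
body = QLineF(i, l, i, h)
larm = QLineF(i - w, o, i, o)
rarm = QLineF(i, c, i + w, c)

# on a new tick only endpoints move; setLine() mutates in place,
# avoiding new graphics allocations on every quote
c = 104.25  # new last price
rarm.setLine(rarm.x1(), c, rarm.x2(), c)
body.setLine(i, min(l, c), i, max(h, c))
```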
@@ -1,236 +0,0 @@
-# piker: trading gear for hackers
-# Copyright (C) 2018-present Tyler Goodlet (in stewardship of piker0)
-
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Affero General Public License for more details.
-
-# You should have received a copy of the GNU Affero General Public License
-# along with this program.  If not, see <https://www.gnu.org/licenses/>.
-"""
-Super fast ``QPainterPath`` generation related operator routines.
-
-"""
-from __future__ import annotations
-from typing import (
-    # Optional,
-    TYPE_CHECKING,
-)
-
-import numpy as np
-from numpy.lib import recfunctions as rfn
-from numba import njit, float64, int64  # , optional
-# import pyqtgraph as pg
-from PyQt5 import QtGui
-# from PyQt5.QtCore import QLineF, QPointF
-
-from ..data._sharedmem import (
-    ShmArray,
-)
-# from .._profile import pg_profile_enabled, ms_slower_then
-from ._compression import (
-    ds_m4,
-)
-
-if TYPE_CHECKING:
-    from ._flows import Renderer
-
-
-def xy_downsample(
-    x,
-    y,
-    uppx,
-
-    x_spacer: float = 0.5,
-
-) -> tuple[np.ndarray, np.ndarray]:
-
-    # downsample whenever more then 1 pixels per datum can be shown.
-    # always refresh data bounds until we get diffing
-    # working properly, see above..
-    bins, x, y = ds_m4(
-        x,
-        y,
-        uppx,
-    )
-
-    # flatten output to 1d arrays suitable for path-graphics generation.
-    x = np.broadcast_to(x[:, None], y.shape)
-    x = (x + np.array(
-        [-x_spacer, 0, 0, x_spacer]
-    )).flatten()
-    y = y.flatten()
-
-    return x, y
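The tail of `xy_downsample()` above relies on a broadcasting trick: `ds_m4()` yields one x per bin but four y-values (the four M4 values per bin), so the x column is repeated four times and nudged by `±x_spacer` before both arrays are flattened into path-ready 1d vectors. A self-contained numpy sketch of just that step, with made-up shapes:

    import numpy as np

    x_spacer = 0.5
    x = np.arange(3, dtype=float)                  # one x per bin
    y = np.arange(12, dtype=float).reshape(3, 4)   # four y-values per bin

    # repeat each x across the 4 columns, spread copies around the bin center
    x4 = np.broadcast_to(x[:, None], y.shape)
    x_flat = (x4 + np.array([-x_spacer, 0, 0, x_spacer])).flatten()
    y_flat = y.flatten()

    assert x_flat.shape == y_flat.shape == (12,)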
-
-
-@njit(
-    # TODO: for now need to construct this manually for readonly arrays, see
-    # https://github.com/numba/numba/issues/4511
-    # ntypes.tuple((float64[:], float64[:], float64[:]))(
-    #     numba_ohlc_dtype[::1],  # contiguous
-    #     int64,
-    #     optional(float64),
-    # ),
-    nogil=True
-)
-def path_arrays_from_ohlc(
-    data: np.ndarray,
-    start: int64,
-    bar_gap: float64 = 0.43,
-
-) -> np.ndarray:
-    '''
-    Generate an array of lines objects from input ohlc data.
-
-    '''
-    size = int(data.shape[0] * 6)
-
-    x = np.zeros(
-        # data,
-        shape=size,
-        dtype=float64,
-    )
-    y, c = x.copy(), x.copy()
-
-    # TODO: report bug for assert @
-    # /home/goodboy/repos/piker/env/lib/python3.8/site-packages/numba/core/typing/builtins.py:991
-    for i, q in enumerate(data[start:], start):
-
-        # TODO: ask numba why this doesn't work..
-        # open, high, low, close, index = q[
-        #     ['open', 'high', 'low', 'close', 'index']]
-
-        open = q['open']
-        high = q['high']
-        low = q['low']
-        close = q['close']
-        index = float64(q['index'])
-
-        istart = i * 6
-        istop = istart + 6
-
-        # x,y detail the 6 points which connect all vertexes of a ohlc bar
-        x[istart:istop] = (
-            index - bar_gap,
-            index,
-            index,
-            index,
-            index,
-            index + bar_gap,
-        )
-        y[istart:istop] = (
-            open,
-            open,
-            low,
-            high,
-            close,
-            close,
-        )
-
-        # specifies that the first edge is never connected to the
-        # prior bars last edge thus providing a small "gap"/"space"
-        # between bars determined by ``bar_gap``.
-        c[istart:istop] = (1, 1, 1, 1, 1, 0)
-
-    return x, y, c
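Each bar emitted by `path_arrays_from_ohlc()` above is six vertices walked left-arm → body → right-arm, while `c` is a per-vertex connect mask whose trailing zero suppresses the segment that would otherwise join one bar to the next. The same layout in plain numpy (numba-free) for a single bar with illustrative values:

    import numpy as np

    index, bar_gap = 100.0, 0.43
    open_, high, low, close = 10.0, 12.0, 9.0, 11.0

    # left arm end -> open, down to low, up to high, over to close -> right arm end
    x = np.array([index - bar_gap, index, index, index, index, index + bar_gap])
    y = np.array([open_, open_, low, high, close, close])

    # c[i] == 1 connects vertex i to i + 1; the final 0 leaves the inter-bar gap
    c = np.array([1, 1, 1, 1, 1, 0])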
-
-
-def gen_ohlc_qpath(
-    r: Renderer,
-    data: np.ndarray,
-    array_key: str,  # we ignore this
-    vr: tuple[int, int],
-
-    start: int = 0,  # XXX: do we need this?
-    # 0.5 is no overlap between arms, 1.0 is full overlap
-    w: float = 0.43,
-
-) -> QtGui.QPainterPath:
-    '''
-    More or less direct proxy to ``path_arrays_from_ohlc()``
-    but with closed in kwargs for line spacing.
-
-    '''
-    x, y, c = path_arrays_from_ohlc(
-        data,
-        start,
-        bar_gap=w,
-    )
-    return x, y, c
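Note that `gen_ohlc_qpath()` above is annotated `-> QtGui.QPainterPath` but actually forwards the raw `(x, y, c)` arrays, leaving path construction to the caller (presumably the `Renderer`). With pyqtgraph that final conversion would typically look something like the following sketch:

    import numpy as np
    import pyqtgraph as pg

    x = np.array([99.57, 100., 100., 100., 100., 100.43])
    y = np.array([10., 10., 9., 12., 11., 11.])
    c = np.array([1, 1, 1, 1, 1, 0])

    # ``connect`` accepts a per-vertex 0/1 array: a 0 at position i
    # suppresses the segment from vertex i to i + 1
    path = pg.functions.arrayToQPath(x, y, connect=c)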
-
-
-def ohlc_to_line(
-    ohlc_shm: ShmArray,
-    data_field: str,
-    fields: list[str] = ['open', 'high', 'low', 'close']
-
-) -> tuple[
-    np.ndarray,
-    np.ndarray,
-]:
-    '''
-    Convert an input struct-array holding OHLC samples into a pair of
-    flattened x, y arrays with the same size (datums wise) as the source
-    data.
-
-    '''
-    y_out = ohlc_shm.ustruct(fields)
-    first = ohlc_shm._first.value
-    last = ohlc_shm._last.value
-
-    # write pushed data to flattened copy
-    y_out[first:last] = rfn.structured_to_unstructured(
-        ohlc_shm.array[fields]
-    )
-
-    # generate an flat-interpolated x-domain
-    x_out = (
-        np.broadcast_to(
-            ohlc_shm._array['index'][:, None],
-            (
-                ohlc_shm._array.size,
-                # 4,  # only ohlc
-                y_out.shape[1],
-            ),
-        ) + np.array([-0.5, 0, 0, 0.5])
-    )
-    assert y_out.any()
-
-    return (
-        x_out,
-        y_out,
-    )
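The heavy lifting in `ohlc_to_line()` above is `rfn.structured_to_unstructured()`, which re-views selected fields of a struct-array as a plain 2d array so the whole OHLC history becomes one flat y-series with a matching offset x-domain. A tiny self-contained demo with a made-up dtype:

    import numpy as np
    from numpy.lib import recfunctions as rfn

    ohlc_dtype = np.dtype([
        ('index', 'i8'),
        ('open', 'f8'), ('high', 'f8'), ('low', 'f8'), ('close', 'f8'),
    ])
    a = np.array(
        [(0, 10., 12., 9., 11.), (1, 11., 13., 10., 12.)],
        dtype=ohlc_dtype,
    )

    # (2, 4): one row per sample, one column per price field
    y = rfn.structured_to_unstructured(a[['open', 'high', 'low', 'close']])

    # matching x: each index repeated 4x, offset to spread the columns
    x = np.broadcast_to(a['index'][:, None], y.shape) + np.array([-0.5, 0, 0, 0.5])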
-
-
-def to_step_format(
-    shm: ShmArray,
-    data_field: str,
-    index_field: str = 'index',
-
-) -> tuple[int, np.ndarray, np.ndarray]:
-    '''
-    Convert an input 1d shm array to a "step array" format
-    for use by path graphics generation.
-
-    '''
-    i = shm._array['index'].copy()
-    out = shm._array[data_field].copy()
-
-    x_out = np.broadcast_to(
-        i[:, None],
-        (i.size, 2),
-    ) + np.array([-0.5, 0.5])
-
-    y_out = np.empty((len(out), 2), dtype=out.dtype)
-    y_out[:] = out[:, np.newaxis]
-
-    # start y at origin level
-    y_out[0, 0] = 0
-    return x_out, y_out
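`to_step_format()` above duplicates every y-value and straddles every x-index by ±0.5 so the flattened (x, y) pairs render as flat-topped steps rather than point-to-point slopes. A small sketch of the same reshaping on a plain 1d series:

    import numpy as np

    i = np.arange(5)                     # sample indices
    v = np.array([1., 3., 2., 2., 4.])   # e.g. a volume curve

    x_out = np.broadcast_to(i[:, None], (i.size, 2)) + np.array([-0.5, 0.5])
    y_out = np.empty((len(v), 2), dtype=v.dtype)
    y_out[:] = v[:, np.newaxis]
    y_out[0, 0] = 0  # start the curve at the origin level

    # each sample becomes a horizontal segment spanning [i - 0.5, i + 0.5]
    x_flat, y_flat = x_out.flatten(), y_out.flatten()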
@@ -287,6 +287,7 @@ class MainWindow(QtGui.QMainWindow):
         app = QtGui.QApplication.instance()
         geo = self.current_screen().geometry()
         h, w = geo.height(), geo.width()
+        self.setMaximumSize(w, h)
         # use approx 1/3 of the area of the screen by default
         self._size = round(w * .666), round(h * .666)
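The added `self.setMaximumSize(w, h)` caps the window at the current screen's dimensions. Note that `.666` scales each linear dimension, so the default window actually covers about (2/3)² ≈ 0.44 of the screen's area, somewhat more than the "approx 1/3" the comment suggests:

    w, h = 2560, 1440                            # example screen geometry
    size = round(w * .666), round(h * .666)      # -> (1705, 959)
    area_ratio = (size[0] * size[1]) / (w * h)   # ~= 0.443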
@@ -30,7 +30,6 @@ import uuid
 from pydantic import BaseModel
 import tractor
 import trio
-from PyQt5.QtCore import Qt

 from .. import config
 from ..clearing._client import open_ems, OrderBook
@@ -38,7 +37,6 @@ from ..clearing._allocate import (
     mk_allocator,
     Position,
 )
-from ._style import _font
 from ..data._source import Symbol
 from ..data.feed import Feed
 from ..log import get_logger
@@ -48,8 +46,7 @@ from ._position import (
     PositionTracker,
     SettingsPane,
 )
-from ._forms import FieldsForm
-# from ._label import FormatLabel
+from ._label import FormatLabel
 from ._window import MultiStatus
 from ..clearing._messages import Order, BrokerdPosition
 from ._forms import open_form_input_handling
@@ -642,21 +639,63 @@ async def open_order_mode(
     pp_tracker.hide_info()

     # setup order mode sidepane widgets
-    form: FieldsForm = chart.sidepane
-    form.vbox.setSpacing(
+    form = chart.sidepane
+    vbox = form.vbox
+
+    from textwrap import dedent
+
+    from PyQt5.QtCore import Qt
+
+    from ._style import _font, _font_small
+    from ..calc import humanize
+
+    feed_label = FormatLabel(
+        fmt_str=dedent("""
+        actor: **{actor_name}**\n
+        |_ @**{host}:{port}**\n
+        |_ throttle_hz: **{throttle_rate}**\n
+        |_ streams: **{symbols}**\n
+        |_ shm: **{shm}**\n
+        """),
+        font=_font.font,
+        font_size=_font_small.px_size,
+        font_color='default_lightest',
+    )
+
+    form.feed_label = feed_label
+
+    # add feed info label to top
+    vbox.insertWidget(
+        0,
+        feed_label,
+        alignment=Qt.AlignBottom,
+    )
+    # vbox.setAlignment(feed_label, Qt.AlignBottom)
+    # vbox.setAlignment(Qt.AlignBottom)
+    _ = chart.height() - (
+        form.height() +
+        form.fill_bar.height()
+        # feed_label.height()
+    )
+    vbox.setSpacing(
         int((1 + 5/8)*_font.px_size)
     )

-    from ._feedstatus import mk_feed_label
-    feed_label = mk_feed_label(
-        form,
-        feed,
-        chart,
+    # fill in brokerd feed info
+    host, port = feed.portal.channel.raddr
+    if host == '127.0.0.1':
+        host = 'localhost'
+    mpshm = feed.shm._shm
+    shmstr = f'{humanize(mpshm.size)}'
+    form.feed_label.format(
+        actor_name=feed.portal.channel.uid[0],
+        host=host,
+        port=port,
+        symbols=len(feed.symbols),
+        shm=shmstr,
+        throttle_rate=feed.throttle_rate,
     )

-    # XXX: we set this because?
-    form.feed_label = feed_label
-
     order_pane = SettingsPane(
         form=form,
         # XXX: ugh, so hideous...
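The inlined `FormatLabel` block added above follows a two-phase pattern: build the label once with a `{}`-placeholder `fmt_str` template, then render it after the feed connects by calling `.format()` with live values. Stripped of the Qt widgetry, it reduces to deferred `str.format()`, roughly:

    from textwrap import dedent

    # phase 1: template fixed at construction time
    fmt_str = dedent("""
    actor: **{actor_name}**
    |_ @**{host}:{port}**
    |_ throttle_hz: **{throttle_rate}**
    """)

    # phase 2: fill in values once the feed is up (illustrative values only)
    text = fmt_str.format(
        actor_name='brokerd',
        host='localhost',
        port=6666,
        throttle_rate=60,
    )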
@@ -667,11 +706,6 @@ async def open_order_mode(
     )
     order_pane.set_accounts(list(trackers.keys()))

-    form.vbox.addWidget(
-        feed_label,
-        alignment=Qt.AlignBottom,
-    )
-
     # update pp icons
     for name, tracker in trackers.items():
         order_pane.update_account_icons({name: tracker.live_pp})
@@ -873,9 +907,7 @@ async def process_trades_and_update_ui(
             mode.lines.remove_line(uuid=oid)

         # each clearing tick is responded individually
-        elif resp in (
-            'broker_filled',
-        ):
+        elif resp in ('broker_filled',):

             known_order = book._sent_orders.get(oid)
             if not known_order:
@@ -11,11 +11,3 @@

 # our async client for ``marketstore`` (the tsdb)
 -e git+https://github.com/pikers/anyio-marketstore.git@master#egg=anyio-marketstore
-
-
-# ``trimeter`` for asysnc history fetching
--e git+https://github.com/python-trio/trimeter.git@master#egg=trimeter
-
-
-# ``asyncvnc`` for sending interactions to ib-gw inside docker
--e git+https://github.com/pikers/asyncvnc.git@vid_passthrough#egg=asyncvnc
setup.py (29 changed lines)
@@ -51,14 +51,10 @@ setup(
         # async
         'trio',
         'trio-websocket',
+        # 'tractor', # from github currently
         'msgspec', # performant IPC messaging
         'async_generator',

-        # from github currently (see requirements.txt)
-        # 'trimeter', # not released yet..
-        # 'tractor',
-        # asyncvnc,
-
         # brokers
         'asks==2.4.8',
         'ib_insync',
@@ -69,37 +65,36 @@ setup(
         'cython',
         'numpy',
         'numba',
+        'pandas',

         # UI
         'PyQt5',
-        # 'pyqtgraph', from our fork see reqs.txt
-        'qdarkstyle >= 3.0.2', # themeing
-        'fuzzywuzzy[speedup]', # fuzzy search
+        'pyqtgraph',
+        'qdarkstyle >= 3.0.2',
+        # fuzzy search
+        'fuzzywuzzy[speedup]',

         # tsdbs
-        # anyio-marketstore  # from gh see reqs.txt
+        'pymarketstore',
     ],
     extras_require={
+
+        # serialization
         'tsdb': [
             'docker',
         ],

     },
     tests_require=['pytest'],
-    python_requires=">=3.10",
-    keywords=[
-        "async",
-        "trading",
-        "finance",
-        "quant",
-        "charting",
-    ],
+    python_requires=">=3.9", # literally for ``datetime.datetime.fromisoformat``...
+    keywords=["async", "trading", "finance", "quant", "charting"],
     classifiers=[
         'Development Status :: 3 - Alpha',
         'License :: OSI Approved :: ',
         'Operating System :: POSIX :: Linux',
         "Programming Language :: Python :: Implementation :: CPython",
         "Programming Language :: Python :: 3 :: Only",
+        "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
         'Intended Audience :: Financial and Insurance Industry',
         'Intended Audience :: Science/Research',