Compare commits

..

No commits in common. "kraken_stale_ws_token" and "main" have entirely different histories.

58 changed files with 1600 additions and 2538 deletions

View File

@ -3,8 +3,7 @@
"allow": [
"Bash(chmod:*)",
"Bash(/tmp/piker_commits.txt)",
"Bash(python:*)",
"Bash(ls:*)"
"Bash(python:*)"
],
"deny": [],
"ask": []

View File

@ -32,14 +32,7 @@ option.log.disabled = true
[kraken]
# the reference fiat asset as can be set
# in an account's web-trading-UI prefs.
src_fiat = 'usd'
# NOTE for account defs, the following
# lines must match as follows.
accounts.spot = 'spot'
key_descr = 'spot'
key_descr = ''
api_key = ''
secret = ''
# ------ kraken ------

View File

@ -31,7 +31,6 @@ from piker.log import (
from ._util import (
BrokerError,
SymbolNotFound,
MarketNotFound as MarketNotFound,
NoData,
DataUnavailable,
DataThrottle,

View File

@ -20,17 +20,10 @@ Handy cross-broker utils.
"""
from __future__ import annotations
# from functools import partial
from typing import (
Type,
)
import json
import httpx
import logging
from msgspec import Struct
from tractor._exceptions import (
reg_err_types,
)
from piker.log import (
colorize_json,
@ -66,10 +59,6 @@ class SymbolNotFound(BrokerError):
"Symbol not found by broker search"
class MarketNotFound(SymbolNotFound):
"Mkt-pair not found by broker search"
# TODO: these should probably be moved to `.tsp/.data`?
class NoData(BrokerError):
'''
@ -108,19 +97,6 @@ class DataThrottle(BrokerError):
'''
# TODO: add in throttle metrics/feedback
class SchemaMismatch(BrokerError):
'''
Market `Pair` fields mismatch, likely due to provider API update.
'''
# auto-register all `BrokerError` subtypes for
# tractor IPC exc-marshalling.
reg_err_types([
BrokerError,
*BrokerError.__subclasses__(),
])
def resproc(
resp: httpx.Response,
@ -147,45 +123,3 @@ def resproc(
log.debug(f"Received json contents:\n{colorize_json(msg)}")
return msg if return_json else resp
def get_or_raise_on_pair_schema_mismatch(
pair_type: Type[Struct],
fields_data: dict,
provider_name: str,
api_url: str|None = None,
) -> Struct:
'''
Boilerplate helper around assset-`Pair` field schema mismatches,
normally due to provider API updates.
'''
try:
pair: Struct = pair_type(**fields_data)
return pair
except TypeError as err:
from tractor.devx.pformat import ppfmt
repr_data: str = ppfmt(fields_data)
report: str = (
f'Field mismatch we need to codify!\n'
f'\n'
f'{pair_type!r}({repr_data})'
f'\n'
f'^^^ {err.args[0]!r} ^^^\n'
f'\n'
f"Don't panic, prolly {provider_name!r} "
f"changed their symbology schema..\n"
)
if (
api_url
or
(api_url := pair_type._api_url)
):
report += (
f'\n'
f'Check out their API docs here:\n'
f'{api_url}\n'
)
raise SchemaMismatch(report) from err

View File

@ -49,9 +49,6 @@ from piker import config
from piker.clearing._messages import (
Order,
)
from piker.brokers._util import (
get_or_raise_on_pair_schema_mismatch,
)
from piker.accounting import (
Asset,
digits_to_dec,
@ -373,12 +370,20 @@ class Client:
item['filters'] = filters
pair_type: Type = PAIRTYPES[venue]
pair: Pair = get_or_raise_on_pair_schema_mismatch(
pair_type=pair_type,
fields_data=item,
provider_name='binance',
api_url='https://binance-docs.github.io/apidocs/spot/en/#exchange-information',
)
try:
pair: Pair = pair_type(**item)
except Exception as e:
e.add_note(
f'\n'
f'New or removed field we need to codify!\n'
f'pair-type: {pair_type!r}\n'
f'\n'
f"Don't panic, prolly stupid binance changed their symbology schema again..\n"
f'Check out their API docs here:\n'
f'\n'
f'https://binance-docs.github.io/apidocs/spot/en/#exchange-information\n'
)
raise
pair_table[pair.symbol.upper()] = pair
# update an additional top-level-cross-venue-table
@ -576,8 +581,8 @@ class Client:
self,
mkt: MktPair,
start_dt: datetime|None = None,
end_dt: datetime|None = None,
start_dt: datetime | None = None,
end_dt: datetime | None = None,
as_np: bool = True,
@ -604,11 +609,7 @@ class Client:
start_time = binance_timestamp(start_dt)
end_time = binance_timestamp(end_dt)
import tractor
with tractor.devx.maybe_open_crash_handler():
bs_pair: Pair = self._pairs[
mkt.bs_fqme.upper()
]
bs_pair: Pair = self._pairs[mkt.bs_fqme.upper()]
# https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-data
bars = await self._api(

View File

@ -48,7 +48,6 @@ import tractor
from piker.brokers import (
open_cached_client,
NoData,
MarketNotFound,
)
from piker._cacheables import (
async_lifo_cache,
@ -204,13 +203,9 @@ async def stream_messages(
yield 'trade', piker_quote
def make_sub(
pairs: list[str],
sub_name: str,
uid: int,
) -> dict[str, str]:
def make_sub(pairs: list[str], sub_name: str, uid: int) -> dict[str, str]:
'''
Create a request subscription packet `dict`.
Create a request subscription packet dict.
- spot:
https://binance-docs.github.io/apidocs/spot/en/#live-subscribing-unsubscribing-to-streams
@ -306,10 +301,6 @@ async def get_mkt_info(
# uppercase since kraken bs_mktid is always upper
if 'binance' not in fqme.lower():
log.warning(
f'Missing `.<provider>` part in fqme ??\n'
f'fqme: {fqme!r}\n'
)
fqme += '.binance'
mkt_mode: str = ''
@ -324,24 +315,6 @@ async def get_mkt_info(
venue: str = venue.upper()
venue_lower: str = venue.lower()
if not venue:
if expiry:
expiry = f'.{expiry}'
expected: str = (
f'{mkt_ep}'
f'.<venue>'
f'{expiry}'
f'.{broker}'
)
raise MarketNotFound(
f'Invalid or missing .<venue> part in fqme?\n'
f'\n'
f'fqme: {fqme!r}\n'
f'expected-form>> {expected}\n'
f'\n'
f'Maybe you are missing a ".spot." ?\n'
)
# XXX TODO: we should change the usdtm_futes name to just
# usdm_futes (dropping the tether part) since it turns out that
# there are indeed USD-tokens OTHER THEN tether being used as
@ -359,8 +332,7 @@ async def get_mkt_info(
# TODO: handle coinm futes which have a margin asset that
# is some crypto token!
# https://binance-docs.github.io/apidocs/delivery/en/#exchange-information
or
'btc' in venue_lower
or 'btc' in venue_lower
):
return None
@ -371,21 +343,16 @@ async def get_mkt_info(
if (
venue
and
'spot' not in venue_lower
and 'spot' not in venue_lower
# XXX: catch all in case user doesn't know which
# venue they want (usdtm vs. coinm) and we can choose
# a default (via config?) once we support coin-m APIs.
or
'perp' in venue_lower
or 'perp' in venue_lower
):
if not mkt_mode:
mkt_mode: str = f'{venue_lower}_futes'
# tracing
# await tractor.pause()
async with open_cached_client(
'binance',
) as client:

View File

@ -20,7 +20,6 @@ Per market data-type definitions and schemas types.
"""
from __future__ import annotations
from typing import (
ClassVar,
Literal,
)
from decimal import Decimal
@ -204,8 +203,6 @@ class FutesPair(Pair):
# NOTE: see `.data._symcache.SymbologyCache.load()` for why
ns_path: str = 'piker.brokers.binance:FutesPair'
_api_url: ClassVar[str] = 'https://binance-docs.github.io/apidocs/spot/en/#exchange-information'
# NOTE: for compat with spot pairs and `MktPair.src: Asset`
# processing..
@property

View File

@ -425,7 +425,7 @@ class DataFeed:
async def stream_to_file(
watchlist_name: str,
filename: str,
portal: tractor.Portal,
portal: tractor._portal.Portal,
tickers: List[str],
brokermod: ModuleType,
rate: int,

View File

@ -23,6 +23,7 @@ from contextlib import (
asynccontextmanager as acm,
)
from datetime import datetime
from functools import partial
import time
from typing import (
Any,
@ -523,12 +524,13 @@ async def maybe_open_feed_handler() -> trio.abc.ReceiveStream:
async def aio_price_feed_relay(
chan: to_asyncio.LinkedTaskChannel,
fh: FeedHandler,
instrument: Symbol,
from_trio: asyncio.Queue,
to_trio: trio.abc.SendChannel,
) -> None:
async def _trade(data: dict, receipt_timestamp):
chan.send_nowait(('trade', {
to_trio.send_nowait(('trade', {
'symbol': cb_sym_to_deribit_inst(
str_to_cb_sym(data.symbol)).lower(),
'last': data,
@ -538,7 +540,7 @@ async def aio_price_feed_relay(
}))
async def _l1(data: dict, receipt_timestamp):
chan.send_nowait(('l1', {
to_trio.send_nowait(('l1', {
'symbol': cb_sym_to_deribit_inst(
str_to_cb_sym(data.symbol)).lower(),
'ticks': [
@ -568,7 +570,7 @@ async def aio_price_feed_relay(
install_signal_handlers=False)
# sync with trio
chan.started_nowait(None)
to_trio.send_nowait(None)
await asyncio.sleep(float('inf'))
@ -579,9 +581,11 @@ async def open_price_feed(
) -> trio.abc.ReceiveStream:
async with maybe_open_feed_handler() as fh:
async with to_asyncio.open_channel_from(
aio_price_feed_relay,
fh=fh,
instrument=instrument,
partial(
aio_price_feed_relay,
fh,
instrument
)
) as (chan, first):
yield chan
@ -607,9 +611,10 @@ async def maybe_open_price_feed(
async def aio_order_feed_relay(
chan: to_asyncio.LinkedTaskChannel,
fh: FeedHandler,
instrument: Symbol,
from_trio: asyncio.Queue,
to_trio: trio.abc.SendChannel,
) -> None:
async def _fill(data: dict, receipt_timestamp):
breakpoint()
@ -632,7 +637,7 @@ async def aio_order_feed_relay(
install_signal_handlers=False)
# sync with trio
chan.started_nowait(None)
to_trio.send_nowait(None)
await asyncio.sleep(float('inf'))
@ -643,9 +648,11 @@ async def open_order_feed(
) -> trio.abc.ReceiveStream:
async with maybe_open_feed_handler() as fh:
async with to_asyncio.open_channel_from(
aio_order_feed_relay,
fh=fh,
instrument=instrument,
partial(
aio_order_feed_relay,
fh,
instrument
)
) as (chan, first):
yield chan

View File

@ -95,7 +95,6 @@ from .symbols import (
)
from ...log import get_logger
from .venues import (
is_expired,
is_venue_open,
sesh_times,
is_venue_closure,
@ -497,7 +496,7 @@ class Client:
await self.ib.reqContractDetailsAsync(contract)
)[0]
# convert to makt-native tz
tz: str = details.timeZoneId or 'EST'
tz: str = details.timeZoneId
end_dt = end_dt.in_tz(tz)
first_dt: DateTime = from_timestamp(first).in_tz(tz)
last_dt: DateTime = from_timestamp(last).in_tz(tz)
@ -509,18 +508,10 @@ class Client:
_open_now: bool = is_venue_open(
con_deats=details,
)
_is_expired: bool = is_expired(
con_deats=details,
)
# XXX, do gap detections.
has_closure_gap: bool = False
if (
# XXX, expired tracts can't be introspected
# for open/closure intervals due to ib's chitty
# details seemingly..
not _is_expired
and
last_dt.add(seconds=sample_period_s)
<
end_dt

View File

@ -231,21 +231,20 @@ async def handle_order_requests(
async def recv_trade_updates(
chan: tractor.to_asyncio.LinkedTaskChannel,
client: Client,
to_trio: trio.abc.SendChannel,
) -> None:
'''
Receive and relay order control and positioning
related events from `ib_async`, pack as tuples and
push over mem-chan to our trio relay task for
processing and relay to EMS.
Receive and relay order control and positioning related events
from `ib_async`, pack as tuples and push over mem-chan to our
trio relay task for processing and relay to EMS.
'''
client.inline_errors(chan)
client.inline_errors(to_trio)
# sync with trio task
chan.started_nowait(client.ib)
to_trio.send_nowait(client.ib)
def push_tradesies(
eventkit_obj,
@ -283,7 +282,7 @@ async def recv_trade_updates(
try:
# emit event name + relevant ibis internal objects
chan.send_nowait((event_name, emit))
to_trio.send_nowait((event_name, emit))
except trio.BrokenResourceError:
log.exception(f'Disconnected from {eventkit_obj} updates')
eventkit_obj.disconnect(push_tradesies)
@ -1307,15 +1306,7 @@ async def deliver_trade_events(
elif isinstance(err, str):
code_part, _, reason = err.rpartition(']')
if code_part:
for prefix_patt in [
'[Errno ',
'[code ',
]:
code_part, _, code = code_part.partition()
if code:
code = int(code)
break
_, _, code = code_part.partition('[code')
reqid: str = '<unknown>'
# "Warning:" msg codes,

View File

@ -501,7 +501,7 @@ async def update_ledger_from_api_trades(
for fill in fills:
con: Contract = fill.contract
conid: str = con.conId
pexch: str|None = con.primaryExchange or con.exchange
pexch: str | None = con.primaryExchange
if not pexch:
cons = await client.get_con(conid=conid)

View File

@ -33,21 +33,13 @@ from typing import (
)
import exchange_calendars as xcals
from exchange_calendars.errors import (
InvalidCalendarName,
)
from pendulum import (
parse,
now,
Duration,
Interval,
Time,
)
from piker.log import get_logger
log = get_logger(__name__)
if TYPE_CHECKING:
from ib_async import (
TradingSession,
@ -64,22 +56,6 @@ if TYPE_CHECKING:
)
def is_expired(
con_deats: ContractDetails,
) -> bool:
'''
Simple predicate whether the provided contract-deats match and
already lifetime-terminated instrument.
'''
expiry_str: str = con_deats.realExpirationDate
if not expiry_str:
return False
expiry_dt: datetime = parse(expiry_str)
return expiry_dt.date() >= now().date()
def has_weekend(
period: Interval,
) -> bool:
@ -114,28 +90,13 @@ def has_holiday(
con.exchange
)
# XXX, ad-hoc handle any IB exchange which are
# non-std via lookup table..
std_exch: str = {
# XXX, ad-hoc handle any IB exchange which are non-std
# via lookup table..
std_exch: dict = {
'ARCA': 'ARCX',
}.get(exch, exch)
try:
cal: ExchangeCalendar = xcals.get_calendar(
std_exch
)
except InvalidCalendarName:
# venue has no `exchange_calendars` entry
# (eg. IDEALPRO for forex, PAXOS for
# crypto) -> not a holiday by default since
# weekends are already handled by
# `has_weekend()`.
log.warning(
f'No exchange cal for {std_exch!r},'
f' skipping holiday check..\n'
)
return False
cal: ExchangeCalendar = xcals.get_calendar(std_exch)
end: datetime = period.end
# _start: datetime = period.start
# ?TODO, can rm ya?
@ -209,22 +170,7 @@ def sesh_times(
get the (day-agnostic) times for the start/end.
'''
# ?TODO, lookup the next front contract instead?
if is_expired(con_deats):
raise ValueError(
f'Contract is already expired!\n'
f'Choose an active alt contract instead.\n'
f'con_deats: {con_deats!r}\n'
)
maybe_sessions: list[Interval] = list(iter_sessions(con_deats))
if not maybe_sessions:
raise ValueError(
f'Contract has no trading-session info?\n'
f'con_deats: {con_deats!r}\n'
)
earliest_sesh: Interval = maybe_sessions[0]
earliest_sesh: Interval = next(iter_sessions(con_deats))
return (
earliest_sesh.start.time(),
earliest_sesh.end.time(),
@ -265,13 +211,7 @@ def is_venue_closure(
'''
open: Time
close: Time
maybe_oc: tuple|None = sesh_times(con_deats)
if maybe_oc is None:
# XXX, should never get here.
breakpoint()
return False
open, close = maybe_oc
open, close = sesh_times(con_deats)
# ensure times are in mkt-native timezone
tz: str = con_deats.timeZoneId

View File

@ -35,7 +35,6 @@ import hashlib
import hmac
import base64
import tractor
# from tractor._exceptions import reg_err_types
import trio
from piker import config
@ -53,7 +52,6 @@ from piker.brokers._util import (
SymbolNotFound,
BrokerError,
DataThrottle,
get_or_raise_on_pair_schema_mismatch,
)
from piker.accounting import Transaction
from piker.log import get_logger
@ -109,37 +107,15 @@ def get_kraken_signature(
return sigdigest.decode()
# class InvalidKey(ValueError):
# '''
# EAPI:Invalid key
class InvalidKey(ValueError):
'''
EAPI:Invalid key
This error is returned when the API key used for the call is
either expired or disabled, please review the API key in your
Settings -> API tab of account management or generate a new one
and update your application.
# This error is returned when the API key used for the call is
# either expired or disabled, please review the API key in your
# Settings -> API tab of account management or generate a new one
# and update your application.
# '''
# class InvalidSession(RuntimeError):
# '''
# ESession:Invalid session
# This error is returned when the ws API key used for an authenticated
# sub/endpoint becomes stale, normally after a sufficient network
# disconnect/outage.
# Normally the sub will need to be restarted, likely re-init of the
# auth handshake sequence.
# '''
# subscription: dict
# reg_err_types([
# InvalidKey,
# InvalidSession,
# ])
'''
class Client:
@ -167,16 +143,18 @@ class Client:
config: dict[str, str],
httpx_client: httpx.AsyncClient,
key_descr: str = '',
name: str = '',
api_key: str = '',
secret: str = ''
) -> None:
self._sesh: httpx.AsyncClient = httpx_client
self._key_descr = key_descr
self._name = name
self._api_key = api_key
self._secret = secret
self.conf: dict[str, str] = config
self._ws_token: str|None = None
@property
def pairs(self) -> dict[str, Pair]:
@ -261,39 +239,6 @@ class Client:
return balances
async def get_ws_token(
self,
params: dict = {},
force_renewal: bool = False,
) -> str:
'''
Get websocket token for authenticated data stream and cache
it for reuse.
Assert a value was actually received before return.
'''
if (
not force_renewal
and
self._ws_token
):
return self._ws_token
resp = await self.endpoint(
'GetWebSocketsToken',
{},
)
if err := resp.get('error'):
raise BrokerError(err)
# resp token for ws init
token: str = resp['result']['token']
assert token
self._ws_token: str = token
return token
async def get_assets(
self,
reload: bool = False,
@ -557,16 +502,7 @@ class Client:
# NOTE: always cache in pairs tables for faster lookup
with tractor.devx.maybe_open_crash_handler(): # as bxerr:
# pair = Pair(xname=xkey, **data)
pair: Pair = get_or_raise_on_pair_schema_mismatch(
pair_type=Pair,
fields_data=dict(
xname=xkey,
**data,
),
provider_name='kraken',
# api_url='https://binance-docs.github.io/apidocs/spot/en/#exchange-information',
)
pair = Pair(xname=xkey, **data)
# register the above `Pair` structs for all
# key-sets/monikers: a set of 4 (frickin) tables
@ -732,13 +668,7 @@ class Client:
@acm
async def get_client() -> Client:
'''
Load and deliver a `.kraken.api.Client`.
When defined, inject any config delivered from the user's
`brokers.toml` config file.
'''
conf: dict[str, Any] = get_config()
async with httpx.AsyncClient(
base_url=_url,
@ -749,14 +679,13 @@ async def get_client() -> Client:
# connections=4
) as trio_client:
if conf:
api_key_descr: str = conf['key_descr']
client = Client(
conf,
httpx_client=trio_client,
# TODO: don't break these up and just do internal
# conf lookups instead..
key_descr=api_key_descr,
name=conf['key_descr'],
api_key=conf['api_key'],
secret=conf['secret']
)

View File

@ -30,15 +30,12 @@ from typing import (
Any,
AsyncIterator,
Iterable,
Type,
Union,
)
from bidict import bidict
import trio
import tractor
from tractor.devx.pformat import ppfmt
from tractor._exceptions import reg_err_types
from piker.accounting import (
Position,
@ -48,9 +45,6 @@ from piker.accounting import (
open_trade_ledger,
open_account,
)
from piker.config import (
ConfigurationError,
)
from piker.clearing import(
OrderDialogs,
)
@ -73,7 +67,10 @@ from piker.log import (
get_logger,
)
from piker.data import open_symcache
from . import api
from .api import (
Client,
BrokerError,
)
from .feed import (
open_autorecon_ws,
NoBsWs,
@ -97,14 +94,11 @@ MsgUnion = Union[
]
# class TooFastEdit(Exception):
# 'Edit requests faster then api submissions'
class TooFastEdit(Exception):
'Edit requests faster then api submissions'
# reg_err_types([TooFastEdit])
# TODO: make this wrap the `api.Client` and `ws` instances
# TODO: make this wrap the `Client` and `ws` instances
# and give it methods to submit cancel vs. add vs. edit
# requests?
class BrokerClient:
@ -132,22 +126,23 @@ class BrokerClient:
async def handle_order_requests(
ws: NoBsWs,
client: api.Client,
client: Client,
ems_order_stream: tractor.MsgStream,
token: str,
apiflows: OrderDialogs,
ids: bidict[str, int],
reqids2txids: dict[int, str],
toofastedit: set[int],
) -> None:
'''
`trio.Task` which handles order ctl requests from the EMS and
deliver acks or errors back on that IPC dialog.
Process new order submission requests from the EMS
and deliver acks or errors.
'''
# XXX: UGH, let's unify this.. with ``msgspec``!!!
msg: dict|Order
msg: dict | Order
async for msg in ems_order_stream:
log.info(f'Rx order msg:\n{pformat(msg)}')
match msg:
@ -161,14 +156,8 @@ async def handle_order_requests(
txid = reqids2txids[reqid]
except KeyError:
# XXX: not sure if this block ever gets hit now?
# SEEMS TO on the race case with the update task?
# - update dark order quickly after
# triggered-submitted and then we have inavlid
# value in `reqids2txids` sent over ws.send()??
log.error('TOO FAST CANCEL/EDIT')
# reqids2txids[reqid] = TooFastEdit(reqid)
toofastedit.add(reqid)
reqids2txids[reqid] = reqid
reqids2txids[reqid] = TooFastEdit(reqid)
await ems_order_stream.send(
BrokerdError(
oid=msg['oid'],
@ -184,7 +173,7 @@ async def handle_order_requests(
# https://docs.kraken.com/websockets/#message-cancelOrder
await ws.send_msg({
'event': 'cancelOrder',
'token': await client.get_ws_token(),
'token': token,
'reqid': reqid,
'txid': [txid], # should be txid from submission
})
@ -196,7 +185,7 @@ async def handle_order_requests(
# validate
order = BrokerdOrder(**msg)
# logic from old `api.Client.submit_limit()`
# logic from old `Client.submit_limit()`
if order.oid in ids:
ep: str = 'editOrder'
reqid: int = ids[order.oid] # integer not txid
@ -206,16 +195,13 @@ async def handle_order_requests(
# XXX: not sure if this block ever gets hit now?
log.error('TOO FAST EDIT')
# reqids2txids[reqid] = TooFastEdit(reqid)
reqids2txids[reqid] = reqid
toofastedit.add(reqid)
await tractor.pause()
reqids2txids[reqid] = TooFastEdit(reqid)
await ems_order_stream.send(
BrokerdError(
oid=msg['oid'],
symbol=msg['symbol'],
reason=(
f'TooFastEdit reqid: {reqid}, cancelling..'
f'TooFastEdit reqid:{reqid}, cancelling..'
),
)
@ -261,7 +247,7 @@ async def handle_order_requests(
# https://docs.kraken.com/websockets/#message-addOrder
req = {
'event': ep,
'token': await client.get_ws_token(),
'token': token,
'reqid': reqid, # remapped-to-int uid from ems
# XXX: we set these to the same value since for us
@ -305,15 +291,13 @@ async def handle_order_requests(
symbol=msg['symbol'],
reason=(
'Invalid request msg:\n{msg}'
),
)
))
)
@acm
async def subscribe(
ws: NoBsWs,
client: api.Client,
token: str,
subs: list[tuple[str, dict]] = [
('ownTrades', {
@ -332,17 +316,12 @@ async def subscribe(
Setup ws api subscriptions:
https://docs.kraken.com/websockets/#message-subscribe
By default we sign up for trade and order (update) events per
`subs`.
By default we sign up for trade and order update events.
'''
# more specific logic for this in kraken's sync client:
# https://github.com/krakenfx/kraken-wsclient-py/blob/master/kraken_wsclient_py/kraken_wsclient_py.py#L188
assert (
token
and
token == await client.get_ws_token()
)
assert token
subnames: set[str] = set()
for name, sub_opts in subs:
@ -350,7 +329,7 @@ async def subscribe(
'event': 'subscribe',
'subscription': {
'name': name,
'token': await client.get_ws_token(),
'token': token,
**sub_opts,
}
}
@ -365,9 +344,7 @@ async def subscribe(
# wait on subscriptionn acks
with trio.move_on_after(5):
while True:
msg: dict = await ws.recv_msg()
fmt_msg: str = ppfmt(msg)
match msg:
match (msg := await ws.recv_msg()):
case {
'event': 'subscriptionStatus',
'status': 'subscribed',
@ -385,49 +362,10 @@ async def subscribe(
'event': 'subscriptionStatus',
'status': 'error',
'errorMessage': errmsg,
'subscription': sub_opts,
} as msg:
if errmsg:
etype_str, _, ev_msg = errmsg.partition(':')
etype: Type[Exception] = getattr(
api,
etype_str,
RuntimeError,
)
exc = etype(
f'{ev_msg}\n'
f'\n'
f'{fmt_msg}'
)
# !TODO, for `InvalidSession` we should
# attempt retries to resub and ensure all
# sibling (task) `token` holders update
# their refs accoridingly!
match (etype_str, ev_msg):
case (
'ESession',
'Invalid session',
):
# attempt ws-token refresh
token: str = await client.get_ws_token(
force_renewal=True
)
await tractor.pause()
continue
case _:
log.warning(
f'Unhandled subscription-status,\n'
f'{fmt_msg}'
)
raise exc
case _:
log.warning(
f'Unknown ws event rxed?\n'
f'{fmt_msg}'
raise RuntimeError(
f'{errmsg}\n\n'
f'{pformat(msg)}'
)
yield
@ -523,27 +461,11 @@ async def open_trade_dialog(
# (much like the web UI let's you set an "account currency")
# such that all positions (nested or flat) will be translated to
# this source currency's terms.
src_fiat = client.conf.get('src_fiat')
if not src_fiat:
raise ConfigurationError(
'No `src_fiat: str` field defined in `brokers.toml`'
)
src_fiat = client.conf['src_fiat']
# auth required block
conf: dict = client.conf
accounts: dict = conf.get('accounts')
acctid: str = client._key_descr
if not accounts.get(acctid):
raise ConfigurationError(
f'No API-key found for account-alias defined as {acctid!r} !\n'
f'\n'
f'Did set a `kraken.accounts.*` entry in your `brokers.toml`?\n'
f'It should look something like,\n'
f'\n'
f'[kraken]\n'
f'accounts.{acctid} = {acctid!r}\n'
)
fqan: str = f'kraken.{acctid}'
acctid = client._name
acc_name = 'kraken.' + acctid
# task local msg dialog tracking
apiflows = OrderDialogs()
@ -662,10 +584,7 @@ async def open_trade_dialog(
acctid,
)
# sync with EMS delivering pps and accounts
await ctx.started((
ppmsgs,
[fqan],
))
await ctx.started((ppmsgs, [acc_name]))
# TODO: ideally this blocks the this task
# as little as possible. we need to either do
@ -673,11 +592,14 @@ async def open_trade_dialog(
# async file IO api?
acnt.write_config()
token: str = await client.get_ws_token()
# Get websocket token for authenticated data stream
# Assert that a token was actually received.
resp = await client.endpoint('GetWebSocketsToken', {})
if err := resp.get('error'):
raise BrokerError(err)
# XXX tracks EMS orders which are updated too quickly
# on the emds side with sync-issues on the kraken side.
toofastedit: set[int] = set()
# resp token for ws init
token: str = resp['result']['token']
ws: NoBsWs
async with (
@ -686,24 +608,23 @@ async def open_trade_dialog(
'wss://ws-auth.kraken.com/',
fixture=partial(
subscribe,
client=client,
token=token,
),
) as ws,
aclosing(stream_messages(ws)) as stream,
trio.open_nursery() as tn,
trio.open_nursery() as nurse,
):
# task for processing inbound requests from ems
tn.start_soon(partial(
nurse.start_soon(
handle_order_requests,
ws=ws,
client=client,
ems_stream=ems_stream,
apiflows=apiflows,
ids=ids,
reqids2txids=reqids2txids,
toofastedit=toofastedit,
))
ws,
client,
ems_stream,
token,
apiflows,
ids,
reqids2txids,
)
# enter relay loop
await handle_order_updates(
@ -714,23 +635,22 @@ async def open_trade_dialog(
apiflows=apiflows,
ids=ids,
reqids2txids=reqids2txids,
toofastedit=toofastedit,
acnt=acnt,
ledger=ledger,
acctid=acctid,
acc_name=fqan,
acc_name=acc_name,
token=token,
)
async def handle_order_updates(
client: api.Client, # only for pairs table needed in ledger proc
client: Client, # only for pairs table needed in ledger proc
ws: NoBsWs,
ws_stream: AsyncIterator,
ems_stream: tractor.MsgStream,
apiflows: OrderDialogs,
ids: bidict[str, int],
reqids2txids: bidict[int, str],
toofastedit: set[int],
acnt: Account,
# transaction records which will be updated
@ -739,6 +659,7 @@ async def handle_order_updates(
# ledger_trans: dict[str, Transaction],
acctid: str,
acc_name: str,
token: str,
) -> None:
'''
@ -1038,12 +959,10 @@ async def handle_order_updates(
# <-> ems dialog.
if (
status == 'open'
and
reqid in toofastedit
# isinstance(
# reqids2txids.get(reqid),
# TooFastEdit
# )
and isinstance(
reqids2txids.get(reqid),
TooFastEdit
)
):
# TODO: don't even allow this case
# by not moving the client side line
@ -1058,8 +977,7 @@ async def handle_order_updates(
# https://docs.kraken.com/websockets/#message-cancelOrder
await ws.send_msg({
'event': 'cancelOrder',
# 'token': token,
'token': await client.get_ws_token(),
'token': token,
'reqid': reqid or 0,
'txid': [txid],
})
@ -1205,9 +1123,7 @@ async def handle_order_updates(
txid
# we throttle too-fast-requests on the ems side
and
reqid in toofastedit
# not isinstance(txid, TooFastEdit)
and not isinstance(txid, TooFastEdit)
):
# client was editting too quickly
# so we instead cancel this order
@ -1215,8 +1131,7 @@ async def handle_order_updates(
f'Cancelling {reqid}@{txid} due to:\n {event}')
await ws.send_msg({
'event': 'cancelOrder',
# 'token': token,
'token': await client.get_ws_token(),
'token': token,
'reqid': reqid or 0,
'txid': [txid],
})

View File

@ -19,9 +19,6 @@ Symbology defs and search.
'''
from decimal import Decimal
from typing import (
ClassVar,
)
import tractor
@ -89,14 +86,9 @@ class Pair(Struct):
short_position_limit: float = 0
long_position_limit: float = float('inf')
# TODO, add API note when this was added!
execution_venue: str|None = None
# TODO: should we make this a literal NamespacePath ref?
ns_path: str = 'piker.brokers.kraken:Pair'
_api_url: ClassVar[str] = 'https://docs.kraken.com/api/docs/rest-api/get-tradable-asset-pairs'
@property
def bs_mktid(self) -> str:
'''

View File

@ -95,9 +95,6 @@ _time_frames = {
class QuestradeError(Exception):
"Non-200 OK response code"
from tractor._exceptions import reg_err_types
reg_err_types([QuestradeError])
class ContractsKey(NamedTuple):
symbol: str

View File

@ -27,7 +27,7 @@ from types import ModuleType
import click
import trio
import tractor
from tractor.discovery._multiaddr import parse_maddr
from tractor._multiaddr import parse_maddr
from ..log import (
get_console_log,
@ -345,7 +345,7 @@ def services(
if not ports:
ports: list[int] = [_default_registry_port]
addr = tractor.discovery._addr.wrap_address(
addr = tractor._addr.wrap_address(
addr=(host, ports[0])
)

View File

@ -36,8 +36,6 @@ except ModuleNotFoundError:
import tomli as tomllib
from tractor._exceptions import reg_err_types
from tractor.devx.pformat import ppfmt
from .log import get_logger
log = get_logger('broker-config')
@ -174,12 +172,6 @@ class ConfigurationError(Exception):
class NoSignature(ConfigurationError):
'No credentials setup for broker backend!'
# auto-register for tractor IPC exc-marshalling.
reg_err_types([
ConfigurationError,
*ConfigurationError.__subclasses__(),
])
def _override_config_dir(
path: str
@ -358,56 +350,30 @@ def write(
def load_accounts(
providers: list[str]|None = None
) -> bidict[str, str|None]:
providers: list[str] | None = None
) -> bidict[str, str | None]:
conf, path = load(
conf_name='brokers',
)
accounts = bidict({
# XXX, default paper-engine entry; this MUST be set.
'paper': None,
})
msg: str = (
'Loading account(s) from `brokers.toml`,\n'
)
for (
provider_name,
section,
) in conf.items():
accounts_section: dict[str, str] = section.get('accounts')
if accounts_section is None:
msg += f'No accounts declared for {provider_name!r}?\n'
continue
# msg += f'Loaded accounts for {provider_name!r}?\n'
accounts = bidict()
for provider_name, section in conf.items():
accounts_section = section.get('accounts')
if (
providers is None
or (
providers
and
provider_name in providers
)
providers is None or
providers and provider_name in providers
):
for (
label,
value,
) in accounts_section.items():
account_alias: str = f'{provider_name}.{label}'
accounts[account_alias] = value
msg += f'{account_alias} = {value!r}\n'
if accounts_section is None:
log.warning(f'No accounts named for {provider_name}?')
continue
else:
for label, value in accounts_section.items():
accounts[
f'{provider_name}.{label}'
] = value
else:
log.debug(
f'NOT loading account(s) for entry in `brokers.toml`,\n'
f'The account provider was not requested for loading.\n'
f'requested-providers: {providers!r}\n'
f'this-provider: {provider_name!r}\n'
f'\n'
f'{ppfmt(accounts_section)}\n'
)
# ?TODO? mk this bp work?
# breakpoint()
# our default paper engine entry
accounts['paper'] = None
log.info(msg)
return accounts

View File

@ -77,7 +77,7 @@ from ._sampling import (
if TYPE_CHECKING:
from .flows import Flume
from tractor.discovery._addr import Address
from tractor._addr import Address
from tractor.msg.types import Aid
@ -973,6 +973,9 @@ async def open_feed(
# assert flume.mkt.fqme == fqme
feed.flumes[fqme] = flume
# TODO: do we need this?
flume.feed = feed
# attach and cache shm handles
rt_shm = flume.rt_shm
assert rt_shm

View File

@ -22,6 +22,9 @@ real-time data processing data-structures.
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
)
import tractor
import pendulum
@ -35,6 +38,9 @@ from tractor.ipc._shm import (
)
from piker.accounting import MktPair
if TYPE_CHECKING:
from piker.data.feed import Feed
class Flume(Struct):
'''
@ -74,6 +80,10 @@ class Flume(Struct):
izero_rt: int = 0
throttle_rate: int | None = None
# TODO: do we need this really if we can pull the `Portal` from
# ``tractor``'s internals?
feed: Feed|None = None
@property
def rt_shm(self) -> ShmArray:
@ -146,6 +156,7 @@ class Flume(Struct):
# will get instead some kind of msg-compat version
# that it can load.
msg.pop('stream')
msg.pop('feed')
msg.pop('_rt_shm')
msg.pop('_hist_shm')

View File

@ -28,7 +28,6 @@ from typing import (
)
from msgspec import field
from tractor._exceptions import reg_err_types
from piker.types import Struct
from piker.accounting import (
@ -44,8 +43,6 @@ class FeedInitializationError(ValueError):
'''
reg_err_types([FeedInitializationError])
class FeedInit(Struct, frozen=True):
'''

View File

@ -91,7 +91,7 @@ async def open_piker_runtime(
try:
actor = tractor.current_actor()
except tractor._exceptions.NoRuntime:
tractor.runtime._state._runtime_vars[
tractor._state._runtime_vars[
'piker_vars'
] = tractor_runtime_overrides
@ -264,7 +264,7 @@ async def maybe_open_pikerd(
**kwargs,
) -> (
tractor.Portal
tractor._portal.Portal
|ClassVar[Services]
):
'''

View File

@ -49,7 +49,6 @@ from requests.exceptions import (
ReadTimeout,
)
from tractor._exceptions import reg_err_types
from piker.log import (
get_console_log,
get_logger,
@ -67,11 +66,6 @@ class DockerNotStarted(Exception):
class ApplicationLogError(Exception):
'App in container reported an error in logs'
reg_err_types([
DockerNotStarted,
ApplicationLogError,
])
@acm
async def open_docker(

View File

@ -79,17 +79,10 @@ async def maybe_spawn_daemon(
lock = Services.locks[service_name]
await lock.acquire()
if not pikerd_kwargs:
# XXX NOTE, pin to apprope `tractor` branch!
rtvs: dict = tractor.get_runtime_vars()
registry_addrs: list[tuple] = list(
map(tuple, rtvs['_registry_addrs'])
)
try:
async with find_service(
service_name,
registry_addrs=registry_addrs,
registry_addrs=[('127.0.0.1', 6116)],
) as portal:
if portal is not None:
lock.release()
@ -106,7 +99,6 @@ async def maybe_spawn_daemon(
# process tree
async with maybe_open_pikerd(
loglevel=loglevel,
registry_addrs=registry_addrs,
**pikerd_kwargs,
) as pikerd_portal:
@ -150,65 +142,7 @@ async def maybe_spawn_daemon(
async with tractor.wait_for_actor(service_name) as portal:
lock.release()
yield portal
# --- ---- ---
# XXX NOTE XXX
# --- ---- ---
# DO NOT PUT A `portal.cancel_actor()` here (as was prior)!
#
# Doing so will cause an "out-of-band" ctxc
# (`tractor.ContextCancelled`) to be raised inside the
# `ServiceMngr.open_context_in_task()`'s call to
# `ctx.wait_for_result()` AND the internal self-ctxc
# "graceful capture" WILL NOT CATCH IT!
#
# This can cause certain types of operations to raise
# that ctxc BEFORE THEY `return`, resulting in
# a "false-negative" ctxc being raised when really
# nothing actually failed, other then our semantic
# "failure" to suppress an expected, graceful,
# self-cancel scenario..
#
# bUt wHy duZ It WorK lIKe dis..
# ------------------------------
# from the perspective of the `tractor.Context` this
# cancel request was conducted "out of band" since
# `Context.cancel()` was never called and thus the
# `._cancel_called: bool` was never set. Despite the
# remote `.canceller` being set to `pikerd` (i.e. the
# same `Actor.uid` of the raising service-mngr task) the
# service-task's ctx itself was never marked as having
# requested cancellation and thus still raises the ctxc
# bc it was unaware of any such request.
#
# How to make grokin these cases easier tho?
# ------------------------------------------
# Because `Portal.cancel_actor()` was called it requests
# "full-`Actor`-runtime-cancellation" of it's peer
# process which IS NOT THE SAME as a single inter-actor
# RPC task cancelling its local context with a remote
# peer `Task` in that same peer process.
#
# ?TODO? It might be better if we do one (or all) of the
# following:
#
# -[ ] at least set a special message for the
# `ContextCancelled` when raised locally by the
# unaware ctx task such that we check for the
# `.canceller` being *our `Actor`* and in the case
# where `Context._cancel_called == False` we specially
# note that this is likely an "out-of-band"
# runtime-cancel request triggered by some call to
# `Portal.cancel_actor()`, possibly even reporting the
# exact LOC of that caller by tracking it inside our
# portal-type?
# -[ ] possibly add another field `ContextCancelled` like
# maybe a,
# `.request_type: Literal['os', 'proc', 'actor',
# 'ctx']` type thing which would allow immediately
# being able to tell what kind of cancellation caused
# the unexpected ctxc?
# -[ ] REMOVE THIS COMMENT, once we've settled on how to
# better augment `tractor` to be more explicit on this!
await portal.cancel_actor()
except BaseException as _err:
err = _err
@ -218,13 +152,11 @@ async def maybe_spawn_daemon(
lock.statistics().owner is current_task()
):
log.exception(
f'Releasing stale lock after crash..?\n'
f'\n'
f'Releasing stale lock after crash..?'
f'{err!r}\n'
)
lock.release()
raise
raise err
async def spawn_emsd(

View File

@ -48,7 +48,7 @@ log = get_logger(name=__name__)
# new actors and supervises them to completion?
class Services:
actor_n: tractor.ActorNursery
actor_n: tractor._supervise.ActorNursery
service_n: trio.Nursery
debug_mode: bool # tractor sub-actor debug mode flag
service_tasks: dict[

View File

@ -42,7 +42,6 @@ from msgspec.msgpack import (
# import pyqtgraph as pg
import numpy as np
import tractor
from tractor._exceptions import reg_err_types
from trio_websocket import open_websocket_url
from anyio_marketstore import ( # noqa
open_marketstore_client,
@ -383,8 +382,6 @@ def quote_to_marketstore_structarray(
class MarketStoreError(Exception):
"Generic marketstore client error"
reg_err_types([MarketStoreError])
# def err_on_resp(response: dict) -> None:
# """Raise any errors found in responses from client request.

View File

@ -42,17 +42,15 @@ from typing import (
)
import numpy as np
from tractor._exceptions import reg_err_types
from piker import config
from piker.log import (
from .. import config
from ..service import (
check_for_service,
)
from ..log import (
get_logger,
get_console_log,
)
from piker.service import (
check_for_service,
)
subsys: str = 'piker.storage'
log = get_logger(subsys)
@ -153,12 +151,6 @@ class StorageConnectionError(ConnectionError):
'''
reg_err_types([
TimeseriesNotFound,
StorageConnectionError,
])
def get_storagemod(
name: str,

View File

@ -292,11 +292,6 @@ def ldshm(
f'Something is wrong with time period for {shm}:\n{times}'
)
period_s: float = float(max(d1, d2, med))
log.info(
f'Processing shm buffer:\n'
f' file: {shmfile.name}\n'
f' period: {period_s}s\n'
)
null_segs: tuple = tsp.get_null_segs(
frame=shm.array,
@ -306,7 +301,7 @@ def ldshm(
# TODO: call null-seg fixer somehow?
if null_segs:
if tractor.runtime._state.is_debug_mode():
if tractor._state.is_debug_mode():
await tractor.pause()
# async with (
# trio.open_nursery() as tn,

View File

@ -276,41 +276,14 @@ def get_null_segs(
absi_zdiff: np.ndarray = np.diff(absi_zeros)
if zero_t.size < 2:
idx: int = zero_t['index'][0]
idx_before: int = idx - 1
idx_after: int = idx + 1
index = frame['index']
before_cond = idx_before <= index
after_cond = index <= idx_after
bars: np.ndarray = frame[
before_cond
&
after_cond
]
time: np.ndarray = bars['time']
from pendulum import (
from_timestamp,
Interval,
)
gap: Interval = (
from_timestamp(time[-1])
-
from_timestamp(time[0])
)
log.warning(
f'Single OHLCV-bar null-segment detected??\n'
f'gap -> {gap}\n'
)
# ^^XXX, if you want to debug the above bar-gap^^
# try:
# breakpoint()
# except RuntimeError:
# # XXX, if greenback not active from
# # piker store ldshm cmd..
# log.exception(
# "Can't debug single-sample null!\n"
# )
try:
breakpoint()
except RuntimeError:
# XXX, if greenback not active from
# piker store ldshm cmd..
log.exception(
"Can't debug single-sample null!\n"
)
return None

View File

@ -30,11 +30,6 @@ import tractor
from piker.data._formatters import BGM
from piker.storage import log
from piker.toolz.profile import (
Profiler,
pg_profile_enabled,
ms_slower_then,
)
from piker.ui._style import get_fonts
if TYPE_CHECKING:
@ -97,22 +92,12 @@ async def markup_gaps(
# gap's duration.
show_txt: bool = False,
# A/B comparison: render individual arrows alongside batch
# for visual comparison
show_individual_arrows: bool = False,
) -> dict[int, dict]:
'''
Remote annotate time-gaps in a dt-fielded ts (normally OHLC)
with rectangles.
'''
profiler = Profiler(
msg=f'markup_gaps() for {gaps.height} gaps',
disabled=False,
ms_threshold=0.0,
)
# XXX: force chart redraw FIRST to ensure PlotItem coordinate
# system is properly initialized before we position annotations!
# Without this, annotations may be misaligned on first creation
@ -121,19 +106,6 @@ async def markup_gaps(
fqme=fqme,
timeframe=timeframe,
)
profiler('first `.redraw()` before annot creation')
log.info(
f'markup_gaps() called:\n'
f' fqme: {fqme}\n'
f' timeframe: {timeframe}s\n'
f' gaps.height: {gaps.height}\n'
)
# collect all annotation specs for batch submission
rect_specs: list[dict] = []
arrow_specs: list[dict] = []
text_specs: list[dict] = []
aids: dict[int] = {}
for i in range(gaps.height):
@ -196,7 +168,7 @@ async def markup_gaps(
prev_r: pl.DataFrame = prev_row_by_i
# debug any missing pre-row
if tractor.runtime._state.is_debug_mode():
if tractor._state.is_debug_mode():
await tractor.pause()
istart: int = prev_r['index'][0]
@ -245,38 +217,56 @@ async def markup_gaps(
# 1: 'wine', # down-gap
# }[sgn]
# collect rect spec (no fqme/timeframe, added by batch
# API)
rect_spec: dict[str, Any] = dict(
meth='set_view_pos',
rect_kwargs: dict[str, Any] = dict(
fqme=fqme,
timeframe=timeframe,
start_pos=lc,
end_pos=ro,
color=color,
update_label=False,
start_time=start_time,
end_time=end_time,
)
rect_specs.append(rect_spec)
# add up/down rects
aid: int|None = await actl.add_rect(**rect_kwargs)
if aid is None:
log.error(
f'Failed to add rect for,\n'
f'{rect_kwargs!r}\n'
f'\n'
f'Skipping to next gap!\n'
)
continue
assert aid
aids[aid] = rect_kwargs
direction: str = (
'down' if down_gap
else 'up'
)
# collect arrow spec
# TODO! mk this a `msgspec.Struct` which we deserialize
# on the server side!
# XXX: send timestamp for server-side index lookup
# to ensure alignment with current shm state
gap_time: float = row['time'][0]
arrow_spec: dict[str, Any] = dict(
arrow_kwargs: dict[str, Any] = dict(
fqme=fqme,
timeframe=timeframe,
x=iend, # fallback if timestamp lookup fails
y=cls,
time=gap_time, # for server-side index lookup
color=color,
alpha=169,
pointing=direction,
# TODO: expose these as params to markup_gaps()?
headLen=10,
headWidth=2.222,
pxMode=True,
)
arrow_specs.append(arrow_spec)
aid: int = await actl.add_arrow(
**arrow_kwargs
)
# add duration label to RHS of arrow
if up_gap:
@ -288,12 +278,15 @@ async def markup_gaps(
assert flat
anchor = (0, 0) # up from bottom
# collect text spec if enabled
if show_txt:
font, small_font = get_fonts()
font_size: int = small_font.px_size - 1
# use a slightly smaller font for gap label txt.
font, small_font = get_fonts()
font_size: int = small_font.px_size - 1
assert isinstance(font_size, int)
text_spec: dict[str, Any] = dict(
if show_txt:
text_aid: int = await actl.add_text(
fqme=fqme,
timeframe=timeframe,
text=gap_label,
x=iend + 1, # fallback if timestamp lookup fails
y=cls,
@ -302,46 +295,12 @@ async def markup_gaps(
anchor=anchor,
font_size=font_size,
)
text_specs.append(text_spec)
aids[text_aid] = {'text': gap_label}
# submit all annotations in single batch IPC msg
log.info(
f'Submitting batch annotations:\n'
f' rects: {len(rect_specs)}\n'
f' arrows: {len(arrow_specs)}\n'
f' texts: {len(text_specs)}\n'
)
profiler('built all annotation specs')
result: dict[str, list[int]] = await actl.add_batch(
fqme=fqme,
timeframe=timeframe,
rects=rect_specs,
arrows=arrow_specs,
texts=text_specs,
show_individual_arrows=show_individual_arrows,
)
profiler('batch `.add_batch()` IPC call complete')
# build aids dict from batch results
for aid in result['rects']:
aids[aid] = {'type': 'rect'}
for aid in result['arrows']:
aids[aid] = {'type': 'arrow'}
for aid in result['texts']:
aids[aid] = {'type': 'text'}
log.info(
f'Batch submission complete: {len(aids)} annotation(s) '
f'created'
)
profiler('built aids result dict')
# tell chart to redraw all its graphics view layers
# tell chart to redraw all its
# graphics view layers Bo
await actl.redraw(
fqme=fqme,
timeframe=timeframe,
)
profiler('final `.redraw()` after annot creation')
return aids

View File

@ -738,21 +738,12 @@ async def start_backfill(
# including the dst[/src] source asset token. SO,
# 'tsla.nasdaq.ib' over 'tsla/usd.nasdaq.ib' for
# historical reasons ONLY.
if (
mkt.dst.atype not in {
'crypto',
'crypto_currency',
'fiat', # a "forex pair"
'perpetual_future', # stupid "perps" from cex land
}
and not (
mkt.src.atype == 'crypto_currency'
and
mkt.dst.atype in {
'future',
}
)
):
if mkt.dst.atype not in {
'crypto',
'crypto_currency',
'fiat', # a "forex pair"
'perpetual_future', # stupid "perps" from cex land
}:
col_sym_key: str = mkt.get_fqme(
delim_char='',
without_src=True,

View File

@ -24,11 +24,8 @@ from pyqtgraph import (
Point,
functions as fn,
Color,
GraphicsObject,
)
from pyqtgraph.Qt import internals
import numpy as np
import pyqtgraph as pg
from piker.ui.qt import (
QtCore,
@ -38,10 +35,6 @@ from piker.ui.qt import (
QRectF,
QGraphicsPathItem,
)
from piker.ui._style import hcolor
from piker.log import get_logger
log = get_logger(__name__)
def mk_marker_path(
@ -111,7 +104,7 @@ def mk_marker_path(
class LevelMarker(QGraphicsPathItem):
'''
An arrow marker path graphic which redraws itself
An arrow marker path graphich which redraws itself
to the specified view coordinate level on each paint cycle.
'''
@ -258,9 +251,9 @@ def qgo_draw_markers(
) -> float:
'''
Paint markers in ``pg.GraphicsItem`` style by first removing the
view transform for the painter, drawing the markers in scene
coords, then restoring the view coords.
Paint markers in ``pg.GraphicsItem`` style by first
removing the view transform for the painter, drawing the markers
in scene coords, then restoring the view coords.
'''
# paint markers in native coordinate system
@ -302,449 +295,3 @@ def qgo_draw_markers(
p.setTransform(orig_tr)
return max(sizes)
class GapAnnotations(GraphicsObject):
'''
Batch-rendered gap annotations using Qt's efficient drawing
APIs.
Instead of creating individual `QGraphicsItem` instances per
gap (which is very slow for 1000+ gaps), this class stores all
gap rectangles and arrows in numpy-backed arrays and renders
them in single batch paint calls.
Performance: ~1000x faster than individual items for large gap
counts.
Based on patterns from:
- `pyqtgraph.BarGraphItem` (batch rect rendering)
- `pyqtgraph.ScatterPlotItem` (fragment rendering)
- `piker.ui._curve.FlowGraphic` (single path pattern)
'''
def __init__(
self,
gap_specs: list[dict],
array: np.ndarray|None = None,
color: str = 'dad_blue',
alpha: int = 169,
arrow_size: float = 10.0,
fqme: str|None = None,
timeframe: float|None = None,
) -> None:
'''
gap_specs: list of dicts with keys:
- start_pos: (x, y) tuple for left corner of rect
- end_pos: (x, y) tuple for right corner of rect
- arrow_x: x position for arrow
- arrow_y: y position for arrow
- pointing: 'up' or 'down' for arrow direction
- start_time: (optional) timestamp for repositioning
- end_time: (optional) timestamp for repositioning
array: optional OHLC numpy array for repositioning on
backfill updates (when abs-index changes)
fqme: symbol name for these gaps (for logging/debugging)
timeframe: period in seconds that these gaps were
detected on (used to skip reposition when
called with wrong timeframe's array)
'''
super().__init__()
self._gap_specs = gap_specs
self._array = array
self._fqme = fqme
self._timeframe = timeframe
n_gaps = len(gap_specs)
# shared pen/brush matching original SelectRect/ArrowItem style
base_color = pg.mkColor(hcolor(color))
# rect pen: base color, fully opaque for outline
self._rect_pen = pg.mkPen(base_color, width=1)
# rect brush: base color with alpha=66 (SelectRect default)
rect_fill = pg.mkColor(hcolor(color))
rect_fill.setAlpha(66)
self._rect_brush = pg.functions.mkBrush(rect_fill)
# arrow pen: same as rects
self._arrow_pen = pg.mkPen(base_color, width=1)
# arrow brush: base color with user-specified alpha (default 169)
arrow_fill = pg.mkColor(hcolor(color))
arrow_fill.setAlpha(alpha)
self._arrow_brush = pg.functions.mkBrush(arrow_fill)
# allocate rect array using Qt's efficient storage
self._rectarray = internals.PrimitiveArray(
QtCore.QRectF,
4,
)
self._rectarray.resize(n_gaps)
rect_memory = self._rectarray.ndarray()
# fill rect array from gap specs
for (
i,
spec,
) in enumerate(gap_specs):
(
start_x,
start_y,
) = spec['start_pos']
(
end_x,
end_y,
) = spec['end_pos']
# QRectF expects (x, y, width, height)
rect_memory[i, 0] = start_x
rect_memory[i, 1] = min(start_y, end_y)
rect_memory[i, 2] = end_x - start_x
rect_memory[i, 3] = abs(end_y - start_y)
# build single QPainterPath for all arrows
self._arrow_path = QtGui.QPainterPath()
self._arrow_size = arrow_size
for spec in gap_specs:
arrow_x = spec['arrow_x']
arrow_y = spec['arrow_y']
pointing = spec['pointing']
# create arrow polygon
if pointing == 'down':
# arrow points downward
arrow_poly = QtGui.QPolygonF([
QPointF(arrow_x, arrow_y), # tip
QPointF(
arrow_x - arrow_size/2,
arrow_y - arrow_size,
), # left
QPointF(
arrow_x + arrow_size/2,
arrow_y - arrow_size,
), # right
])
else: # up
# arrow points upward
arrow_poly = QtGui.QPolygonF([
QPointF(arrow_x, arrow_y), # tip
QPointF(
arrow_x - arrow_size/2,
arrow_y + arrow_size,
), # left
QPointF(
arrow_x + arrow_size/2,
arrow_y + arrow_size,
), # right
])
self._arrow_path.addPolygon(arrow_poly)
self._arrow_path.closeSubpath()
# cache bounding rect
self._br: QRectF|None = None
def boundingRect(self) -> QRectF:
'''
Compute bounding rect from rect array and arrow path.
'''
if self._br is not None:
return self._br
# get rect bounds
rect_memory = self._rectarray.ndarray()
if len(rect_memory) == 0:
self._br = QRectF()
return self._br
x_min = rect_memory[:, 0].min()
y_min = rect_memory[:, 1].min()
x_max = (rect_memory[:, 0] + rect_memory[:, 2]).max()
y_max = (rect_memory[:, 1] + rect_memory[:, 3]).max()
# expand for arrow path
arrow_br = self._arrow_path.boundingRect()
x_min = min(x_min, arrow_br.left())
y_min = min(y_min, arrow_br.top())
x_max = max(x_max, arrow_br.right())
y_max = max(y_max, arrow_br.bottom())
self._br = QRectF(
x_min,
y_min,
x_max - x_min,
y_max - y_min,
)
return self._br
def paint(
self,
p: QtGui.QPainter,
opt: QtWidgets.QStyleOptionGraphicsItem,
w: QtWidgets.QWidget,
) -> None:
'''
Batch render all rects and arrows in minimal paint calls.
'''
# draw all rects in single batch call (data coordinates)
p.setPen(self._rect_pen)
p.setBrush(self._rect_brush)
drawargs = self._rectarray.drawargs()
p.drawRects(*drawargs)
# draw arrows in scene/pixel coordinates so they maintain
# size regardless of zoom level
orig_tr = p.transform()
p.resetTransform()
# rebuild arrow path in scene coordinates
arrow_path_scene = QtGui.QPainterPath()
# arrow geometry matching pg.ArrowItem defaults
# headLen=10, headWidth=2.222
# headWidth is the half-width (center to edge distance)
head_len = self._arrow_size
head_width = head_len * 0.2222 # 2.222 at size=10
for spec in self._gap_specs:
if 'arrow_x' not in spec:
continue
arrow_x = spec['arrow_x']
arrow_y = spec['arrow_y']
pointing = spec['pointing']
# transform data coords to scene coords
scene_pt = orig_tr.map(QPointF(arrow_x, arrow_y))
sx = scene_pt.x()
sy = scene_pt.y()
# create arrow polygon in scene/pixel coords
# matching pg.ArrowItem geometry but rotated for up/down
if pointing == 'down':
# tip points downward (negative y direction)
arrow_poly = QtGui.QPolygonF([
QPointF(sx, sy), # tip
QPointF(
sx - head_width,
sy - head_len,
), # left base
QPointF(
sx + head_width,
sy - head_len,
), # right base
])
else: # up
# tip points upward (positive y direction)
arrow_poly = QtGui.QPolygonF([
QPointF(sx, sy), # tip
QPointF(
sx - head_width,
sy + head_len,
), # left base
QPointF(
sx + head_width,
sy + head_len,
), # right base
])
arrow_path_scene.addPolygon(arrow_poly)
arrow_path_scene.closeSubpath()
p.setPen(self._arrow_pen)
p.setBrush(self._arrow_brush)
p.drawPath(arrow_path_scene)
# restore original transform
p.setTransform(orig_tr)
def reposition(
self,
array: np.ndarray|None = None,
fqme: str|None = None,
timeframe: float|None = None,
) -> None:
'''
Reposition all annotations based on timestamps.
Used when viz is updated (eg during backfill) and abs-index
range changes - we need to lookup new indices from timestamps.
'''
# skip reposition if timeframe doesn't match
# (e.g., 1s gaps being repositioned with 60s array)
if (
timeframe is not None
and
self._timeframe is not None
and
timeframe != self._timeframe
):
log.debug(
f'Skipping reposition for {self._fqme} gaps:\n'
f' gap timeframe: {self._timeframe}s\n'
f' array timeframe: {timeframe}s\n'
)
return
if array is None:
array = self._array
if array is None:
log.warning(
'GapAnnotations.reposition() called but no array '
'provided'
)
return
# collect all unique timestamps we need to lookup
timestamps: set[float] = set()
for spec in self._gap_specs:
if spec.get('start_time') is not None:
timestamps.add(spec['start_time'])
if spec.get('end_time') is not None:
timestamps.add(spec['end_time'])
if spec.get('time') is not None:
timestamps.add(spec['time'])
# vectorized timestamp -> row lookup using binary search
time_to_row: dict[float, dict] = {}
if timestamps:
import numpy as np
time_arr = array['time']
ts_array = np.array(list(timestamps))
search_indices = np.searchsorted(
time_arr,
ts_array,
)
# vectorized bounds check and exact match verification
valid_mask = (
(search_indices < len(array))
& (time_arr[search_indices] == ts_array)
)
valid_indices = search_indices[valid_mask]
valid_timestamps = ts_array[valid_mask]
matched_rows = array[valid_indices]
time_to_row = {
float(ts): {
'index': float(row['index']),
'open': float(row['open']),
'close': float(row['close']),
}
for ts, row in zip(
valid_timestamps,
matched_rows,
)
}
# rebuild rect array from gap specs with new indices
rect_memory = self._rectarray.ndarray()
for (
i,
spec,
) in enumerate(self._gap_specs):
start_time = spec.get('start_time')
end_time = spec.get('end_time')
if (
start_time is None
or end_time is None
):
continue
start_row = time_to_row.get(start_time)
end_row = time_to_row.get(end_time)
if (
start_row is None
or end_row is None
):
log.warning(
f'Timestamp lookup failed for gap[{i}] during '
f'reposition:\n'
f' fqme: {fqme}\n'
f' timeframe: {timeframe}s\n'
f' start_time: {start_time}\n'
f' end_time: {end_time}\n'
f' array time range: '
f'{array["time"][0]} -> {array["time"][-1]}\n'
)
continue
start_idx = start_row['index']
end_idx = end_row['index']
start_close = start_row['close']
end_open = end_row['open']
from_idx: float = 0.16 - 0.06
start_x = start_idx + 1 - from_idx
end_x = end_idx + from_idx
# update rect in array
rect_memory[i, 0] = start_x
rect_memory[i, 1] = min(start_close, end_open)
rect_memory[i, 2] = end_x - start_x
rect_memory[i, 3] = abs(end_open - start_close)
# rebuild arrow path with new indices
self._arrow_path.clear()
for spec in self._gap_specs:
time_val = spec.get('time')
if time_val is None:
continue
arrow_row = time_to_row.get(time_val)
if arrow_row is None:
continue
arrow_x = arrow_row['index']
arrow_y = arrow_row['close']
pointing = spec['pointing']
# create arrow polygon
if pointing == 'down':
arrow_poly = QtGui.QPolygonF([
QPointF(arrow_x, arrow_y),
QPointF(
arrow_x - self._arrow_size/2,
arrow_y - self._arrow_size,
),
QPointF(
arrow_x + self._arrow_size/2,
arrow_y - self._arrow_size,
),
])
else: # up
arrow_poly = QtGui.QPolygonF([
QPointF(arrow_x, arrow_y),
QPointF(
arrow_x - self._arrow_size/2,
arrow_y + self._arrow_size,
),
QPointF(
arrow_x + self._arrow_size/2,
arrow_y + self._arrow_size,
),
])
self._arrow_path.addPolygon(arrow_poly)
self._arrow_path.closeSubpath()
# invalidate bounding rect cache
self._br = None
self.prepareGeometryChange()
self.update()

View File

@ -131,11 +131,11 @@ async def _async_main(
async with (
tractor.trionics.collapse_eg(),
trio.open_nursery() as tn,
trio.open_nursery() as root_n,
):
# set root nursery and task stack for spawning other charts/feeds
# that run cached in the bg
godwidget._root_n = tn
godwidget._root_n = root_n
# setup search widget and focus main chart view at startup
# search widget is a singleton alongside the godwidget
@ -165,7 +165,7 @@ async def _async_main(
# load other providers into search **after**
# the chart's select cache
for brokername, mod in needed_brokermods.items():
tn.start_soon(
root_n.start_soon(
load_provider_search,
mod,
loglevel,

View File

@ -20,9 +20,8 @@ Chart axes graphics and behavior.
"""
from __future__ import annotations
from functools import lru_cache
from math import floor
import platform
from typing import Callable
from math import floor
import polars as pl
import pyqtgraph as pg
@ -43,7 +42,6 @@ from ._style import DpiAwareFont, hcolor, _font
from ._interaction import ChartView
from ._dataviz import Viz
_friggin_macos: bool = platform.system() == 'Darwin'
_axis_pen = pg.mkPen(hcolor('bracket'))
@ -77,9 +75,6 @@ class Axis(pg.AxisItem):
self.pi = plotitem
self._dpi_font = _font
# store for later recalculation on zoom
self._typical_max_str = typical_max_str
self.setTickFont(_font.font)
font_size = self._dpi_font.font.pixelSize()
@ -161,42 +156,6 @@ class Axis(pg.AxisItem):
def size_to_values(self) -> None:
pass
def update_fonts(self, font: DpiAwareFont) -> None:
'''Update font and recalculate axis sizing after zoom change.'''
# IMPORTANT: tell Qt we're about to change geometry
self.prepareGeometryChange()
self._dpi_font = font
self.setTickFont(font.font)
font_size = font.font.pixelSize()
# recalculate text offset based on new font size
text_offset = None
if self.orientation in ('bottom',):
text_offset = floor(0.25 * font_size)
elif self.orientation in ('left', 'right'):
text_offset = floor(font_size / 2)
if text_offset:
self.setStyle(tickTextOffset=text_offset)
# recalculate bounding rect with new font
# Note: typical_max_str should be stored from init
if not hasattr(self, '_typical_max_str'):
self._typical_max_str = '100 000.000 ' # fallback default
self.typical_br = font._qfm.boundingRect(self._typical_max_str)
# Update PyQtGraph's internal text size tracking
# This is critical - PyQtGraph uses these internally for auto-expand
if self.orientation in ['left', 'right']:
self.textWidth = self.typical_br.width()
else:
self.textHeight = self.typical_br.height()
# resize axis to fit new font - this triggers PyQtGraph's auto-expand
self.size_to_values()
def txt_offsets(self) -> tuple[int, int]:
return tuple(self.style['tickTextOffset'])
@ -296,22 +255,8 @@ class PriceAxis(Axis):
) -> None:
self._min_tick = size
if _friggin_macos:
def size_to_values(self) -> None:
# Call PyQtGraph's internal width update mechanism
# This respects autoExpandTextSpace and updates min/max constraints
self._updateWidth()
# tell Qt our preferred size changed so layout recalculates
self.updateGeometry()
# force parent plot item to recalculate its layout
if self.pi and hasattr(self.pi, 'updateGeometry'):
self.pi.updateGeometry()
else:
def size_to_values(self) -> None:
# XXX, old code!
self.setWidth(self.typical_br.width())
def size_to_values(self) -> None:
self.setWidth(self.typical_br.width())
# XXX: drop for now since it just eats up h space
@ -354,21 +299,8 @@ class DynamicDateAxis(Axis):
1: '%H:%M:%S',
}
if _friggin_macos:
def size_to_values(self) -> None:
# Call PyQtGraph's internal height update mechanism
# This respects autoExpandTextSpace and updates min/max constraints
self._updateHeight()
# tell Qt our preferred size changed so layout recalculates
self.updateGeometry()
# force parent plot item to recalculate its layout
if self.pi and hasattr(self.pi, 'updateGeometry'):
self.pi.updateGeometry()
else:
def size_to_values(self) -> None:
# XXX, old code!
self.setHeight(self.typical_br.height() + 1)
def size_to_values(self) -> None:
self.setHeight(self.typical_br.height() + 1)
def _indexes_to_timestrs(
self,

View File

@ -1346,6 +1346,7 @@ async def display_symbol_data(
fqmes,
loglevel=loglevel,
tick_throttle=cycles_per_feed,
) as feed,
):
@ -1460,7 +1461,7 @@ async def display_symbol_data(
async with (
tractor.trionics.collapse_eg(),
trio.open_nursery() as tn,
trio.open_nursery() as ln,
):
# if available load volume related built-in display(s)
vlm_charts: dict[
@ -1471,7 +1472,7 @@ async def display_symbol_data(
flume.has_vlm()
and vlm_chart is None
):
vlm_chart = vlm_charts[fqme] = await tn.start(
vlm_chart = vlm_charts[fqme] = await ln.start(
open_vlm_displays,
rt_linked,
flume,
@ -1479,7 +1480,7 @@ async def display_symbol_data(
# load (user's) FSP set (otherwise known as "indicators")
# from an input config.
tn.start_soon(
ln.start_soon(
start_fsp_displays,
rt_linked,
flume,
@ -1603,11 +1604,11 @@ async def display_symbol_data(
# start update loop task
dss: dict[str, DisplayState] = {}
tn.start_soon(
ln.start_soon(
partial(
graphics_update_loop,
dss=dss,
nurse=tn,
nurse=ln,
godwidget=godwidget,
feed=feed,
# min_istream,
@ -1622,6 +1623,7 @@ async def display_symbol_data(
order_ctl_fqme: str = fqmes[0]
mode: OrderMode
async with (
open_order_mode(
feed,
godwidget,

View File

@ -168,7 +168,7 @@ class ArrowEditor(Struct):
'''
uid: str = arrow._uid
arrows: list[pg.ArrowItem] = self._arrows[uid]
log.debug(
log.info(
f'Removing arrow from views\n'
f'uid: {uid!r}\n'
f'{arrow!r}\n'
@ -286,9 +286,7 @@ class LineEditor(Struct):
for line in lines:
line.show_labels()
line.hide_markers()
log.debug(
f'Line active @ level: {line.value()!r}'
)
log.debug(f'Level active for level: {line.value()}')
# TODO: other flashy things to indicate the order is active
return lines
@ -331,11 +329,7 @@ class LineEditor(Struct):
if line in hovered:
hovered.remove(line)
log.debug(
f'Deleting level-line\n'
f'line: {line!r}\n'
f'oid: {uuid!r}\n'
)
log.debug(f'deleting {line} with oid: {uuid}')
line.delete()
# make sure the xhair doesn't get left off
@ -343,11 +337,7 @@ class LineEditor(Struct):
cursor.show_xhair()
else:
log.warning(
f'Could not find line for removal ??\n'
f'\n'
f'{line!r}\n'
)
log.warning(f'Could not find line for {line}')
return lines
@ -579,11 +569,11 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
if update_label:
self.init_label(view_rect)
log.debug(
f'SelectRect modify,\n'
print(
'SelectRect modify:\n'
f'QRectF: {view_rect}\n'
f'start_pos: {start_pos!r}\n'
f'end_pos: {end_pos!r}\n'
f'start_pos: {start_pos}\n'
f'end_pos: {end_pos}\n'
)
self.show()
@ -650,11 +640,8 @@ class SelectRect(QtWidgets.QGraphicsRectItem):
dmn=dmn,
))
# tracing
# log.info(
# f'x2, y2: {(x2, y2)}\n'
# f'xmn, ymn: {(xmn, ymx)}\n'
# )
# print(f'x2, y2: {(x2, y2)}')
# print(f'xmn, ymn: {(xmn, ymx)}')
label_anchor = Point(
xmx + 2,

View File

@ -203,9 +203,6 @@ def run_qtractor(
if is_windows:
window.configure_to_desktop()
# install global keyboard shortcuts for UI zoom
window.install_global_zoom_filter()
# actually render to screen
window.show()
app.exec_()

View File

@ -124,13 +124,6 @@ class Edit(QLineEdit):
self.sizeHint()
self.update()
def update_fonts(self, font: DpiAwareFont) -> None:
'''Update font and recalculate widget size.'''
self.dpi_font = font
self.setFont(font.font)
# tell Qt our size hint changed so it recalculates layout
self.updateGeometry()
def focus(self) -> None:
self.selectAll()
self.show()
@ -248,14 +241,6 @@ class Selection(QComboBox):
icon_size = round(h * 0.75)
self.setIconSize(QSize(icon_size, icon_size))
def update_fonts(self, font: DpiAwareFont) -> None:
'''Update font and recalculate widget size.'''
self.setFont(font.font)
# recalculate heights with new font
self.resize()
# tell Qt our size hint changed so it recalculates layout
self.updateGeometry()
def set_items(
self,
keys: list[str],
@ -446,39 +431,6 @@ class FieldsForm(QWidget):
self.fields[key] = select
return select
def update_fonts(self) -> None:
'''Update font sizes after zoom change.'''
from ._style import _font, _font_small
# update stored font size
self._font_size = _font_small.px_size - 2
# update all labels
for name, label in self.labels.items():
if hasattr(label, 'update_font'):
label.update_font(_font.font, self._font_size - 1)
# update all fields (edits, selects)
for key, field in self.fields.items():
# first check for our custom update_fonts method (Edit, Selection)
if hasattr(field, 'update_fonts'):
field.update_fonts(_font)
# then handle stylesheet updates for those without custom methods
elif hasattr(field, 'setStyleSheet'):
# regenerate stylesheet with new font size
field.setStyleSheet(
f"""QLineEdit {{
color : {hcolor('gunmetal')};
font-size : {self._font_size}px;
}}
"""
)
field.setFont(_font.font)
# for Selection widgets that need style updates
if hasattr(field, 'set_style'):
field.set_style(color='gunmetal', font_size=self._font_size)
async def handle_field_input(
@ -681,37 +633,6 @@ class FillStatusBar(QProgressBar):
self.setRange(0, int(slots))
self.setValue(value)
def update_fonts(self, font_size: int) -> None:
'''Update font size after zoom change.'''
from ._style import _font_small
self.font_size = font_size
# regenerate stylesheet with new font size
self.setStyleSheet(
f"""
QProgressBar {{
text-align: center;
font-size : {self.font_size - 2}px;
background-color: {hcolor('papas_special')};
color : {hcolor('papas_special')};
border: {self.border_px}px solid {hcolor('default_light')};
border-radius: 2px;
}}
QProgressBar::chunk {{
background-color: {hcolor('default_spotlight')};
color: {hcolor('bracket')};
border-radius: 2px;
}}
"""
)
self.setFont(_font_small.font)
def mk_fill_status_bar(

View File

@ -334,19 +334,3 @@ class FormatLabel(QLabel):
out = self.fmt_str.format(**fields)
self.setText(out)
return out
def update_font(
self,
font: QtGui.QFont,
font_size: int,
font_color: str = 'default_lightest',
) -> None:
'''Update font after zoom change.'''
self.setStyleSheet(
f"""QLabel {{
color : {hcolor(font_color)};
font-size : {font_size}px;
}}
"""
)
self.setFont(font)

View File

@ -38,6 +38,7 @@ from piker.ui.qt import (
QtGui,
QGraphicsPathItem,
QStyleOptionGraphicsItem,
QGraphicsItem,
QGraphicsScene,
QWidget,
QPointF,

View File

@ -178,26 +178,6 @@ class SettingsPane:
# encompasing high level namespace
order_mode: OrderMode | None = None # typing: ignore # noqa
def update_fonts(self) -> None:
'''Update font sizes after zoom change.'''
from ._style import _font_small
# update form fields
if self.form and hasattr(self.form, 'update_fonts'):
self.form.update_fonts()
# update fill status bar
if self.fill_bar and hasattr(self.fill_bar, 'update_fonts'):
self.fill_bar.update_fonts(_font_small.px_size)
# update labels with new fonts
if self.step_label:
self.step_label.setFont(_font_small.font)
if self.pnl_label:
self.pnl_label.setFont(_font_small.font)
if self.limit_label:
self.limit_label.setFont(_font_small.font)
def set_accounts(
self,
names: list[str],

View File

@ -22,7 +22,6 @@ a chart from some other actor.
from __future__ import annotations
from contextlib import (
asynccontextmanager as acm,
contextmanager as cm,
AsyncExitStack,
)
from functools import partial
@ -47,7 +46,6 @@ from piker.log import get_logger
from piker.types import Struct
from piker.service import find_service
from piker.brokers import SymbolNotFound
from piker.toolz import Profiler
from piker.ui.qt import (
QGraphicsItem,
)
@ -100,8 +98,6 @@ def rm_annot(
annot: ArrowEditor|SelectRect|pg.TextItem
) -> bool:
global _editors
from piker.ui._annotate import GapAnnotations
match annot:
case pg.ArrowItem():
editor = _editors[annot._uid]
@ -126,35 +122,9 @@ def rm_annot(
scene.removeItem(annot)
return True
case GapAnnotations():
scene = annot.scene()
if scene:
scene.removeItem(annot)
return True
return False
@cm
def no_qt_updates(*items):
'''
Disable Qt widget/item updates during context to batch
render operations and only trigger single repaint on exit.
Accepts both QWidgets and QGraphicsItems.
'''
for item in items:
if hasattr(item, 'setUpdatesEnabled'):
item.setUpdatesEnabled(False)
try:
yield
finally:
for item in items:
if hasattr(item, 'setUpdatesEnabled'):
item.setUpdatesEnabled(True)
async def serve_rc_annots(
ipc_key: str,
annot_req_stream: MsgStream,
@ -459,333 +429,6 @@ async def serve_rc_annots(
aids.add(aid)
await annot_req_stream.send(aid)
case {
'cmd': 'batch',
'fqme': fqme,
'timeframe': timeframe,
'rects': list(rect_specs),
'arrows': list(arrow_specs),
'texts': list(text_specs),
'show_individual_arrows': bool(show_individual_arrows),
}:
# batch submission handler - process multiple
# annotations in single IPC round-trip
ds: DisplayState = _dss[fqme]
try:
chart: ChartPlotWidget = {
60: ds.hist_chart,
1: ds.chart,
}[timeframe]
except KeyError:
msg: str = (
f'No chart for timeframe={timeframe}s, '
f'skipping batch annotation'
)
log.error(msg)
await annot_req_stream.send({'error': msg})
continue
cv: ChartView = chart.cv
viz: Viz = chart.get_viz(fqme)
shm = viz.shm
arr = shm.array
result: dict[str, list[int]] = {
'rects': [],
'arrows': [],
'texts': [],
}
profiler = Profiler(
msg=(
f'Batch annotate {len(rect_specs)} gaps '
f'on {fqme}@{timeframe}s'
),
disabled=False,
delayed=False,
)
aids_set: set[int] = ctxs[ipc_key][1]
# build unified gap_specs for GapAnnotations class
from piker.ui._annotate import GapAnnotations
gap_specs: list[dict] = []
n_gaps: int = max(
len(rect_specs),
len(arrow_specs),
)
profiler('setup batch annot creation')
# collect all unique timestamps for vectorized lookup
timestamps: list[float] = []
for rect_spec in rect_specs:
if start_time := rect_spec.get('start_time'):
timestamps.append(start_time)
if end_time := rect_spec.get('end_time'):
timestamps.append(end_time)
for arrow_spec in arrow_specs:
if time_val := arrow_spec.get('time'):
timestamps.append(time_val)
profiler('collect `timestamps: list` complet!')
# build timestamp -> row mapping using binary search
# O(m log n) instead of O(n*m) with np.isin
time_to_row: dict[float, dict] = {}
if timestamps:
import numpy as np
time_arr = arr['time']
ts_array = np.array(timestamps)
# binary search for each timestamp in sorted time array
search_indices = np.searchsorted(
time_arr,
ts_array,
)
profiler('`np.searchsorted()` complete!')
# vectorized bounds check and exact match verification
valid_mask = (
(search_indices < len(arr))
& (time_arr[search_indices] == ts_array)
)
# get all valid indices and timestamps
valid_indices = search_indices[valid_mask]
valid_timestamps = ts_array[valid_mask]
# use fancy indexing to get all rows at once
matched_rows = arr[valid_indices]
# extract fields to plain arrays BEFORE dict building
indices_arr = matched_rows['index'].astype(float)
opens_arr = matched_rows['open'].astype(float)
closes_arr = matched_rows['close'].astype(float)
profiler('extracted field arrays')
# build dict from plain arrays (much faster)
time_to_row: dict[float, dict] = {
float(ts): {
'index': idx,
'open': opn,
'close': cls,
}
for (
ts,
idx,
opn,
cls,
) in zip(
valid_timestamps,
indices_arr,
opens_arr,
closes_arr,
)
}
profiler('`time_to_row` creation complete!')
profiler(f'built timestamp lookup for {len(timestamps)} times')
# build gap_specs from rect+arrow specs
for i in range(n_gaps):
gap_spec: dict = {}
# get rect spec for this gap
if i < len(rect_specs):
rect_spec: dict = rect_specs[i].copy()
start_time = rect_spec.get('start_time')
end_time = rect_spec.get('end_time')
if (
start_time is not None
and end_time is not None
):
# lookup from pre-built mapping
start_row = time_to_row.get(start_time)
end_row = time_to_row.get(end_time)
if (
start_row is None
or end_row is None
):
log.warning(
f'Timestamp lookup failed for '
f'gap[{i}], skipping'
)
continue
start_idx = start_row['index']
end_idx = end_row['index']
start_close = start_row['close']
end_open = end_row['open']
from_idx: float = 0.16 - 0.06
gap_spec['start_pos'] = (
start_idx + 1 - from_idx,
start_close,
)
gap_spec['end_pos'] = (
end_idx + from_idx,
end_open,
)
gap_spec['start_time'] = start_time
gap_spec['end_time'] = end_time
gap_spec['color'] = rect_spec.get(
'color',
'dad_blue',
)
# get arrow spec for this gap
if i < len(arrow_specs):
arrow_spec: dict = arrow_specs[i].copy()
x: float = float(arrow_spec.get('x', 0))
y: float = float(arrow_spec.get('y', 0))
time_val: float|None = arrow_spec.get('time')
# timestamp-based index lookup (only for x, NOT y!)
# y is already set to the PREVIOUS bar's close
if time_val is not None:
arrow_row = time_to_row.get(time_val)
if arrow_row is not None:
x = arrow_row['index']
# NOTE: do NOT update y! it's the
# previous bar's close, not current
else:
log.warning(
f'Arrow timestamp {time_val} not '
f'found for gap[{i}], using x={x}'
)
gap_spec['arrow_x'] = x
gap_spec['arrow_y'] = y
gap_spec['time'] = time_val
gap_spec['pointing'] = arrow_spec.get(
'pointing',
'down',
)
gap_spec['alpha'] = arrow_spec.get('alpha', 169)
gap_specs.append(gap_spec)
profiler(f'built {len(gap_specs)} gap_specs')
# create single GapAnnotations item for all gaps
if gap_specs:
gaps_item = GapAnnotations(
gap_specs=gap_specs,
array=arr,
color=gap_specs[0].get('color', 'dad_blue'),
alpha=gap_specs[0].get('alpha', 169),
arrow_size=10.0,
fqme=fqme,
timeframe=timeframe,
)
chart.plotItem.addItem(gaps_item)
# register single item for repositioning
aid: int = id(gaps_item)
annots[aid] = gaps_item
aids_set.add(aid)
result['rects'].append(aid)
profiler(
f'created GapAnnotations item for {len(gap_specs)} '
f'gaps'
)
# A/B comparison: optionally create individual arrows
# alongside batch for visual comparison
if show_individual_arrows:
godw = chart.linked.godwidget
arrows: ArrowEditor = ArrowEditor(godw=godw)
for i, spec in enumerate(gap_specs):
if 'arrow_x' not in spec:
continue
aid_str: str = str(uuid4())
arrow: pg.ArrowItem = arrows.add(
plot=chart.plotItem,
uid=aid_str,
x=spec['arrow_x'],
y=spec['arrow_y'],
pointing=spec['pointing'],
color='bracket', # different color
alpha=spec.get('alpha', 169),
headLen=10.0,
headWidth=2.222,
pxMode=True,
)
arrow._abs_x = spec['arrow_x']
arrow._abs_y = spec['arrow_y']
annots[aid_str] = arrow
_editors[aid_str] = arrows
aids_set.add(aid_str)
result['arrows'].append(aid_str)
profiler(
f'created {len(gap_specs)} individual arrows '
f'for comparison'
)
# handle text items separately (less common, keep
# individual items)
n_texts: int = 0
for text_spec in text_specs:
kwargs: dict = text_spec.copy()
text: str = kwargs.pop('text')
x: float = float(kwargs.pop('x'))
y: float = float(kwargs.pop('y'))
time_val: float|None = kwargs.pop('time', None)
# timestamp-based index lookup
if time_val is not None:
matches = arr[arr['time'] == time_val]
if len(matches) > 0:
x = float(matches[0]['index'])
y = float(matches[0]['close'])
color = kwargs.pop('color', 'dad_blue')
anchor = kwargs.pop('anchor', (0, 1))
font_size = kwargs.pop('font_size', None)
text_item: pg.TextItem = pg.TextItem(
text,
color=hcolor(color),
anchor=anchor,
)
if font_size is None:
from ._style import get_fonts
font, font_small = get_fonts()
font_size = font_small.px_size - 1
qfont: QFont = text_item.textItem.font()
qfont.setPixelSize(font_size)
text_item.setFont(qfont)
text_item.setPos(float(x), float(y))
chart.plotItem.addItem(text_item)
text_item._abs_x = float(x)
text_item._abs_y = float(y)
aid: str = str(uuid4())
annots[aid] = text_item
aids_set.add(aid)
result['texts'].append(aid)
n_texts += 1
profiler(
f'created text annotations: {n_texts} texts'
)
profiler.finish()
await annot_req_stream.send(result)
case {
'cmd': 'remove',
'aid': int(aid)|str(aid),
@ -828,26 +471,10 @@ async def serve_rc_annots(
# XXX: reposition all annotations to ensure they
# stay aligned with viz data after reset (eg during
# backfill when abs-index range changes)
chart: ChartPlotWidget = {
60: ds.hist_chart,
1: ds.chart,
}[timeframe]
viz: Viz = chart.get_viz(fqme)
arr = viz.shm.array
n_repositioned: int = 0
for aid, annot in annots.items():
# GapAnnotations batch items have .reposition()
if hasattr(annot, 'reposition'):
annot.reposition(
array=arr,
fqme=fqme,
timeframe=timeframe,
)
n_repositioned += 1
# arrows and text items use abs x,y coords
elif (
if (
hasattr(annot, '_abs_x')
and
hasattr(annot, '_abs_y')
@ -912,21 +539,12 @@ async def remote_annotate(
finally:
# ensure all annots for this connection are deleted
# on any final teardown
profiler = Profiler(
msg=f'Annotation teardown for ctx {ctx.cid}',
disabled=False,
ms_threshold=0.0,
)
(_ctx, aids) = _ctxs[ctx.cid]
assert _ctx is ctx
profiler(f'got {len(aids)} aids to remove')
for aid in aids:
annot: QGraphicsItem = _annots[aid]
assert rm_annot(annot)
profiler(f'removed all {len(aids)} annotations')
class AnnotCtl(Struct):
'''
@ -1128,64 +746,6 @@ class AnnotCtl(Struct):
)
return aid
async def add_batch(
self,
fqme: str,
timeframe: float,
rects: list[dict]|None = None,
arrows: list[dict]|None = None,
texts: list[dict]|None = None,
show_individual_arrows: bool = False,
from_acm: bool = False,
) -> dict[str, list[int]]:
'''
Batch submit multiple annotations in single IPC msg for
much faster remote annotation vs. per-annot round-trips.
Returns dict of annotation IDs:
{
'rects': [aid1, aid2, ...],
'arrows': [aid3, aid4, ...],
'texts': [aid5, aid6, ...],
}
'''
ipc: MsgStream = self._get_ipc(fqme)
with trio.fail_after(10):
await ipc.send({
'fqme': fqme,
'cmd': 'batch',
'timeframe': timeframe,
'rects': rects or [],
'arrows': arrows or [],
'texts': texts or [],
'show_individual_arrows': show_individual_arrows,
})
result: dict = await ipc.receive()
match result:
case {'error': str(msg)}:
log.error(msg)
return {
'rects': [],
'arrows': [],
'texts': [],
}
# register all AIDs with their IPC streams
for aid_list in result.values():
for aid in aid_list:
self._ipcs[aid] = ipc
if not from_acm:
self._annot_stack.push_async_callback(
partial(
self.remove,
aid,
)
)
return result
async def add_text(
self,
fqme: str,
@ -1321,14 +881,3 @@ async def open_annot_ctl(
_annot_stack=annots_stack,
)
yield client
# client exited, measure teardown time
teardown_profiler = Profiler(
msg='Client AnnotCtl teardown',
disabled=False,
ms_threshold=0.0,
)
teardown_profiler('exiting annots_stack')
teardown_profiler('annots_stack exited')
teardown_profiler('exiting gather_contexts')

View File

@ -174,13 +174,6 @@ class CompleterView(QTreeView):
self.setStyleSheet(f"font: {size}px")
def update_fonts(self) -> None:
'''Update font sizes after zoom change.'''
self.set_font_size(_font.px_size)
self.setIndentation(_font.px_size)
self.setFont(_font.font)
self.updateGeometry()
def resize_to_results(
self,
w: float | None = 0,
@ -637,29 +630,6 @@ class SearchWidget(QtWidgets.QWidget):
| align_flag.AlignLeft,
)
def update_fonts(self) -> None:
'''Update font sizes after zoom change.'''
# regenerate label stylesheet with new font size
self.label.setStyleSheet(
f"""QLabel {{
color : {hcolor('default_lightest')};
font-size : {_font.px_size - 2}px;
}}
"""
)
self.label.setFont(_font.font)
# update search bar and view fonts
if hasattr(self.bar, 'update_fonts'):
self.bar.update_fonts(_font)
elif hasattr(self.bar, 'setFont'):
self.bar.setFont(_font.font)
if hasattr(self.view, 'update_fonts'):
self.view.update_fonts()
self.updateGeometry()
def focus(self) -> None:
self.show()
self.bar.focus()

View File

@ -79,13 +79,9 @@ class DpiAwareFont:
self._font_inches: float = None
self._screen = None
def _set_qfont_px_size(
self,
px_size: int,
) -> int:
self._qfont.setPixelSize(int(px_size))
def _set_qfont_px_size(self, px_size: int) -> None:
self._qfont.setPixelSize(px_size)
self._qfm = QtGui.QFontMetrics(self._qfont)
return self.px_size
@property
def screen(self) -> QtGui.QScreen:
@ -128,22 +124,17 @@ class DpiAwareFont:
return size
def configure_to_dpi(
self,
screen: QtGui.QScreen | None = None,
zoom_level: float = 1.0,
) -> int:
def configure_to_dpi(self, screen: QtGui.QScreen | None = None):
'''
Set an appropriately sized font size depending on the screen DPI
or scale the size according to `zoom_level`.
Set an appropriately sized font size depending on the screen DPI.
If we end up needing to generalize this more here there are
resources listed in the script in
``snippets/qt_screen_info.py``.
If we end up needing to generalize this more here there are resources
listed in the script in ``snippets/qt_screen_info.py``.
'''
if self._font_size is not None:
return self._set_qfont_px_size(self._font_size * zoom_level)
self._set_qfont_px_size(self._font_size)
return
# NOTE: if no font size set either in the [ui] section of the
# config or not yet computed from our magic scaling calcs,
@ -162,7 +153,7 @@ class DpiAwareFont:
ldpi = pdpi
mx_dpi = max(pdpi, ldpi)
# mn_dpi = min(pdpi, ldpi)
mn_dpi = min(pdpi, ldpi)
scale = round(ldpi/pdpi, ndigits=2)
if mx_dpi <= 97: # for low dpi use larger font sizes
@ -171,7 +162,7 @@ class DpiAwareFont:
else: # hidpi use smaller font sizes
inches = _font_sizes['hi'][self._font_size_calc_key]
# dpi = mn_dpi
dpi = mn_dpi
mult = 1.0
@ -206,25 +197,24 @@ class DpiAwareFont:
# always going to hit that error in range mapping from inches:
# float to px size: int.
self._font_inches = inches
font_size = math.floor(inches * pdpi)
# apply zoom level multiplier
font_size = int(font_size * zoom_level)
font_size = math.floor(inches * dpi)
log.debug(
f"screen:{screen.name()}\n"
f"pDPI: {pdpi}, lDPI: {ldpi}, scale: {scale}\n"
f"zoom_level: {zoom_level}\n"
f"\nOur best guess font size is {font_size}\n"
)
# apply the size
return self._set_qfont_px_size(font_size)
self._set_qfont_px_size(font_size)
def boundingRect(self, value: str) -> QtCore.QRectF:
if self.screen is None:
screen = self.screen
if screen is None:
raise RuntimeError("You must call .configure_to_dpi() first!")
unscaled_br: QtCore.QRectF = self._qfm.boundingRect(value)
unscaled_br = self._qfm.boundingRect(value)
return QtCore.QRectF(
0,
0,
@ -238,22 +228,12 @@ _font = DpiAwareFont()
_font_small = DpiAwareFont(_font_size_key='small')
def _config_fonts_to_screen(
zoom_level: float = 1.0
) -> int:
'''
Configure global DPI aware font size(s).
def _config_fonts_to_screen() -> None:
'configure global DPI aware font sizes'
If `zoom_level` is provided we apply it to auto-calculated
DPI-aware font.
Return the new `DpiAwareFont.px_size`.
'''
global _font, _font_small
_font.configure_to_dpi(zoom_level=zoom_level)
_font_small.configure_to_dpi(zoom_level=zoom_level)
return _font.px_size
_font.configure_to_dpi()
_font_small.configure_to_dpi()
def get_fonts() -> tuple[

View File

@ -18,7 +18,6 @@
Qt main window singletons and stuff.
"""
from __future__ import annotations
import os
import signal
import time
@ -39,107 +38,15 @@ from piker.ui.qt import (
QScreen,
QCloseEvent,
QSettings,
QEvent,
QObject,
)
from ..log import get_logger
from . import _style
from ._style import (
_font_small,
hcolor,
)
from ._style import _font_small, hcolor
from ._widget import GodWidget
log = get_logger(__name__)
class GlobalZoomEventFilter(QObject):
'''
Application-level event filter for global UI zoom shortcuts.
This filter intercepts keyboard events BEFORE they reach widgets,
allowing us to implement global UI zoom shortcuts that take precedence
over widget-specific shortcuts.
Shortcuts:
- Ctrl+Shift+Plus/Equal: Zoom in
- Ctrl+Shift+Minus: Zoom out
- Ctrl+Shift+0: Reset zoom
'''
def __init__(self, main_window: MainWindow):
super().__init__()
self.main_window = main_window
def eventFilter(self, obj: QObject, event: QEvent) -> bool:
'''
Filter keyboard events for global zoom shortcuts.
Returns True to filter out (consume) the event, False to pass through.
'''
if event.type() == QEvent.Type.KeyPress:
key = event.key()
mods = event.modifiers()
# Mask out the KeypadModifier which Qt sometimes adds
mods = mods & ~Qt.KeyboardModifier.KeypadModifier
# Check if we have Ctrl+Shift (both required)
has_ctrl = bool(
mods
&
Qt.KeyboardModifier.ControlModifier
)
_has_shift = bool(
mods
&
Qt.KeyboardModifier.ShiftModifier
)
# Only handle UI zoom if BOTH Ctrl and Shift are pressed
# For Plus key: user presses Cmd+Shift+Equal (which makes Plus)
# For Minus key: user presses Cmd+Shift+Minus
if (
has_ctrl
# and
# has_shift
):
# Zoom in: Ctrl+Shift+Plus
# Note: Plus key usually comes as Key_Equal with Shift modifier
if key in (
Qt.Key.Key_Plus,
Qt.Key.Key_Equal,
):
self.main_window.zoom_in()
return True # consume event
# Zoom out: Ctrl+Shift+Minus
# Note: On some keyboards Shift+Minus produces '_' (Underscore)
elif key in (
Qt.Key.Key_Minus,
Qt.Key.Key_Underscore,
):
self.main_window.zoom_out()
return True # consume event
# Reset zoom: Ctrl+Shift+0
# Note: On some keyboards Shift+0 produces ')' (ParenRight)
elif key in (
Qt.Key.Key_0,
Qt.Key.Key_ParenRight,
):
self.main_window.reset_zoom()
return True # consume event
# Pass through if only Ctrl (no Shift) - this goes to chart zoom
# Pass through all other events too
return False
return False
class MultiStatus:
bar: QStatusBar
@ -282,24 +189,6 @@ class MainWindow(QMainWindow):
self.restoreGeometry(geometry)
log.debug('Restored window geometry from previous session')
# zoom level for UI scaling (1.0 = 100%, 1.5 = 150%, etc)
# Change this value to set the default startup zoom level
self._zoom_level: float = 1.0 # Start at 100% (normal)
self._min_zoom: float = 0.5
self._max_zoom: float = 3.0 # Reduced from 10.0 to prevent extreme cropping
self._zoom_step: float = 0.2 # 20% per keypress
# event filter for global zoom shortcuts
self._zoom_filter: GlobalZoomEventFilter | None = None
def install_global_zoom_filter(self) -> None:
'''Install application-level event filter for global UI zoom shortcuts.'''
if self._zoom_filter is None:
self._zoom_filter = GlobalZoomEventFilter(self)
app = QApplication.instance()
app.installEventFilter(self._zoom_filter)
log.info('Installed global zoom shortcuts: Ctrl+Shift+Plus/Minus/0')
@property
def mode_label(self) -> QLabel:
@ -468,201 +357,6 @@ class MainWindow(QMainWindow):
self.godwidget.on_win_resize(event)
event.accept()
def zoom_in(self) -> None:
'''
Increase overall UI-widgets zoom level by scaling it the
global font sizes.
'''
new_zoom: float = min(
self._zoom_level + self._zoom_step,
self._max_zoom,
)
if new_zoom != self._zoom_level:
self._zoom_level = new_zoom
font_size: int = self._apply_zoom()
log.info(
f'Zoomed in UI\n'
f'zoom_step: {self._zoom_step!r}\n'
f'zoom_level(%): {self._zoom_level:.1%}\n'
f'font_size: {font_size!r}'
)
def zoom_out(self) -> float:
'''
Decrease UI zoom level.
'''
new_zoom: float = max(self._zoom_level - self._zoom_step, self._min_zoom)
if new_zoom != self._zoom_level:
self._zoom_level = new_zoom
font_size: int = self._apply_zoom()
log.info(
f'Zoomed out UI\n'
f'zoom_step: {self._zoom_step!r}\n'
f'zoom_level(%): {self._zoom_level:.1%}\n'
f'font_size: {font_size!r}'
)
return new_zoom
def reset_zoom(self) -> None:
'''
Reset UI zoom to 100%.
'''
if self._zoom_level != 1.0:
self._zoom_level = 1.0
font_size: int = self._apply_zoom()
log.info(
f'Reset zoom level\n'
f'zoom_step: {self._zoom_step!r}\n'
f'zoom_level(%): {self._zoom_level:.1%}\n'
f'font_size: {font_size!r}'
)
return self._zoom_level
def _apply_zoom(self) -> int:
'''
Apply current zoom level to all UI elements.
'''
# reconfigure fonts with zoom multiplier
font_size: int = _style._config_fonts_to_screen(
zoom_level=self._zoom_level
)
# update status bar styling with new font size
if self._status_bar:
sb = self.statusBar()
sb.setStyleSheet((
f"color : {hcolor('gunmetal')};"
f"background : {hcolor('default_dark')};"
f"font-size : {_style._font_small.px_size}px;"
"padding : 0px;"
))
# force update of mode label if it exists
if self._status_label:
self._status_label.setFont(_style._font_small.font)
# update godwidget and its children
if self.godwidget:
# update search widget if it exists
if hasattr(self.godwidget, 'search') and self.godwidget.search:
self.godwidget.search.update_fonts()
# update order mode panes in all chart views
self._update_chart_order_panes()
# recursively update all other widgets with stylesheets
self._refresh_widget_fonts(self.godwidget)
self.godwidget.update()
return font_size
def _update_chart_order_panes(self) -> None:
'''
Update order entry panels in all charts.
'''
if not self.godwidget:
return
# iterate through all linked splits (hist and rt)
for splits_name in ['hist_linked', 'rt_linked']:
splits = getattr(self.godwidget, splits_name, None)
if not splits:
continue
# get main chart
chart = getattr(splits, 'chart', None)
if chart:
# update axes
self._update_chart_axes(chart)
# update order pane
if hasattr(chart, 'view'):
view = chart.view
if hasattr(view, 'order_mode') and view.order_mode:
order_mode = view.order_mode
if hasattr(order_mode, 'pane') and order_mode.pane:
order_mode.pane.update_fonts()
# also check subplots
subplots = getattr(splits, 'subplots', {})
for name, subplot_chart in subplots.items():
# update subplot axes
self._update_chart_axes(subplot_chart)
# update subplot order pane
if hasattr(subplot_chart, 'view'):
subplot_view = subplot_chart.view
if hasattr(subplot_view, 'order_mode') and subplot_view.order_mode:
subplot_order_mode = subplot_view.order_mode
if hasattr(subplot_order_mode, 'pane') and subplot_order_mode.pane:
subplot_order_mode.pane.update_fonts()
# resize all sidepanes to match main chart's sidepane width
# this ensures volume/subplot sidepanes match the main chart
if splits and hasattr(splits, 'resize_sidepanes'):
splits.resize_sidepanes()
def _update_chart_axes(self, chart) -> None:
'''Update axis fonts and sizing for a chart.'''
from . import _style
# update price axis (right side)
if hasattr(chart, 'pi') and chart.pi:
plot_item = chart.pi
# get all axes from plot item
for axis_name in ['left', 'right', 'bottom', 'top']:
axis = plot_item.getAxis(axis_name)
if axis and hasattr(axis, 'update_fonts'):
axis.update_fonts(_style._font)
# force plot item to recalculate its entire layout
plot_item.updateGeometry()
# force chart widget to update
if hasattr(chart, 'updateGeometry'):
chart.updateGeometry()
# trigger a full scene update
if hasattr(chart, 'update'):
chart.update()
def _refresh_widget_fonts(self, widget: QWidget) -> None:
'''
Recursively update font sizes in all child widgets.
This handles widgets that have font-size hardcoded in their stylesheets.
'''
from . import _style
# recursively process all children
for child in widget.findChildren(QWidget):
# skip widgets that have their own update_fonts method (handled separately)
if hasattr(child, 'update_fonts'):
continue
# update child's stylesheet if it has font-size
child_stylesheet = child.styleSheet()
if child_stylesheet and 'font-size' in child_stylesheet:
# for labels and simple widgets, regenerate stylesheet
# this is a heuristic - may need refinement
try:
child.setFont(_style._font.font)
except (AttributeError, RuntimeError):
pass
# update child's font
try:
child.setFont(_style._font.font)
except (AttributeError, RuntimeError):
pass
# singleton app per actor
_qt_win: QMainWindow = None

View File

@ -167,7 +167,7 @@ async def stream_symbol_selection():
async def _async_main(
name: str,
portal: tractor.Portal,
portal: tractor._portal.Portal,
symbols: List[str],
brokermod: ModuleType,
loglevel: str = 'info',

View File

@ -436,7 +436,7 @@ class OptionChain(object):
async def new_chain_ui(
portal: tractor.Portal,
portal: tractor._portal.Portal,
symbol: str,
brokermod: types.ModuleType,
rate: int = 1,

View File

@ -1022,22 +1022,13 @@ async def open_order_mode(
started.set()
for oid, msg in ems_dialog_msgs.items():
# HACK ALERT: ensure a resp field is filled out since
# techincally the call below expects a ``Status``. TODO:
# parse into proper ``Status`` equivalents ems-side?
# msg.setdefault('resp', msg['broker_details']['resp'])
# msg.setdefault('oid', msg['broker_details']['oid'])
ya_msg: dict = msg.setdefault(
'brokerd_msg',
msg,
)
if msg is not ya_msg:
log.warning(
f'A `.brokerd_msg` was already set for ems-dialog msg?\n'
f'oid: {oid!r}\n'
f'ya_msg: {ya_msg!r}\n'
f'msg: {ya_msg!r}\n'
)
msg['brokerd_msg'] = msg
await process_trade_msg(
mode,

View File

@ -42,7 +42,6 @@ from PyQt6.QtCore import (
QSize,
QModelIndex,
QItemSelectionModel,
QObject,
pyqtBoundSignal,
pyqtRemoveInputHook,
QSettings,

1263
poetry.lock generated 100644

File diff suppressed because it is too large Load Diff

View File

@ -106,7 +106,7 @@ default-groups = [
[dependency-groups]
uis = [
"pyqtgraph >= 0.14.0",
"pyqtgraph",
"qdarkstyle >=3.0.2, <4.0.0",
"pyqt6 >=6.7.0, <7.0.0",
@ -193,12 +193,9 @@ include = ["piker"]
[tool.uv.sources]
pyqtgraph = { git = "https://github.com/pikers/pyqtgraph.git" }
tomlkit = { git = "https://github.com/pikers/tomlkit.git", branch ="piker_pin" }
pyvnc = { git = "https://github.com/regulad/pyvnc.git" }
# pyqtgraph = { git = "https://github.com/pyqtgraph/pyqtgraph.git", branch = 'master' }
# pyqtgraph = { path = '../pyqtgraph', editable = true }
# ?TODO, resync our fork?
# pyqtgraph = { git = "https://github.com/pikers/pyqtgraph.git" }
# to get fancy next-cmd/suggestion feats prior to 0.22.2 B)
# https://github.com/xonsh/xonsh/pull/6037
@ -206,8 +203,8 @@ pyvnc = { git = "https://github.com/regulad/pyvnc.git" }
# xonsh = { git = 'https://github.com/xonsh/xonsh.git', branch = 'main' }
# XXX since, we're like, always hacking new shite all-the-time. Bp
# tractor = { git = "https://github.com/goodboy/tractor.git", branch ="main" }
tractor = { git = "https://github.com/goodboy/tractor.git", branch ="main" }
# tractor = { git = "https://pikers.dev/goodboy/tractor", branch = "piker_pin" }
# ------ goodboy ------
# hackin dev-envs, usually there's something new he's hackin in..
tractor = { path = "../tractor", editable = true }
# tractor = { path = "../tractor", editable = true }

View File

@ -1,64 +0,0 @@
#!env xonsh
'''
Compute the pxs-per-inch (PPI) naively for the local DE.
NOTE, currently this only supports the `sway`-TWM on wayland.
!TODO!
- [ ] support Xorg (and possibly other OSs as well?
- [ ] conver this to pure py code, dropping the `.xsh` specifics
instead for `subprocess` API calls?
- [ ] possibly unify all this with `./qt_screen_info.py` as part of
a "PPI config wizard" or something, but more then likely we'll
have lib-ified version inside modden/piker by then?
'''
import math
import json
# XXX, xonsh part using "subprocess mode"
disp_infos: list[dict] = json.loads($(wlr-randr --json))
lappy: dict = disp_infos[0]
dims: dict[str, int] = lappy['physical_size']
w_cm: int = dims['width']
h_cm: int = dims['height']
# cm per inch
cpi: float = 25.4
# compute "diagonal" size (aka hypot)
diag_inches: float = math.sqrt((h_cm/cpi)**2 + (w_cm/cpi)**2)
# compute reso-hypot / inches-hypot
hi_res: dict[str, float|bool] = lappy['modes'][0]
w_px: int = hi_res['width']
h_px: int = hi_res['height']
diag_pxs: float = math.sqrt(h_px**2 + w_px**2)
unscaled_ppi: float = diag_pxs/diag_inches
# retrieve TWM info on the display (including scaling info)
sway_disp_info: dict = json.loads($(swaymsg -r -t get_outputs))[0]
scale: float = sway_disp_info['scale']
print(
f'output: {sway_disp_info["name"]!r}\n'
f'--- DIMENSIONS ---\n'
f'w_cm: {w_cm!r}\n'
f'h_cm: {h_cm!r}\n'
f'w_px: {w_px!r}\n'
f'h_cm: {h_px!r}\n'
f'\n'
f'--- DIAGONALS ---\n'
f'diag_inches: {diag_inches!r}\n'
f'diag_pxs: {diag_pxs!r}\n'
f'\n'
f'--- PPI-related-info ---\n'
f'(DE reported) scale: {scale!r}\n'
f'unscaled PPI: {unscaled_ppi!r}\n'
f'|_ =sqrt(h_px**2 + w_px**2) / sqrt(h_in**2 + w_in**2)\n'
f'scaled PPI: {unscaled_ppi/scale!r}\n'
f'|_ =unscaled_ppi/scale\n'
)

View File

@ -31,8 +31,8 @@ Resource list for mucking with DPIs on multiple screens:
- https://doc.qt.io/qt-5/qguiapplication.html#screenAt
'''
import os
from pyqtgraph import QtGui
from PyQt6 import (
QtCore,
QtWidgets,
@ -43,11 +43,6 @@ from PyQt6.QtCore import (
QSize,
QRect,
)
from pyqtgraph import QtGui
# https://doc.qt.io/qt-6/highdpi.html#environment-variable-reference
os.environ['QT_USE_PHYSICAL_DPI'] = '1'
# Proper high DPI scaling is available in Qt >= 5.6.0. This attibute
# must be set before creating the application
@ -63,22 +58,13 @@ if hasattr(Qt, 'AA_UseHighDpiPixmaps'):
True,
)
# NOTE, inherits `QGuiApplication`
# https://doc.qt.io/qt-6/qapplication.html
# https://doc.qt.io/qt-6/qguiapplication.html
app = QtWidgets.QApplication([])
#
# ^TODO? various global DPI settings?
# [ ] DPI rounding policy,
# - https://doc.qt.io/qt-6/qt.html#HighDpiScaleFactorRoundingPolicy-enum
# - https://doc.qt.io/qt-6/qguiapplication.html#setHighDpiScaleFactorRoundingPolicy
window = QtWidgets.QMainWindow()
main_widget = QtWidgets.QWidget()
window.setCentralWidget(main_widget)
window.show()
_main_pxr: float = main_widget.devicePixelRatioF()
pxr: float = main_widget.devicePixelRatioF()
# explicitly get main widget and primary displays
current_screen: QtGui.QScreen = app.screenAt(
@ -91,13 +77,7 @@ for screen in app.screens():
name: str = screen.name()
model: str = screen.model().rstrip()
size: QSize = screen.size()
geo: QRect = screen.geometry()
# device-pixel-ratio
# https://doc.qt.io/qt-6/highdpi.html
pxr: float = screen.devicePixelRatio()
unscaled_size: QSize = pxr * size
geo: QRect = screen.availableGeometry()
phydpi: float = screen.physicalDotsPerInch()
logdpi: float = screen.logicalDotsPerInch()
is_primary: bool = screen is primary_screen
@ -108,12 +88,11 @@ for screen in app.screens():
f'|_primary: {is_primary}\n'
f' _current: {is_current}\n'
f' _model: {model}\n'
f' _size: {size}\n'
f' _geometry: {geo}\n'
f' _devicePixelRatio(): {pxr}\n'
f' _unscaled-size: {unscaled_size!r}\n'
f' _physical-dpi: {phydpi}\n'
f' _logical-dpi: {logdpi}\n'
f' _screen size: {size}\n'
f' _screen geometry: {geo}\n'
f' _devicePixelRationF(): {pxr}\n'
f' _physical dpi: {phydpi}\n'
f' _logical dpi: {logdpi}\n'
)
# app-wide font info
@ -131,8 +110,8 @@ str_w: int = str_br.width()
print(
f'------ global font settings ------\n'
f'font dpi: {fontdpi!r}\n'
f'font height: {font_h!r}\n'
f'string bounding rect: {str_br!r}\n'
f'string width : {str_w!r}\n'
f'font dpi: {fontdpi}\n'
f'font height: {font_h}\n'
f'string bounding rect: {str_br}\n'
f'string width : {str_w}\n'
)

View File

@ -92,7 +92,8 @@ def log(
@acm
async def _open_test_pikerd(
tmpconfdir: str,
reg_addr: tuple[str, int|str],
reg_addr: tuple[str, int] | None = None,
loglevel: str = 'warning',
debug_mode: bool = False,
@ -112,10 +113,16 @@ async def _open_test_pikerd(
to boot the root actor / tractor runtime.
'''
import random
from piker.service import maybe_open_pikerd
if reg_addr is None:
port = random.randint(6e3, 7e3)
reg_addr = ('127.0.0.1', port)
async with (
maybe_open_pikerd(
registry_addrs=[reg_addr],
registry_addr=reg_addr,
loglevel=loglevel,
tractor_runtime_overrides={
@ -132,14 +139,13 @@ async def _open_test_pikerd(
async with tractor.wait_for_actor(
'pikerd',
registry_addr=reg_addr,
arbiter_sockaddr=reg_addr,
) as portal:
raddr = portal.chan.raddr
uw_raddr: tuple = raddr.unwrap()
assert uw_raddr == reg_addr
raddr = portal.channel.raddr
assert raddr == reg_addr
yield (
raddr._host,
raddr._port,
raddr[0],
raddr[1],
portal,
service_manager,
)
@ -196,10 +202,7 @@ def open_test_pikerd(
request: pytest.FixtureRequest,
tmp_path: Path,
tmpconfdir: Path,
# XXX from `tractor._testing.pytest` plugin
loglevel: str,
reg_addr: tuple,
):
tmpconfdir_str: str = str(tmpconfdir)
@ -233,13 +236,10 @@ def open_test_pikerd(
# without clobbering each other's config state.
tmpconfdir=tmpconfdir_str,
# NOTE these come verbatim from `tractor`'s builtin plugin!
#
# per-tpt compat registrar address.
reg_addr=reg_addr,
# bind in level from fixture.
# (can be set with `--ll <value>` flag to `pytest`).
# bind in level from fixture, which is itself set by
# `--ll <value>` cli flag.
loglevel=loglevel,
debug_mode=debug_mode,
)

View File

@ -1,36 +0,0 @@
import pytest
from piker.ui._style import DpiAwareFont
class MockScreen:
    '''
    Minimal stand-in for a Qt ``QScreen``: exposes only the DPI and
    name accessor methods exercised by `DpiAwareFont.configure_to_dpi()`.

    '''
    def __init__(self, pdpi, ldpi, name="MockScreen"):
        # stash the fake physical/logical DPI readings and display name
        self._phys_dpi = pdpi
        self._logical_dpi = ldpi
        self._screen_name = name

    def physicalDotsPerInch(self):
        # mirrors `QScreen.physicalDotsPerInch()`
        return self._phys_dpi

    def logicalDotsPerInch(self):
        # mirrors `QScreen.logicalDotsPerInch()`
        return self._logical_dpi

    def name(self):
        # mirrors `QScreen.name()`
        return self._screen_name
@pytest.mark.parametrize(
    "pdpi, ldpi, expected_px",
    [
        # (physical-dpi, logical-dpi) -> expected computed px size
        (96, 96, 9),    # normal DPI
        (169, 96, 15),  # HiDPI
        (120, 96, 10),  # mid-DPI
    ]
)
def test_font_px_size(pdpi, ldpi, expected_px):
    '''
    Verify `DpiAwareFont` derives the expected pixel size from a few
    known physical/logical DPI combinations reported by a fake screen.

    '''
    screen = MockScreen(pdpi, ldpi)
    font = DpiAwareFont()
    font.configure_to_dpi(screen=screen)
    px = font.px_size
    print(f"{pdpi}x{ldpi} DPI -> Computed pixel size: {px}")
    assert px == expected_px

View File

@ -23,35 +23,13 @@ from piker.accounting import (
'fqmes',
[
# binance
(100, {
# !TODO, write a suite which validates raising against
# bad/legacy fqmes such as this!
# 'btcusdt.binance',
'btcusdt.spot.binance',
'ethusdt.spot.binance',
}, False),
(100, {'btcusdt.binance', 'ethusdt.binance'}, False),
# kraken
(20, {
# !TODO, write a suite which validates raising against
# bad/legacy fqmes such as this!
# 'ethusdt.kraken',
# 'xbtusd.kraken',
'ethusdt.spot.kraken',
'xbtusd.spot.kraken',
}, True),
(20, {'ethusdt.kraken', 'xbtusd.kraken'}, True),
# binance + kraken
(100, {
# !TODO, write a suite which validates raising against
# bad/legacy fqmes such as this!
# 'btcusdt.binance',
# 'xbtusd.kraken',
'btcusdt.spot.binance',
'xbtusd.spot.kraken',
}, False),
(100, {'btcusdt.binance', 'xbtusd.kraken'}, False),
],
ids=lambda param: f'quotes={param[0]}@fqmes={param[1]}',
)
@ -70,17 +48,12 @@ def test_multi_fqsn_feed(
if (
ci_env
and
not run_in_ci
and not run_in_ci
):
pytest.skip(
'CI-disabled-test due to live-feed restrictions'
)
pytest.skip('Skipping CI disabled test due to feed restrictions')
brokers = set()
for fqme in fqmes:
# ?TODO, add this unpack + normalize check to a symbology
# helper fn?
brokername, *_ = unpack_fqme(fqme)
brokers.add(brokername)

58
uv.lock
View File

@ -1034,7 +1034,7 @@ requires-dist = [
{ name = "tomli", specifier = ">=2.0.1,<3.0.0" },
{ name = "tomli-w", specifier = ">=1.0.0,<2.0.0" },
{ name = "tomlkit", git = "https://github.com/pikers/tomlkit.git?branch=piker_pin" },
{ name = "tractor", editable = "../tractor" },
{ name = "tractor", git = "https://github.com/goodboy/tractor.git?branch=main" },
{ name = "trio", specifier = ">=0.27" },
{ name = "trio-typing", specifier = ">=0.10.0" },
{ name = "trio-util", specifier = ">=0.7.0,<0.8.0" },
@ -1055,7 +1055,7 @@ dev = [
{ name = "prompt-toolkit", specifier = "==3.0.40" },
{ name = "pyperclip", specifier = ">=1.9.0" },
{ name = "pyqt6", specifier = ">=6.7.0,<7.0.0" },
{ name = "pyqtgraph", specifier = ">=0.14.0" },
{ name = "pyqtgraph", git = "https://github.com/pikers/pyqtgraph.git" },
{ name = "pytest" },
{ name = "qdarkstyle", specifier = ">=3.0.2,<4.0.0" },
{ name = "rapidfuzz", specifier = ">=3.2.0,<4.0.0" },
@ -1073,7 +1073,7 @@ repl = [
testing = [{ name = "pytest" }]
uis = [
{ name = "pyqt6", specifier = ">=6.7.0,<7.0.0" },
{ name = "pyqtgraph", specifier = ">=0.14.0" },
{ name = "pyqtgraph", git = "https://github.com/pikers/pyqtgraph.git" },
{ name = "qdarkstyle", specifier = ">=3.0.2,<4.0.0" },
{ name = "rapidfuzz", specifier = ">=3.2.0,<4.0.0" },
]
@ -1365,15 +1365,11 @@ wheels = [
[[package]]
name = "pyqtgraph"
version = "0.14.0"
source = { registry = "https://pypi.org/simple" }
version = "0.12.3"
source = { git = "https://github.com/pikers/pyqtgraph.git#373f9561ea8ec4fef9b4e8bdcdd4bbf372dd6512" }
dependencies = [
{ name = "colorama" },
{ name = "numpy" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/32/36/4c242f81fdcbfa4fb62a5645f6af79191f4097a0577bd5460c24f19cc4ef/pyqtgraph-0.14.0-py3-none-any.whl", hash = "sha256:7abb7c3e17362add64f8711b474dffac5e7b0e9245abdf992e9a44119b7aa4f5", size = 1924755, upload-time = "2025-11-16T19:43:22.251Z" },
]
[[package]]
name = "pyreadline3"
@ -1680,7 +1676,7 @@ wheels = [
[[package]]
name = "tractor"
version = "0.1.0a6.dev0"
source = { editable = "../tractor" }
source = { git = "https://github.com/goodboy/tractor.git?branch=main#e77198bb64f0467a50e251ed140daee439752354" }
dependencies = [
{ name = "bidict" },
{ name = "cffi" },
@ -1693,48 +1689,6 @@ dependencies = [
{ name = "wrapt" },
]
[package.metadata]
requires-dist = [
{ name = "bidict", specifier = ">=0.23.1" },
{ name = "cffi", specifier = ">=1.17.1" },
{ name = "colorlog", specifier = ">=6.8.2,<7" },
{ name = "msgspec", specifier = ">=0.19.0" },
{ name = "pdbp", specifier = ">=1.8.2,<2" },
{ name = "platformdirs", specifier = ">=4.4.0" },
{ name = "tricycle", specifier = ">=0.4.1,<0.5" },
{ name = "trio", specifier = ">0.27" },
{ name = "wrapt", specifier = ">=1.16.0,<2" },
]
[package.metadata.requires-dev]
dev = [
{ name = "greenback", specifier = ">=1.2.1,<2" },
{ name = "pexpect", specifier = ">=4.9.0,<5" },
{ name = "prompt-toolkit", specifier = ">=3.0.50" },
{ name = "psutil", specifier = ">=7.0.0" },
{ name = "pyperclip", specifier = ">=1.9.0" },
{ name = "pytest", specifier = ">=8.3.5" },
{ name = "stackscope", specifier = ">=0.2.2,<0.3" },
{ name = "typing-extensions", specifier = ">=4.14.1" },
{ name = "xonsh", specifier = ">=0.22.2" },
]
devx = [
{ name = "greenback", specifier = ">=1.2.1,<2" },
{ name = "stackscope", specifier = ">=0.2.2,<0.3" },
{ name = "typing-extensions", specifier = ">=4.14.1" },
]
lint = [{ name = "ruff", specifier = ">=0.9.6" }]
repl = [
{ name = "prompt-toolkit", specifier = ">=3.0.50" },
{ name = "psutil", specifier = ">=7.0.0" },
{ name = "pyperclip", specifier = ">=1.9.0" },
{ name = "xonsh", specifier = ">=0.22.2" },
]
testing = [
{ name = "pexpect", specifier = ">=4.9.0,<5" },
{ name = "pytest", specifier = ">=8.3.5" },
]
[[package]]
name = "tricycle"
version = "0.4.1"