Merge pull request #190 from pikers/ems_to_bidir_streaming

Ems to bidir streaming
ems_hotfixes
goodboy 2021-06-10 08:45:44 -04:00 committed by GitHub
commit 689bc0cde0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 1349 additions and 677 deletions

View File

@ -102,7 +102,9 @@ async def open_pikerd(
assert _services is None assert _services is None
# XXX: this may open a root actor as well # XXX: this may open a root actor as well
async with tractor.open_root_actor( async with (
tractor.open_root_actor(
# passed through to ``open_root_actor`` # passed through to ``open_root_actor``
arbiter_addr=_tractor_kwargs['arbiter_addr'], arbiter_addr=_tractor_kwargs['arbiter_addr'],
name=_root_dname, name=_root_dname,
@ -113,10 +115,10 @@ async def open_pikerd(
# TODO: eventually we should be able to avoid # TODO: eventually we should be able to avoid
# having the root have more then permissions to # having the root have more then permissions to
# spawn other specialized daemons I think? # spawn other specialized daemons I think?
# enable_modules=[__name__],
enable_modules=_root_modules, enable_modules=_root_modules,
) as _,
) as _, tractor.open_nursery() as actor_nursery: tractor.open_nursery() as actor_nursery,
):
async with trio.open_nursery() as service_nursery: async with trio.open_nursery() as service_nursery:
# setup service mngr singleton instance # setup service mngr singleton instance
@ -137,6 +139,7 @@ async def open_pikerd(
async def maybe_open_runtime( async def maybe_open_runtime(
loglevel: Optional[str] = None, loglevel: Optional[str] = None,
**kwargs, **kwargs,
) -> None: ) -> None:
""" """
Start the ``tractor`` runtime (a root actor) if none exists. Start the ``tractor`` runtime (a root actor) if none exists.
@ -159,6 +162,7 @@ async def maybe_open_runtime(
async def maybe_open_pikerd( async def maybe_open_pikerd(
loglevel: Optional[str] = None, loglevel: Optional[str] = None,
**kwargs, **kwargs,
) -> Union[tractor._portal.Portal, Services]: ) -> Union[tractor._portal.Portal, Services]:
"""If no ``pikerd`` daemon-root-actor can be found start it and """If no ``pikerd`` daemon-root-actor can be found start it and
yield up (we should probably figure out returning a portal to self yield up (we should probably figure out returning a portal to self
@ -197,6 +201,66 @@ _data_mods = [
] ]
class Brokerd:
    # Per-service-name locks used to serialize daemon spawning so that
    # two or more concurrent tasks never race to create the same
    # daemon actor (see ``maybe_spawn_daemon``).
    locks = defaultdict(trio.Lock)
@asynccontextmanager
async def maybe_spawn_daemon(

    service_name: str,
    spawn_func: Callable,
    spawn_args: dict[str, Any],
    loglevel: Optional[str] = None,
    **kwargs,

) -> tractor.Portal:
    """
    If no ``service_name`` daemon-actor can be found,
    spawn one in a local subactor and return a portal to it.

    Access is serialized per service name so concurrent callers never
    race to create the same daemon twice.
    """
    if loglevel:
        get_console_log(loglevel)

    # serialize access to this section to avoid
    # 2 or more tasks racing to create a daemon
    lock = Brokerd.locks[service_name]
    await lock.acquire()

    # track whether we've handed the lock back yet so the ``finally``
    # below never double-releases
    released: bool = False

    try:
        # attach to existing daemon if possible
        async with tractor.find_actor(service_name) as portal:
            if portal is not None:
                lock.release()
                released = True
                yield portal
                return

        # ask root ``pikerd`` daemon to spawn the daemon we need; if
        # pikerd is not live we now become the root of the
        # process tree
        async with maybe_open_pikerd(

            loglevel=loglevel,
            **kwargs,

        ) as pikerd_portal:

            if pikerd_portal is None:
                # we are root so spawn the daemon directly in our tree;
                # the root nursery is accessed through process global state
                await spawn_func(**spawn_args)

            else:
                await pikerd_portal.run(
                    spawn_func,
                    **spawn_args,
                )

            async with tractor.wait_for_actor(service_name) as portal:
                lock.release()
                released = True
                yield portal

    finally:
        # XXX: previously an error raised during actor discovery, the
        # spawn path, or the caller's ``async with`` body left the lock
        # held forever, deadlocking every future spawn attempt for this
        # service - always release on the way out.
        if not released:
            lock.release()
async def spawn_brokerd( async def spawn_brokerd(
brokername: str, brokername: str,
@ -205,8 +269,6 @@ async def spawn_brokerd(
) -> tractor._portal.Portal: ) -> tractor._portal.Portal:
from .data import _setup_persistent_brokerd
log.info(f'Spawning {brokername} broker daemon') log.info(f'Spawning {brokername} broker daemon')
brokermod = get_brokermod(brokername) brokermod = get_brokermod(brokername)
@ -226,13 +288,9 @@ async def spawn_brokerd(
**tractor_kwargs **tractor_kwargs
) )
# TODO: so i think this is the perfect use case for supporting
# a cross-actor async context manager api instead of this
# shoort-and-forget task spawned in the root nursery, we'd have an
# async exit stack that we'd register the `portal.open_context()`
# call with and then have the ability to unwind the call whenevs.
# non-blocking setup of brokerd service nursery # non-blocking setup of brokerd service nursery
from .data import _setup_persistent_brokerd
await _services.open_remote_ctx( await _services.open_remote_ctx(
portal, portal,
_setup_persistent_brokerd, _setup_persistent_brokerd,
@ -242,10 +300,6 @@ async def spawn_brokerd(
return dname return dname
class Brokerd:
locks = defaultdict(trio.Lock)
@asynccontextmanager @asynccontextmanager
async def maybe_spawn_brokerd( async def maybe_spawn_brokerd(
@ -253,57 +307,24 @@ async def maybe_spawn_brokerd(
loglevel: Optional[str] = None, loglevel: Optional[str] = None,
**kwargs, **kwargs,
) -> tractor._portal.Portal: ) -> tractor.Portal:
""" '''Helper to spawn a brokerd service.
If no ``brokerd.{brokername}`` daemon-actor can be found,
spawn one in a local subactor and return a portal to it.
""" '''
if loglevel: async with maybe_spawn_daemon(
get_console_log(loglevel)
dname = f'brokerd.{brokername}' f'brokerd.{brokername}',
spawn_func=spawn_brokerd,
# serialize access to this section to avoid spawn_args={'brokername': brokername, 'loglevel': loglevel},
# 2 or more tasks racing to create a daemon
lock = Brokerd.locks[brokername]
await lock.acquire()
# attach to existing brokerd if possible
async with tractor.find_actor(dname) as portal:
if portal is not None:
lock.release()
yield portal
return
# ask root ``pikerd`` daemon to spawn the daemon we need if
# pikerd is not live we now become the root of the
# process tree
async with maybe_open_pikerd(
loglevel=loglevel, loglevel=loglevel,
**kwargs, **kwargs,
) as pikerd_portal:
if pikerd_portal is None: ) as portal:
# we are root so spawn brokerd directly in our tree yield portal
# the root nursery is accessed through process global state
await spawn_brokerd(brokername, loglevel=loglevel)
else:
await pikerd_portal.run(
spawn_brokerd,
brokername=brokername,
loglevel=loglevel,
)
async with tractor.wait_for_actor(dname) as portal:
lock.release()
yield portal
async def spawn_emsd( async def spawn_emsd(
brokername: str,
loglevel: Optional[str] = None, loglevel: Optional[str] = None,
**extra_tractor_kwargs **extra_tractor_kwargs
@ -314,10 +335,10 @@ async def spawn_emsd(
""" """
log.info('Spawning emsd') log.info('Spawning emsd')
# TODO: raise exception when _services == None?
global _services global _services
assert _services
await _services.actor_n.start_actor( portal = await _services.actor_n.start_actor(
'emsd', 'emsd',
enable_modules=[ enable_modules=[
'piker.clearing._ems', 'piker.clearing._ems',
@ -327,4 +348,34 @@ async def spawn_emsd(
debug_mode=_services.debug_mode, # set by pikerd flag debug_mode=_services.debug_mode, # set by pikerd flag
**extra_tractor_kwargs **extra_tractor_kwargs
) )
# non-blocking setup of clearing service
from .clearing._ems import _setup_persistent_emsd
await _services.open_remote_ctx(
portal,
_setup_persistent_emsd,
)
return 'emsd' return 'emsd'
@asynccontextmanager
async def maybe_open_emsd(
    brokername: str,
    loglevel: Optional[str] = None,
    **kwargs,
) -> tractor._portal.Portal:  # noqa
    '''Attach to an ``emsd`` daemon actor, spawning one through the
    service tree if none is running yet.
    '''
    daemon_kwargs = {
        'spawn_func': spawn_emsd,
        'spawn_args': {'loglevel': loglevel},
        'loglevel': loglevel,
        **kwargs,
    }
    async with maybe_spawn_daemon('emsd', **daemon_kwargs) as ems_portal:
        yield ems_portal

View File

@ -25,7 +25,7 @@ from contextlib import asynccontextmanager
from dataclasses import asdict from dataclasses import asdict
from datetime import datetime from datetime import datetime
from functools import partial from functools import partial
from typing import List, Dict, Any, Tuple, Optional, AsyncIterator, Callable from typing import List, Dict, Any, Tuple, Optional, AsyncIterator
import asyncio import asyncio
from pprint import pformat from pprint import pformat
import inspect import inspect
@ -39,7 +39,8 @@ import tractor
from async_generator import aclosing from async_generator import aclosing
from ib_insync.wrapper import RequestError from ib_insync.wrapper import RequestError
from ib_insync.contract import Contract, ContractDetails, Option from ib_insync.contract import Contract, ContractDetails, Option
from ib_insync.order import Order from ib_insync.order import Order, Trade, OrderStatus
from ib_insync.objects import Fill, Execution
from ib_insync.ticker import Ticker from ib_insync.ticker import Ticker
from ib_insync.objects import Position from ib_insync.objects import Position
import ib_insync as ibis import ib_insync as ibis
@ -53,6 +54,12 @@ from .._daemon import maybe_spawn_brokerd
from ..data._source import from_df from ..data._source import from_df
from ..data._sharedmem import ShmArray from ..data._sharedmem import ShmArray
from ._util import SymbolNotFound, NoData from ._util import SymbolNotFound, NoData
from ..clearing._messages import (
BrokerdOrder, BrokerdOrderAck, BrokerdStatus,
BrokerdPosition, BrokerdCancel,
BrokerdFill,
# BrokerdError,
)
log = get_logger(__name__) log = get_logger(__name__)
@ -472,7 +479,7 @@ class Client:
# XXX: by default 0 tells ``ib_insync`` methods that there is no # XXX: by default 0 tells ``ib_insync`` methods that there is no
# existing order so ask the client to create a new one (which it # existing order so ask the client to create a new one (which it
# seems to do by allocating an int counter - collision prone..) # seems to do by allocating an int counter - collision prone..)
brid: int = None, reqid: int = None,
) -> int: ) -> int:
"""Place an order and return integer request id provided by client. """Place an order and return integer request id provided by client.
@ -488,7 +495,7 @@ class Client:
trade = self.ib.placeOrder( trade = self.ib.placeOrder(
contract, contract,
Order( Order(
orderId=brid or 0, # stupid api devs.. orderId=reqid or 0, # stupid api devs..
action=action.upper(), # BUY/SELL action=action.upper(), # BUY/SELL
orderType='LMT', orderType='LMT',
lmtPrice=price, lmtPrice=price,
@ -582,6 +589,7 @@ class Client:
self, self,
to_trio: trio.abc.SendChannel, to_trio: trio.abc.SendChannel,
) -> None: ) -> None:
# connect error msgs # connect error msgs
def push_err( def push_err(
reqId: int, reqId: int,
@ -589,13 +597,16 @@ class Client:
errorString: str, errorString: str,
contract: Contract, contract: Contract,
) -> None: ) -> None:
log.error(errorString) log.error(errorString)
try: try:
to_trio.send_nowait(( to_trio.send_nowait((
'error', 'error',
# error "object" # error "object"
{'reqid': reqId, {'reqid': reqId,
'message': errorString, 'reason': errorString,
'contract': contract} 'contract': contract}
)) ))
except trio.BrokenResourceError: except trio.BrokenResourceError:
@ -635,6 +646,8 @@ async def _aio_get_client(
"""Return an ``ib_insync.IB`` instance wrapped in our client API. """Return an ``ib_insync.IB`` instance wrapped in our client API.
Client instances are cached for later use. Client instances are cached for later use.
TODO: consider doing this with a ctx mngr eventually?
""" """
# first check cache for existing client # first check cache for existing client
@ -738,7 +751,7 @@ async def _trio_run_client_method(
class _MethodProxy: class _MethodProxy:
def __init__( def __init__(
self, self,
portal: tractor._portal.Portal portal: tractor.Portal
) -> None: ) -> None:
self._portal = portal self._portal = portal
@ -755,7 +768,12 @@ class _MethodProxy:
) )
def get_client_proxy(portal, target=Client) -> _MethodProxy: def get_client_proxy(
portal: tractor.Portal,
target=Client,
) -> _MethodProxy:
proxy = _MethodProxy(portal) proxy = _MethodProxy(portal)
@ -843,7 +861,7 @@ async def get_bars(
end_dt: str = "", end_dt: str = "",
) -> (dict, np.ndarray): ) -> (dict, np.ndarray):
_err = None _err: Optional[Exception] = None
fails = 0 fails = 0
for _ in range(2): for _ in range(2):
@ -880,12 +898,12 @@ async def get_bars(
raise NoData(f'Symbol: {sym}') raise NoData(f'Symbol: {sym}')
break break
else: else:
log.exception( log.exception(
"Data query rate reached: Press `ctrl-alt-f`" "Data query rate reached: Press `ctrl-alt-f`"
"in TWS" "in TWS"
) )
print(_err)
# TODO: should probably create some alert on screen # TODO: should probably create some alert on screen
# and then somehow get that to trigger an event here # and then somehow get that to trigger an event here
@ -932,7 +950,7 @@ async def backfill_bars(
if fails is None or fails > 1: if fails is None or fails > 1:
break break
if out is (None, None): if out == (None, None):
# could be trying to retreive bars over weekend # could be trying to retreive bars over weekend
# TODO: add logic here to handle tradable hours and only grab # TODO: add logic here to handle tradable hours and only grab
# valid bars in the range # valid bars in the range
@ -1183,112 +1201,213 @@ def pack_position(pos: Position) -> Dict[str, Any]:
else: else:
symbol = con.symbol symbol = con.symbol
return { return BrokerdPosition(
'broker': 'ib', broker='ib',
'account': pos.account, account=pos.account,
'symbol': symbol, symbol=symbol,
'currency': con.currency, currency=con.currency,
'size': float(pos.position), size=float(pos.position),
'avg_price': float(pos.avgCost) / float(con.multiplier or 1.0), avg_price=float(pos.avgCost) / float(con.multiplier or 1.0),
} )
@tractor.msg.pub( async def handle_order_requests(
send_on_connect={'local_trades': 'start'}
) ems_order_stream: tractor.MsgStream,
async def stream_trades(
) -> None:
# request_msg: dict
async for request_msg in ems_order_stream:
log.info(f'Received order request {request_msg}')
action = request_msg['action']
if action in {'buy', 'sell'}:
# validate
order = BrokerdOrder(**request_msg)
# call our client api to submit the order
reqid = await _trio_run_client_method(
method='submit_limit',
oid=order.oid,
symbol=order.symbol,
price=order.price,
action=order.action,
size=order.size,
# XXX: by default 0 tells ``ib_insync`` methods that
# there is no existing order so ask the client to create
# a new one (which it seems to do by allocating an int
# counter - collision prone..)
reqid=order.reqid,
)
# deliver ack that order has been submitted to broker routing
await ems_order_stream.send(
BrokerdOrderAck(
# ems order request id
oid=order.oid,
# broker specific request id
reqid=reqid,
time_ns=time.time_ns(),
).dict()
)
elif action == 'cancel':
msg = BrokerdCancel(**request_msg)
await _trio_run_client_method(
method='submit_cancel',
reqid=msg.reqid
)
else:
log.error(f'Unknown order command: {request_msg}')
@tractor.context
async def trades_dialogue(
ctx: tractor.Context,
loglevel: str = None, loglevel: str = None,
get_topics: Callable = None,
) -> AsyncIterator[Dict[str, Any]]: ) -> AsyncIterator[Dict[str, Any]]:
# XXX: required to propagate ``tractor`` loglevel to piker logging # XXX: required to propagate ``tractor`` loglevel to piker logging
get_console_log(loglevel or tractor.current_actor().loglevel) get_console_log(loglevel or tractor.current_actor().loglevel)
stream = await _trio_run_client_method( ib_trade_events_stream = await _trio_run_client_method(
method='recv_trade_updates', method='recv_trade_updates',
) )
# deliver positions to subscriber before anything else # deliver positions to subscriber before anything else
positions = await _trio_run_client_method(method='positions') positions = await _trio_run_client_method(method='positions')
all_positions = {}
for pos in positions: for pos in positions:
yield {'local_trades': ('position', pack_position(pos))} msg = pack_position(pos)
all_positions[msg.symbol] = msg.dict()
await ctx.started(all_positions)
action_map = {'BOT': 'buy', 'SLD': 'sell'} action_map = {'BOT': 'buy', 'SLD': 'sell'}
async for event_name, item in stream: async with (
ctx.open_stream() as ems_stream,
trio.open_nursery() as n,
):
# start order request handler **before** local trades event loop
n.start_soon(handle_order_requests, ems_stream)
# XXX: begin normalization of nonsense ib_insync internal async for event_name, item in ib_trade_events_stream:
# object-state tracking representations...
if event_name == 'status': # XXX: begin normalization of nonsense ib_insync internal
# object-state tracking representations...
# unwrap needed data from ib_insync internal objects if event_name == 'status':
trade = item
status = trade.orderStatus
# skip duplicate filled updates - we get the deats # unwrap needed data from ib_insync internal types
# from the execution details event trade: Trade = item
msg = { status: OrderStatus = trade.orderStatus
'reqid': trade.order.orderId,
'status': status.status,
'filled': status.filled,
'reason': status.whyHeld,
# this seems to not be necessarily up to date in the # skip duplicate filled updates - we get the deats
# execDetails event.. so we have to send it here I guess? # from the execution details event
'remaining': status.remaining, msg = BrokerdStatus(
}
elif event_name == 'fill': reqid=trade.order.orderId,
time_ns=time.time_ns(), # cuz why not
status=status.status.lower(), # force lower case
trade, fill = item filled=status.filled,
execu = fill.execution reason=status.whyHeld,
# this seems to not be necessarily up to date in the
# execDetails event.. so we have to send it here I guess?
remaining=status.remaining,
msg = { broker_details={'name': 'ib'},
'reqid': execu.orderId, )
'execid': execu.execId,
# supposedly IB server fill time elif event_name == 'fill':
'broker_time': execu.time, # converted to float by us
# ns from main TCP handler by us inside ``ib_insync`` override
'time': fill.time,
'time_ns': time.time_ns(), # cuz why not
'action': action_map[execu.side],
'size': execu.shares,
'price': execu.price,
}
elif event_name == 'error': # for wtv reason this is a separate event type
msg = item # from IB, not sure why it's needed other then for extra
# complexity and over-engineering :eyeroll:.
# we may just end up dropping these events (or
# translating them to ``Status`` msgs) if we can
# show the equivalent status events are no more latent.
# f$#$% gawd dammit insync.. # unpack ib_insync types
con = msg['contract'] # pep-0526 style:
if isinstance(con, Contract): # https://www.python.org/dev/peps/pep-0526/#global-and-local-variable-annotations
msg['contract'] = asdict(con) trade: Trade
fill: Fill
trade, fill = item
execu: Execution = fill.execution
if msg['reqid'] == -1: # TODO: normalize out commissions details?
log.error(pformat(msg)) details = {
'contract': asdict(fill.contract),
'execution': asdict(fill.execution),
'commissions': asdict(fill.commissionReport),
'broker_time': execu.time, # supposedly IB server fill time
'name': 'ib',
}
# don't forward, it's pointless.. msg = BrokerdFill(
continue # should match the value returned from `.submit_limit()`
reqid=execu.orderId,
time_ns=time.time_ns(), # cuz why not
elif event_name == 'position': action=action_map[execu.side],
msg = pack_position(item) size=execu.shares,
price=execu.price,
if msg.get('reqid', 0) < -1: broker_details=details,
# it's a trade event generated by TWS usage. # XXX: required by order mode currently
log.warning(f"TWS triggered trade:\n{pformat(msg)}") broker_time=details['broker_time'],
msg['reqid'] = 'tws-' + str(-1 * msg['reqid']) )
# mark msg as from "external system" elif event_name == 'error':
# TODO: probably something better then this..
msg['external'] = True
yield {'remote_trades': (event_name, msg)} err: dict = item
continue
yield {'local_trades': (event_name, msg)} # f$#$% gawd dammit insync..
con = err['contract']
if isinstance(con, Contract):
err['contract'] = asdict(con)
if err['reqid'] == -1:
log.error(f'TWS external order error:\n{pformat(err)}')
# don't forward for now, it's unecessary.. but if we wanted to,
# msg = BrokerdError(**err)
continue
elif event_name == 'position':
msg = pack_position(item)
if getattr(msg, 'reqid', 0) < -1:
# it's a trade event generated by TWS usage.
log.warning(f"TWS triggered trade:\n{pformat(msg)}")
msg.reqid = 'tws-' + str(-1 * msg.reqid)
# mark msg as from "external system"
# TODO: probably something better then this.. and start
# considering multiplayer/group trades tracking
msg['external'] = True
continue
# XXX: we always serialize to a dict for msgpack
# translations, ideally we can move to an msgspec (or other)
# encoder # that can be enabled in ``tractor`` ahead of
# time so we can pass through the message types directly.
await ems_stream.send(msg.dict())
@tractor.context @tractor.context

View File

@ -19,32 +19,23 @@ Orders and execution client API.
""" """
from contextlib import asynccontextmanager from contextlib import asynccontextmanager
from typing import Dict, Tuple, List from typing import Dict
from pprint import pformat from pprint import pformat
from dataclasses import dataclass, field from dataclasses import dataclass, field
import trio import trio
import tractor import tractor
# import msgspec
from ..data._source import Symbol from ..data._source import Symbol
from ..log import get_logger from ..log import get_logger
from ._ems import _emsd_main from ._ems import _emsd_main
from .._daemon import maybe_open_emsd
from ._messages import Order, Cancel
log = get_logger(__name__) log = get_logger(__name__)
# class Order(msgspec.Struct):
# action: str
# price: float
# size: float
# symbol: str
# brokers: List[str]
# oid: str
# exec_mode: str
@dataclass @dataclass
class OrderBook: class OrderBook:
"""Buy-side (client-side ?) order book ctl and tracking. """Buy-side (client-side ?) order book ctl and tracking.
@ -62,31 +53,34 @@ class OrderBook:
_to_ems: trio.abc.SendChannel _to_ems: trio.abc.SendChannel
_from_order_book: trio.abc.ReceiveChannel _from_order_book: trio.abc.ReceiveChannel
_sent_orders: Dict[str, dict] = field(default_factory=dict) _sent_orders: Dict[str, Order] = field(default_factory=dict)
_ready_to_receive: trio.Event = trio.Event() _ready_to_receive: trio.Event = trio.Event()
def send( def send(
self, self,
uuid: str, uuid: str,
symbol: str, symbol: str,
brokers: List[str], brokers: list[str],
price: float, price: float,
size: float, size: float,
action: str, action: str,
exec_mode: str, exec_mode: str,
) -> dict: ) -> dict:
cmd = { msg = Order(
'action': action, action=action,
'price': price, price=price,
'size': size, size=size,
'symbol': symbol, symbol=symbol,
'brokers': brokers, brokers=brokers,
'oid': uuid, oid=uuid,
'exec_mode': exec_mode, # dark or live exec_mode=exec_mode, # dark or live
} )
self._sent_orders[uuid] = cmd
self._to_ems.send_nowait(cmd) self._sent_orders[uuid] = msg
return cmd self._to_ems.send_nowait(msg.dict())
return msg
def update( def update(
self, self,
@ -94,29 +88,29 @@ class OrderBook:
**data: dict, **data: dict,
) -> dict: ) -> dict:
cmd = self._sent_orders[uuid] cmd = self._sent_orders[uuid]
cmd.update(data) msg = cmd.dict()
self._sent_orders[uuid] = cmd msg.update(data)
self._to_ems.send_nowait(cmd) self._sent_orders[uuid] = Order(**msg)
self._to_ems.send_nowait(msg)
return cmd return cmd
def cancel(self, uuid: str) -> bool: def cancel(self, uuid: str) -> bool:
"""Cancel an order (or alert) from the EMS. """Cancel an order (or alert) in the EMS.
""" """
cmd = self._sent_orders[uuid] cmd = self._sent_orders[uuid]
msg = { msg = Cancel(
'action': 'cancel', oid=uuid,
'oid': uuid, symbol=cmd.symbol,
'symbol': cmd['symbol'], )
} self._to_ems.send_nowait(msg.dict())
self._to_ems.send_nowait(msg)
_orders: OrderBook = None _orders: OrderBook = None
def get_orders( def get_orders(
emsd_uid: Tuple[str, str] = None emsd_uid: tuple[str, str] = None
) -> OrderBook: ) -> OrderBook:
"""" """"
OrderBook singleton factory per actor. OrderBook singleton factory per actor.
@ -136,7 +130,14 @@ def get_orders(
return _orders return _orders
async def send_order_cmds(symbol_key: str): # TODO: we can get rid of this relay loop once we move
# order_mode inputs to async code!
async def relay_order_cmds_from_sync_code(
symbol_key: str,
to_ems_stream: tractor.MsgStream,
) -> None:
""" """
Order streaming task: deliver orders transmitted from UI Order streaming task: deliver orders transmitted from UI
to downstream consumers. to downstream consumers.
@ -156,16 +157,15 @@ async def send_order_cmds(symbol_key: str):
book = get_orders() book = get_orders()
orders_stream = book._from_order_book orders_stream = book._from_order_book
# signal that ems connection is up and ready
book._ready_to_receive.set()
async for cmd in orders_stream: async for cmd in orders_stream:
print(cmd) print(cmd)
if cmd['symbol'] == symbol_key: if cmd['symbol'] == symbol_key:
# send msg over IPC / wire # send msg over IPC / wire
log.info(f'Send order cmd:\n{pformat(cmd)}') log.info(f'Send order cmd:\n{pformat(cmd)}')
yield cmd await to_ems_stream.send(cmd)
else: else:
# XXX BRUTAL HACKZORZES !!! # XXX BRUTAL HACKZORZES !!!
# re-insert for another consumer # re-insert for another consumer
@ -174,36 +174,12 @@ async def send_order_cmds(symbol_key: str):
book._to_ems.send_nowait(cmd) book._to_ems.send_nowait(cmd)
@asynccontextmanager
async def maybe_open_emsd(
    brokername: str,
) -> tractor._portal.Portal:  # noqa
    """Yield a portal to an ``emsd`` (execution management) daemon,
    asking the root ``pikerd`` daemon to spawn one if none is found
    in the actor registry.
    """
    async with tractor.find_actor('emsd') as portal:
        if portal is not None:
            # an emsd actor is already live; hand its portal straight
            # to the caller
            yield portal
            return

    # ask remote daemon tree to spawn it
    from .._daemon import spawn_emsd

    async with tractor.find_actor('pikerd') as portal:
        # NOTE(review): assumes the root 'pikerd' daemon is already
        # running - confirm callers guarantee this before entry.
        assert portal

        name = await portal.run(
            spawn_emsd,
            brokername=brokername,
        )

        async with tractor.wait_for_actor(name) as portal:
            yield portal
@asynccontextmanager @asynccontextmanager
async def open_ems( async def open_ems(
broker: str, broker: str,
symbol: Symbol, symbol: Symbol,
) -> None:
) -> (OrderBook, tractor.MsgStream, dict):
"""Spawn an EMS daemon and begin sending orders and receiving """Spawn an EMS daemon and begin sending orders and receiving
alerts. alerts.
@ -237,32 +213,31 @@ async def open_ems(
- 'broker_filled' - 'broker_filled'
""" """
actor = tractor.current_actor()
# wait for service to connect back to us signalling # wait for service to connect back to us signalling
# ready for order commands # ready for order commands
book = get_orders() book = get_orders()
async with maybe_open_emsd(broker) as portal: async with maybe_open_emsd(broker) as portal:
async with portal.open_stream_from( async with (
_emsd_main, # connect to emsd
client_actor_name=actor.name, portal.open_context(
broker=broker,
symbol=symbol.key,
) as trades_stream: _emsd_main,
with trio.fail_after(10): broker=broker,
await book._ready_to_receive.wait() symbol=symbol.key,
try: ) as (ctx, positions),
yield book, trades_stream
finally: # open 2-way trade command stream
# TODO: we want to eventually keep this up (by having ctx.open_stream() as trades_stream,
# the exec loop keep running in the pikerd tree) but for ):
# now we have to kill the context to avoid backpressure async with trio.open_nursery() as n:
# build-up on the shm write loop. n.start_soon(
with trio.CancelScope(shield=True): relay_order_cmds_from_sync_code,
await trades_stream.aclose() symbol.key,
trades_stream
)
yield book, trades_stream, positions

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,238 @@
# piker: trading gear for hackers
# Copyright (C) Tyler Goodlet (in stewardship for piker0)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Clearing system messaging types and protocols.
"""
from typing import Optional, Union
# TODO: try out just encoding/send direction for now?
# import msgspec
from pydantic import BaseModel
# Client -> emsd
class Cancel(BaseModel):
    '''Cancel msg for removing a dark (ems triggered) or
    broker-submitted (live) trigger/order.

    '''
    action: str = 'cancel'  # static message discriminator
    oid: str  # uuid4 - the ems order id being cancelled
    symbol: str
class Order(BaseModel):
    '''Client -> emsd request to submit a new order (or alert).

    '''
    action: str  # {'buy', 'sell', 'alert'}

    # internal ``emdsd`` unique "order id"
    oid: str  # uuid4
    symbol: str

    price: float
    size: float
    brokers: list[str]

    # Assigned once initial ack is received
    # ack_time_ns: Optional[int] = None

    # determines whether the create execution
    # will be submitted to the ems or directly to
    # the backend broker
    exec_mode: str  # {'dark', 'live', 'paper'}
# Client <- emsd
# update msgs from ems which relay state change info
# from the active clearing engine.
class Status(BaseModel):
    '''Client <- emsd order-state update relayed from the active
    clearing engine.

    '''
    name: str = 'status'  # static message discriminator
    oid: str  # uuid4 - the ems order this status refers to
    time_ns: int  # wall-clock ns timestamp of the state change

    # {
    #   'dark_submitted',
    #   'dark_cancelled',
    #   'dark_triggered',

    #   'broker_submitted',
    #   'broker_cancelled',
    #   'broker_executed',
    #   'broker_filled',

    #   'alert_submitted',
    #   'alert_triggered',

    #   'position',

    # }
    resp: str  # "response", see above

    # symbol: str

    # trigger info
    trigger_price: Optional[float] = None
    # price: float

    # broker: Optional[str] = None

    # this maps normally to the ``BrokerdOrder.reqid`` below, an id
    # normally allocated internally by the backend broker routing system
    broker_reqid: Optional[Union[int, str]] = None

    # for relaying backend msg data "through" the ems layer
    brokerd_msg: dict = {}
# emsd -> brokerd
# requests *sent* from ems to respective backend broker daemon
class BrokerdCancel(BaseModel):
    '''emsd -> brokerd request to cancel an in-flight broker order.

    '''
    action: str = 'cancel'  # static message discriminator
    oid: str  # piker emsd order id
    time_ns: int  # wall-clock ns timestamp of the cancel request

    # "broker request id": broker specific/internal order id if this is
    # None, creates a new order otherwise if the id is valid the backend
    # api must modify the existing matching order. If the broker allows
    # for setting a unique order id then this value will be relayed back
    # on the emsd order request stream as the ``BrokerdOrderAck.reqid``
    # field
    reqid: Optional[Union[int, str]] = None
class BrokerdOrder(BaseModel):
    '''emsd -> brokerd request to submit (or modify) a live order.

    '''
    action: str  # {buy, sell}
    oid: str  # piker emsd order id
    time_ns: int  # wall-clock ns timestamp of the request

    # "broker request id": broker specific/internal order id if this is
    # None, creates a new order otherwise if the id is valid the backend
    # api must modify the existing matching order. If the broker allows
    # for setting a unique order id then this value will be relayed back
    # on the emsd order request stream as the ``BrokerdOrderAck.reqid``
    # field
    reqid: Optional[Union[int, str]] = None

    symbol: str  # symbol.<providername> ?
    price: float
    size: float
# emsd <- brokerd
# requests *received* to ems from broker backend
class BrokerdOrderAck(BaseModel):
    '''Immediate response to a brokerd order request providing
    the broker specific unique order id.

    '''
    name: str = 'ack'  # static message discriminator

    # defined and provided by backend
    reqid: Union[int, str]

    # emsd id originally sent in matching request msg
    oid: str
class BrokerdStatus(BaseModel):
    '''emsd <- brokerd order-state change event from the backend broker.

    '''
    name: str = 'status'  # static message discriminator
    reqid: Union[int, str]  # broker specific order id
    time_ns: int  # wall-clock ns timestamp of the state change

    # {
    #   'submitted',
    #   'cancelled',
    #   'executed',
    # }
    status: str

    filled: float = 0.0  # cumulative filled size
    reason: str = ''
    remaining: float = 0.0  # size still resting on the book

    # XXX: better design/name here?
    # flag that can be set to indicate a message for an order
    # event that wasn't originated by piker's emsd (eg. some external
    # trading system which does it's own order control but that you
    # might want to "track" using piker UIs/systems).
    external: bool = False

    # XXX: not required schema as of yet
    broker_details: dict = {
        'name': '',
    }
class BrokerdFill(BaseModel):
    '''A single message indicating a "fill-details" event from the broker
    if available.

    '''
    name: str = 'fill'  # static message discriminator
    reqid: Union[int, str]  # broker specific order id
    time_ns: int  # wall-clock ns timestamp when the fill msg was built

    # order execution related
    action: str  # {buy, sell}
    size: float
    price: float

    broker_details: dict = {}  # meta-data (eg. commissions etc.)

    # brokerd timestamp required for order mode arrow placement on x-axis
    # TODO: maybe int if we force ns?
    # we need to normalize this somehow since backends will use their
    # own format and likely across many disparate epoch clocks...
    broker_time: float
class BrokerdError(BaseModel):
    '''Optional error type that can be relayed to emsd for error handling.

    This is still a TODO thing since we're not sure how to employ it yet.
    '''
    name: str = 'error'  # msg discriminator tag
    reqid: Union[int, str]  # broker specific order id the error relates to
    symbol: str
    reason: str  # human readable error description
    broker_details: dict = {}  # backend specific error meta-data
class BrokerdPosition(BaseModel):
    '''Position update event from brokerd.
    '''
    name: str = 'position'  # msg discriminator tag
    broker: str  # backend/provider name
    account: str  # broker account identifier
    symbol: str
    currency: str  # denomination currency of the position
    size: float  # signed position size (presumably negative = short — TODO confirm)
    avg_price: float  # average entry price of the position

View File

@ -18,17 +18,28 @@
Fake trading for forward testing. Fake trading for forward testing.
""" """
from contextlib import asynccontextmanager
from datetime import datetime from datetime import datetime
from operator import itemgetter from operator import itemgetter
import time import time
from typing import Tuple, Optional from typing import Tuple, Optional, Callable
import uuid import uuid
from bidict import bidict from bidict import bidict
import trio import trio
import tractor
from dataclasses import dataclass from dataclasses import dataclass
from .. import data
from ..data._normalize import iterticks from ..data._normalize import iterticks
from ..log import get_logger
from ._messages import (
BrokerdCancel, BrokerdOrder, BrokerdOrderAck, BrokerdStatus,
BrokerdFill,
)
log = get_logger(__name__)
@dataclass @dataclass
@ -41,8 +52,8 @@ class PaperBoi:
""" """
broker: str broker: str
_to_trade_stream: trio.abc.SendChannel
trade_stream: trio.abc.ReceiveChannel ems_trades_stream: tractor.MsgStream
# map of paper "live" orders which be used # map of paper "live" orders which be used
# to simulate fills based on paper engine settings # to simulate fills based on paper engine settings
@ -61,20 +72,20 @@ class PaperBoi:
price: float, price: float,
action: str, action: str,
size: float, size: float,
brid: Optional[str], reqid: Optional[str],
) -> int: ) -> int:
"""Place an order and return integer request id provided by client. """Place an order and return integer request id provided by client.
""" """
is_modify: bool = False
if brid is None: if reqid is None:
reqid = str(uuid.uuid4()) reqid = str(uuid.uuid4())
else: else:
# order is already existing, this is a modify # order is already existing, this is a modify
(oid, symbol, action, old_price) = self._reqids[brid] (oid, symbol, action, old_price) = self._reqids[reqid]
assert old_price != price assert old_price != price
reqid = brid is_modify = True
# register order internally # register order internally
self._reqids[reqid] = (oid, symbol, action, price) self._reqids[reqid] = (oid, symbol, action, price)
@ -90,22 +101,16 @@ class PaperBoi:
# in the broker trades event processing loop # in the broker trades event processing loop
await trio.sleep(0.05) await trio.sleep(0.05)
await self._to_trade_stream.send({ msg = BrokerdStatus(
status='submitted',
'local_trades': ('status', { reqid=reqid,
broker=self.broker,
'time_ns': time.time_ns(), time_ns=time.time_ns(),
'reqid': reqid, filled=0.0,
reason='paper_trigger',
'status': 'submitted', remaining=size,
'broker': self.broker, )
# 'cmd': cmd, # original request message await self.ems_trades_stream.send(msg.dict())
'paper_info': {
'oid': oid,
},
}),
})
# if we're already a clearing price simulate an immediate fill # if we're already a clearing price simulate an immediate fill
if ( if (
@ -129,7 +134,7 @@ class PaperBoi:
# and trigger by the simulated clearing task normally # and trigger by the simulated clearing task normally
# running ``simulate_fills()``. # running ``simulate_fills()``.
if brid is not None: if is_modify:
# remove any existing order for the old price # remove any existing order for the old price
orders[symbol].pop((oid, old_price)) orders[symbol].pop((oid, old_price))
@ -144,7 +149,6 @@ class PaperBoi:
) -> None: ) -> None:
# TODO: fake market simulation effects # TODO: fake market simulation effects
# await self._to_trade_stream.send(
oid, symbol, action, price = self._reqids[reqid] oid, symbol, action, price = self._reqids[reqid]
if action == 'buy': if action == 'buy':
@ -155,21 +159,14 @@ class PaperBoi:
# TODO: net latency model # TODO: net latency model
await trio.sleep(0.05) await trio.sleep(0.05)
await self._to_trade_stream.send({ msg = BrokerdStatus(
status='cancelled',
'local_trades': ('status', { oid=oid,
reqid=reqid,
'time_ns': time.time_ns(), broker=self.broker,
'oid': oid, time_ns=time.time_ns(),
'reqid': reqid, )
await self.ems_trades_stream.send(msg.dict())
'status': 'cancelled',
'broker': self.broker,
# 'cmd': cmd, # original request message
'paper': True,
}),
})
async def fake_fill( async def fake_fill(
self, self,
@ -191,56 +188,49 @@ class PaperBoi:
# TODO: net latency model # TODO: net latency model
await trio.sleep(0.05) await trio.sleep(0.05)
# the trades stream expects events in the form msg = BrokerdFill(
# {'local_trades': (event_name, msg)}
await self._to_trade_stream.send({
'local_trades': ('fill', { reqid=reqid,
time_ns=time.time_ns(),
'status': 'filled', action=action,
'broker': self.broker, size=size,
# converted to float by us in ib backend price=price,
'broker_time': datetime.now().timestamp(),
'action': action,
'size': size,
'price': price,
'remaining': 0 if order_complete else remaining,
# normally filled by real `brokerd` daemon
'time': time.time_ns(),
'time_ns': time.time_ns(), # cuz why not
# fake ids
'reqid': reqid,
broker_time=datetime.now().timestamp(),
broker_details={
'paper_info': { 'paper_info': {
'oid': oid, 'oid': oid,
}, },
# mocking ib
'name': self.broker + '_paper',
},
)
await self.ems_trades_stream.send(msg.dict())
# XXX: fields we might not need to emulate?
# execution id from broker
# 'execid': execu.execId,
# 'cmd': cmd, # original request message?
}),
})
if order_complete: if order_complete:
await self._to_trade_stream.send({
'local_trades': ('status', { msg = BrokerdStatus(
'reqid': reqid,
'status': 'filled',
'broker': self.broker,
'filled': size,
'remaining': 0 if order_complete else remaining,
# converted to float by us in ib backend reqid=reqid,
'broker_time': datetime.now().timestamp(), time_ns=time.time_ns(),
status='filled',
filled=size,
remaining=0 if order_complete else remaining,
action=action,
size=size,
price=price,
broker_details={
'paper_info': { 'paper_info': {
'oid': oid, 'oid': oid,
}, },
}), 'name': self.broker,
}) },
)
await self.ems_trades_stream.send(msg.dict())
async def simulate_fills( async def simulate_fills(
@ -327,3 +317,145 @@ async def simulate_fills(
else: else:
# prices are iterated in sorted order so we're done # prices are iterated in sorted order so we're done
break break
async def handle_order_requests(
client: PaperBoi,
ems_order_stream: tractor.MsgStream,
) -> None:
# order_request: dict
async for request_msg in ems_order_stream:
action = request_msg['action']
if action in {'buy', 'sell'}:
# validate
order = BrokerdOrder(**request_msg)
# call our client api to submit the order
reqid = await client.submit_limit(
oid=order.oid,
symbol=order.symbol,
price=order.price,
action=order.action,
size=order.size,
# XXX: by default 0 tells ``ib_insync`` methods that
# there is no existing order so ask the client to create
# a new one (which it seems to do by allocating an int
# counter - collision prone..)
reqid=order.reqid,
)
# deliver ack that order has been submitted to broker routing
await ems_order_stream.send(
BrokerdOrderAck(
# ems order request id
oid=order.oid,
# broker specific request id
reqid=reqid,
).dict()
)
elif action == 'cancel':
msg = BrokerdCancel(**request_msg)
await client.submit_cancel(
reqid=msg.reqid
)
else:
log.error(f'Unknown order command: {request_msg}')
@tractor.context
async def trades_dialogue(
ctx: tractor.Context,
broker: str,
symbol: str,
loglevel: str = None,
) -> None:
async with (
data.open_feed(
broker,
[symbol],
loglevel=loglevel,
) as feed,
):
# TODO: load paper positions per broker from .toml config file
# and pass as symbol to position data mapping: ``dict[str, dict]``
# await ctx.started(all_positions)
await ctx.started({})
async with (
ctx.open_stream() as ems_stream,
trio.open_nursery() as n,
):
client = PaperBoi(
broker,
ems_stream,
_buys={},
_sells={},
_reqids={},
)
n.start_soon(handle_order_requests, client, ems_stream)
# paper engine simulator clearing task
await simulate_fills(feed.stream, client)
@asynccontextmanager
async def open_paperboi(
broker: str,
symbol: str,
loglevel: str,
) -> Callable:
'''Spawn a paper engine actor and yield through access to
its context.
'''
service_name = f'paperboi.{broker}'
async with (
tractor.find_actor(service_name) as portal,
tractor.open_nursery() as tn,
):
# only spawn if no paperboi already is up
# (we likely don't need more then one proc for basic
# simulated order clearing)
if portal is None:
portal = await tn.start_actor(
service_name,
enable_modules=[__name__]
)
async with portal.open_context(
trades_dialogue,
broker=broker,
symbol=symbol,
loglevel=loglevel,
) as (ctx, first):
try:
yield ctx, first
finally:
# be sure to tear down the paper service on exit
with trio.CancelScope(shield=True):
await portal.cancel_actor()

View File

@ -9,6 +9,7 @@ import tractor
from ..log import get_console_log, get_logger, colorize_json from ..log import get_console_log, get_logger, colorize_json
from ..brokers import get_brokermod, config from ..brokers import get_brokermod, config
from .._daemon import _tractor_kwargs
log = get_logger('cli') log = get_logger('cli')
@ -101,8 +102,9 @@ def cli(ctx, brokers, loglevel, tl, configdir):
def services(config, tl, names): def services(config, tl, names):
async def list_services(): async def list_services():
async with tractor.get_arbiter( async with tractor.get_arbiter(
*tractor.current_actor()._arb_addr *_tractor_kwargs['arbiter_addr']
) as portal: ) as portal:
registry = await portal.run('self', 'get_registry') registry = await portal.run('self', 'get_registry')
json_d = {} json_d = {}
@ -118,6 +120,7 @@ def services(config, tl, names):
list_services, list_services,
name='service_query', name='service_query',
loglevel=config['loglevel'] if tl else None, loglevel=config['loglevel'] if tl else None,
arbiter_addr=_tractor_kwargs['arbiter_addr'],
) )

View File

@ -64,13 +64,13 @@ class NoBsWs:
async def _connect( async def _connect(
self, self,
tries: int = 10000, tries: int = 1000,
) -> None: ) -> None:
while True: while True:
try: try:
await self._stack.aclose() await self._stack.aclose()
except (DisconnectionTimeout, RuntimeError): except (DisconnectionTimeout, RuntimeError):
await trio.sleep(1) await trio.sleep(0.5)
else: else:
break break
@ -95,7 +95,7 @@ class NoBsWs:
f'{self} connection bail with ' f'{self} connection bail with '
f'{type(err)}...retry attempt {i}' f'{type(err)}...retry attempt {i}'
) )
await trio.sleep(1) await trio.sleep(0.5)
continue continue
else: else:
log.exception('ws connection fail...') log.exception('ws connection fail...')

View File

@ -217,8 +217,8 @@ async def allocate_persistent_feed(
times = shm.array['time'] times = shm.array['time']
delay_s = times[-1] - times[times != times[-1]][-1] delay_s = times[-1] - times[times != times[-1]][-1]
# pass OHLC sample rate in seconds # pass OHLC sample rate in seconds (be sure to use python int type)
init_msg[symbol]['sample_rate'] = delay_s init_msg[symbol]['sample_rate'] = int(delay_s)
# yield back control to starting nursery # yield back control to starting nursery
task_status.started((init_msg, first_quote)) task_status.started((init_msg, first_quote))

View File

@ -127,10 +127,6 @@ class OrderMode:
""" """
line = self.lines.commit_line(uuid) line = self.lines.commit_line(uuid)
req_msg = self.book._sent_orders.get(uuid)
if req_msg:
req_msg['ack_time_ns'] = time.time_ns()
return line return line
def on_fill( def on_fill(
@ -196,8 +192,10 @@ class OrderMode:
def submit_exec( def submit_exec(
self, self,
size: Optional[float] = None, size: Optional[float] = None,
) -> LevelLine: ) -> LevelLine:
"""Send execution order to EMS. """Send execution order to EMS return a level line to
represent the order on a chart.
""" """
# register the "staged" line under the cursor # register the "staged" line under the cursor
@ -226,6 +224,9 @@ class OrderMode:
exec_mode=self._exec_mode, exec_mode=self._exec_mode,
) )
# TODO: update the line once an ack event comes back
# from the EMS!
# make line graphic if order push was # make line graphic if order push was
# sucessful # sucessful
line = self.lines.create_order_line( line = self.lines.create_order_line(
@ -266,14 +267,6 @@ class OrderMode:
price=line.value(), price=line.value(),
) )
# def on_key_press(
# self,
# key:
# mods:
# text: str,
# ) -> None:
# pass
@asynccontextmanager @asynccontextmanager
async def open_order_mode( async def open_order_mode(
@ -320,10 +313,14 @@ async def start_order_mode(
# spawn EMS actor-service # spawn EMS actor-service
async with ( async with (
open_ems(brokername, symbol) as (book, trades_stream), open_ems(brokername, symbol) as (book, trades_stream, positions),
open_order_mode(symbol, chart, book) as order_mode open_order_mode(symbol, chart, book) as order_mode
): ):
# update any exising positions
for sym, msg in positions.items():
order_mode.on_position_update(msg)
def get_index(time: float): def get_index(time: float):
# XXX: not sure why the time is so off here # XXX: not sure why the time is so off here
@ -346,16 +343,15 @@ async def start_order_mode(
fmsg = pformat(msg) fmsg = pformat(msg)
log.info(f'Received order msg:\n{fmsg}') log.info(f'Received order msg:\n{fmsg}')
resp = msg['resp'] name = msg['name']
if name in (
if resp in (
'position', 'position',
): ):
# show line label once order is live # show line label once order is live
order_mode.on_position_update(msg) order_mode.on_position_update(msg)
continue continue
# delete the line from view resp = msg['resp']
oid = msg['oid'] oid = msg['oid']
# response to 'action' request (buy/sell) # response to 'action' request (buy/sell)
@ -378,21 +374,21 @@ async def start_order_mode(
order_mode.on_cancel(oid) order_mode.on_cancel(oid)
elif resp in ( elif resp in (
'dark_executed' 'dark_triggered'
): ):
log.info(f'Dark order triggered for {fmsg}') log.info(f'Dark order triggered for {fmsg}')
# for alerts add a triangle and remove the elif resp in (
# level line 'alert_triggered'
if msg['cmd']['action'] == 'alert': ):
# should only be one "fill" for an alert
# should only be one "fill" for an alert # add a triangle and remove the level line
order_mode.on_fill( order_mode.on_fill(
oid, oid,
price=msg['trigger_price'], price=msg['trigger_price'],
arrow_index=get_index(time.time()) arrow_index=get_index(time.time())
) )
await order_mode.on_exec(oid, msg) await order_mode.on_exec(oid, msg)
# response to completed 'action' request for buy/sell # response to completed 'action' request for buy/sell
elif resp in ( elif resp in (
@ -403,12 +399,15 @@ async def start_order_mode(
# each clearing tick is responded individually # each clearing tick is responded individually
elif resp in ('broker_filled',): elif resp in ('broker_filled',):
action = msg['action'] action = book._sent_orders[oid].action
details = msg['brokerd_msg']
# TODO: some kinda progress system # TODO: some kinda progress system
order_mode.on_fill( order_mode.on_fill(
oid, oid,
price=msg['price'], price=details['price'],
arrow_index=get_index(msg['broker_time']),
pointing='up' if action == 'buy' else 'down', pointing='up' if action == 'buy' else 'down',
# TODO: put the actual exchange timestamp
arrow_index=get_index(details['broker_time']),
) )